--- /dev/null
+From 3380f741e24fcd01b6464088a0c1b32de1454c55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2017 07:52:52 +0100
+Subject: ARM: 8723/2: always assume the "unified" syntax for assembly code
+
+From: Nicolas Pitre <nicolas.pitre@linaro.org>
+
+[ Upstream commit 75fea300d73ae5b18957949a53ec770daaeb6fc2 ]
+
+The GNU assembler has implemented the "unified syntax" parsing since
+2005. This "unified" syntax is required when the kernel is built in
+Thumb2 mode. However, the "unified" syntax is a mixed bag of features,
+including not requiring a `#' prefix with immediate operands. This leads
+to situations where some code builds just fine in Thumb2 mode and fails
+to build in ARM mode if that prefix is missing. This behavior
+discrepancy makes build tests less valuable, forcing both ARM and Thumb2
+builds for proper coverage.
+
+Let's "fix" this issue by always using the "unified" syntax for both ARM
+and Thumb2 mode. Given that the documented minimum binutils version that
+properly builds the kernel is version 2.20 released in 2010, we can
+assume that any toolchain capable of building the latest kernel is also
+"unified syntax" capable.
+
+With this, a bunch of macros used to mask some differences between both
+syntaxes can be removed, with the side effect of making LTO easier.
+
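+A minimal sketch of the discrepancy (illustrative only, not part of
+the patch), assuming GNU as:
+
+	mov	r0, 1		@ accepted under ".syntax unified" only
+	mov	r0, #1		@ accepted by both syntaxes
+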
+Suggested-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/Kconfig | 7 +---
+ arch/arm/Makefile | 6 ++-
+ arch/arm/include/asm/unified.h | 77 ++--------------------------------
+ 3 files changed, 8 insertions(+), 82 deletions(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index cf69aab648fbd..ba9325fc75b85 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1533,12 +1533,10 @@ config THUMB2_KERNEL
+ bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
+ depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
+ default y if CPU_THUMBONLY
+- select ARM_ASM_UNIFIED
+ select ARM_UNWIND
+ help
+ By enabling this option, the kernel will be compiled in
+- Thumb-2 mode. A compiler/assembler that understand the unified
+- ARM-Thumb syntax is needed.
++ Thumb-2 mode.
+
+ If unsure, say N.
+
+@@ -1573,9 +1571,6 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
+
+ Unless you are sure your tools don't have this problem, say Y.
+
+-config ARM_ASM_UNIFIED
+- bool
+-
+ config ARM_PATCH_IDIV
+ bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
+ depends on CPU_32v7 && !XIP_KERNEL
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 17e80f4832816..234ee43b44384 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -115,9 +115,11 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
+ CFLAGS_ABI +=-funwind-tables
+ endif
+
++# Accept old syntax despite ".syntax unified"
++AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
++
+ ifeq ($(CONFIG_THUMB2_KERNEL),y)
+ AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
+-AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+ CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+ AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
+ # Work around buggy relocation from gas if requested:
+@@ -125,7 +127,7 @@ ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
+ CFLAGS_MODULE +=-fno-optimize-sibling-calls
+ endif
+ else
+-CFLAGS_ISA :=$(call cc-option,-marm,)
++CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
+ AFLAGS_ISA :=$(CFLAGS_ISA)
+ endif
+
+diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
+index a91ae499614cb..2c3b952be63eb 100644
+--- a/arch/arm/include/asm/unified.h
++++ b/arch/arm/include/asm/unified.h
+@@ -20,8 +20,10 @@
+ #ifndef __ASM_UNIFIED_H
+ #define __ASM_UNIFIED_H
+
+-#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
++#if defined(__ASSEMBLY__)
+ .syntax unified
++#else
++__asm__(".syntax unified");
+ #endif
+
+ #ifdef CONFIG_CPU_V7M
+@@ -64,77 +66,4 @@
+
+ #endif /* CONFIG_THUMB2_KERNEL */
+
+-#ifndef CONFIG_ARM_ASM_UNIFIED
+-
+-/*
+- * If the unified assembly syntax isn't used (in ARM mode), these
+- * macros expand to an empty string
+- */
+-#ifdef __ASSEMBLY__
+- .macro it, cond
+- .endm
+- .macro itt, cond
+- .endm
+- .macro ite, cond
+- .endm
+- .macro ittt, cond
+- .endm
+- .macro itte, cond
+- .endm
+- .macro itet, cond
+- .endm
+- .macro itee, cond
+- .endm
+- .macro itttt, cond
+- .endm
+- .macro ittte, cond
+- .endm
+- .macro ittet, cond
+- .endm
+- .macro ittee, cond
+- .endm
+- .macro itett, cond
+- .endm
+- .macro itete, cond
+- .endm
+- .macro iteet, cond
+- .endm
+- .macro iteee, cond
+- .endm
+-#else /* !__ASSEMBLY__ */
+-__asm__(
+-" .macro it, cond\n"
+-" .endm\n"
+-" .macro itt, cond\n"
+-" .endm\n"
+-" .macro ite, cond\n"
+-" .endm\n"
+-" .macro ittt, cond\n"
+-" .endm\n"
+-" .macro itte, cond\n"
+-" .endm\n"
+-" .macro itet, cond\n"
+-" .endm\n"
+-" .macro itee, cond\n"
+-" .endm\n"
+-" .macro itttt, cond\n"
+-" .endm\n"
+-" .macro ittte, cond\n"
+-" .endm\n"
+-" .macro ittet, cond\n"
+-" .endm\n"
+-" .macro ittee, cond\n"
+-" .endm\n"
+-" .macro itett, cond\n"
+-" .endm\n"
+-" .macro itete, cond\n"
+-" .endm\n"
+-" .macro iteet, cond\n"
+-" .endm\n"
+-" .macro iteee, cond\n"
+-" .endm\n");
+-#endif /* __ASSEMBLY__ */
+-
+-#endif /* CONFIG_ARM_ASM_UNIFIED */
+-
+ #endif /* !__ASM_UNIFIED_H */
+--
+2.20.1
+
--- /dev/null
+From 0daa191d7f817ffc1984bbe9f8e8a195705e24ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 15:19:35 +0000
+Subject: arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 7559950aef1ab8792c50797c6c5c7c5150a02460 upstream
+
+We set the compat_elf_hwcap bits unconditionally on arm64 to
+include the VFP and NEON support. However, the FP/SIMD unit
+is optional on Arm v8 and thus could be missing. We already
+handle this properly in the kernel, but still advertise to
+COMPAT applications that VFP is available. Fix this to make
+sure we only advertise these capabilities when they are really
+present.
+
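+As an illustrative sketch of the user-visible effect (the constants
+follow the 32-bit uapi hwcap values; the test program itself is not
+part of the patch), a compat binary can check the advertised bits:
+
+	#include <stdio.h>
+	#include <sys/auxv.h>
+
+	#define HWCAP_VFP	(1 << 6)	/* arm32 HWCAP_VFP */
+	#define HWCAP_NEON	(1 << 12)	/* arm32 HWCAP_NEON */
+
+	/* Build as a 32-bit (compat) binary, run on the arm64 host */
+	int main(void)
+	{
+		unsigned long hwcap = getauxval(AT_HWCAP);
+
+		/* With this fix, neither bit is set on an FP-less CPU */
+		printf("VFP: %s NEON: %s\n",
+		       (hwcap & HWCAP_VFP) ? "yes" : "no",
+		       (hwcap & HWCAP_NEON) ? "yes" : "no");
+		return 0;
+	}
+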
+Cc: stable@vger.kernel.org # v4.14
+Cc: Will Deacon <will@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/cpufeature.c | 52 +++++++++++++++++++++++++++++-----
+ 1 file changed, 45 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index c477fd34a9120..6b3bb67596ae8 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -41,9 +41,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
+ #define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
++ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+ unsigned int compat_elf_hwcap2 __read_mostly;
+@@ -1134,17 +1132,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {},
+ };
+
+-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
+- { \
+- .desc = #cap, \
+- .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
++
++#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ .matches = has_cpuid_feature, \
+ .sys_reg = reg, \
+ .field_pos = field, \
+ .sign = s, \
+ .min_field_value = min_value, \
++
++#define __HWCAP_CAP(name, cap_type, cap) \
++ .desc = name, \
++ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
+ .hwcap_type = cap_type, \
+ .hwcap = cap, \
++
++#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
++ { \
++ __HWCAP_CAP(#cap, cap_type, cap) \
++ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
++ }
++
++#define HWCAP_CAP_MATCH(match, cap_type, cap) \
++ { \
++ __HWCAP_CAP(#cap, cap_type, cap) \
++ .matches = match, \
+ }
+
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+@@ -1177,8 +1188,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ {},
+ };
+
++#ifdef CONFIG_COMPAT
++static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
++{
++ /*
++ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
++ * in line with that of arm32 as in vfp_init(). We make sure that the
++ * check is future proof, by making sure value is non-zero.
++ */
++ u32 mvfr1;
++
++ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
++ if (scope == SCOPE_SYSTEM)
++ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
++ else
++ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
++
++ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
+ #ifdef CONFIG_COMPAT
++ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
++ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
++ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+--
+2.20.1
+
--- /dev/null
+From 3a15eef46cd8073603a50267bd4e64b7fc459c6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 15:19:37 +0000
+Subject: arm64: nofpsimd: Handle TIF_FOREIGN_FPSTATE flag cleanly
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 52f73c383b2418f2d31b798e765ae7d596c35021 upstream
+
+We detect the absence of FP/SIMD after an incapable CPU is brought up,
+and by then we already have kernel threads running with TIF_FOREIGN_FPSTATE
+set. The flag could also be set for early userspace applications (e.g.,
+modprobe triggered from initramfs) and for init. This could cause those
+applications to loop forever in do_notify_resume(), as we never clear
+the TIF flag once we know that we don't support FP.
+
+Fix this by making sure that we clear the TIF_FOREIGN_FPSTATE flag
+for tasks which may have it set, as we would have done in the normal
+case, but without touching the hardware state (since we don't support
+any).
+
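+An abridged sketch of the resume loop in question (not the literal
+kernel code) shows why the flag must be cleared:
+
+	/* do_notify_resume(), simplified */
+	do {
+		if (thread_flags & _TIF_FOREIGN_FPSTATE)
+			fpsimd_restore_current_state(); /* must clear the flag */
+		/* ... other pending work ... */
+		thread_flags = READ_ONCE(current_thread_info()->flags);
+	} while (thread_flags & _TIF_WORK_MASK);
+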
+Cc: stable@vger.kernel.org # v4.14
+Cc: Will Deacon <will@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/fpsimd.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index f4fdf6420ac5c..4cd962f6c4302 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -206,8 +206,19 @@ void fpsimd_preserve_current_state(void)
+ */
+ void fpsimd_restore_current_state(void)
+ {
+- if (!system_supports_fpsimd())
++ /*
++ * For the tasks that were created before we detected the absence of
++ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
++ * e.g, init. This could be then inherited by the children processes.
++ * If we later detect that the system doesn't support FP/SIMD,
++ * we must clear the flag for all the tasks to indicate that the
++ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
++ * do_notify_resume().
++ */
++ if (!system_supports_fpsimd()) {
++ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ return;
++ }
+
+ local_bh_disable();
+
+@@ -229,7 +240,7 @@ void fpsimd_restore_current_state(void)
+ */
+ void fpsimd_update_current_state(struct fpsimd_state *state)
+ {
+- if (!system_supports_fpsimd())
++ if (WARN_ON(!system_supports_fpsimd()))
+ return;
+
+ local_bh_disable();
+--
+2.20.1
+
--- /dev/null
+From 3ae1a621483f226d59d2bac57d84fbc44a65d64a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 15:19:36 +0000
+Subject: arm64: ptrace: nofpsimd: Fail FP/SIMD regset operations
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit c9d66999f064947e6b577ceacc1eb2fbca6a8d3c upstream
+
+When FP/SIMD is not supported on the system, fail the operations
+on the FP/SIMD regsets.
+
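+For illustration (a hedged sketch, not part of the patch), a debugger
+reading the FP regset on such a system now gets an error instead of
+bogus state:
+
+	#include <stdio.h>
+	#include <sys/types.h>
+	#include <sys/ptrace.h>
+	#include <sys/uio.h>
+	#include <linux/elf.h>
+	#include <asm/ptrace.h>
+
+	int read_fp_regs(pid_t pid)
+	{
+		struct user_fpsimd_state fp;
+		struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };
+
+		/* Fails with EINVAL when FP/SIMD is absent */
+		if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == -1) {
+			perror("PTRACE_GETREGSET");
+			return -1;
+		}
+		return 0;
+	}
+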
+Cc: stable@vger.kernel.org # v4.14
+Cc: Will Deacon <will@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/ptrace.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 242527f29c410..e230b4dff9602 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -624,6 +624,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ return 0;
+ }
+
++static int fpr_active(struct task_struct *target, const struct user_regset *regset)
++{
++ if (!system_supports_fpsimd())
++ return -ENODEV;
++ return regset->n;
++}
++
+ /*
+ * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
+ */
+@@ -634,6 +641,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct user_fpsimd_state *uregs;
+ uregs = &target->thread.fpsimd_state.user_fpsimd;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+@@ -648,6 +658,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ struct user_fpsimd_state newstate =
+ target->thread.fpsimd_state.user_fpsimd;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ if (ret)
+ return ret;
+@@ -740,6 +753,7 @@ static const struct user_regset aarch64_regsets[] = {
+ */
+ .size = sizeof(u32),
+ .align = sizeof(u32),
++ .active = fpr_active,
+ .get = fpr_get,
+ .set = fpr_set
+ },
+@@ -914,6 +928,9 @@ static int compat_vfp_get(struct task_struct *target,
+ compat_ulong_t fpscr;
+ int ret, vregs_end_pos;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+ if (target == current)
+@@ -947,6 +964,9 @@ static int compat_vfp_set(struct task_struct *target,
+ compat_ulong_t fpscr;
+ int ret, vregs_end_pos;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
+@@ -1004,6 +1024,7 @@ static const struct user_regset aarch32_regsets[] = {
+ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
++ .active = fpr_active,
+ .get = compat_vfp_get,
+ .set = compat_vfp_set
+ },
+--
+2.20.1
+
--- /dev/null
+arm64-cpufeature-set-the-fp-simd-compat-hwcap-bits-p.patch
+arm64-ptrace-nofpsimd-fail-fp-simd-regset-operations.patch
+arm64-nofpsimd-handle-tif_foreign_fpstate-flag-clean.patch
+arm-8723-2-always-assume-the-unified-syntax-for-asse.patch