--- /dev/null
+From a111b7c0f20e13b54df2fa959b3dc0bdf1925ae6 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Fri, 12 Apr 2019 15:39:32 -0500
+Subject: arm64/speculation: Support 'mitigations=' cmdline option
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit a111b7c0f20e13b54df2fa959b3dc0bdf1925ae6 upstream.
+
+Configure arm64 runtime CPU speculation bug mitigations in accordance
+with the 'mitigations=' cmdline option. This affects Meltdown, Spectre
+v2, and Speculative Store Bypass.
+
+The default behavior is unchanged.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+[will: reorder checks so KASLR implies KPTI and SSBS is affected by cmdline]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/admin-guide/kernel-parameters.txt | 8 +++++---
+ arch/arm64/kernel/cpu_errata.c | 6 +++++-
+ arch/arm64/kernel/cpufeature.c | 8 +++++++-
+ 3 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2503,8 +2503,8 @@
+ http://repo.or.cz/w/linux-2.6/mini2440.git
+
+ mitigations=
+- [X86,PPC,S390] Control optional mitigations for CPU
+- vulnerabilities. This is a set of curated,
++ [X86,PPC,S390,ARM64] Control optional mitigations for
++ CPU vulnerabilities. This is a set of curated,
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
+@@ -2513,12 +2513,14 @@
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
++ kpti=0 [ARM64]
+ nospectre_v1 [PPC]
+ nobp=0 [S390]
+ nospectre_v1 [X86]
+- nospectre_v2 [X86,PPC,S390]
++ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ ssbd=force-off [ARM64]
+ l1tf=off [X86]
+ mds=off [X86]
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -19,6 +19,7 @@
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+ #include <linux/types.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
+@@ -355,6 +356,9 @@ static bool has_ssbd_mitigation(const st
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
++ if (cpu_mitigations_off())
++ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
++
+ /* delay setting __ssb_safe until we get a firmware response */
+ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+ this_cpu_safe = true;
+@@ -600,7 +604,7 @@ check_branch_predictor(const struct arm6
+ }
+
+ /* forced off */
+- if (__nospectre_v2) {
++ if (__nospectre_v2 || cpu_mitigations_off()) {
+ pr_info_once("spectrev2 mitigation disabled by command line option\n");
+ __hardenbp_enab = false;
+ return false;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -24,6 +24,7 @@
+ #include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <linux/mm.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -907,7 +908,7 @@ static bool unmap_kernel_at_el0(const st
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ { /* sentinel */ }
+ };
+- char const *str = "command line option";
++ char const *str = "kpti command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+@@ -937,6 +938,11 @@ static bool unmap_kernel_at_el0(const st
+ }
+ }
+
++ if (cpu_mitigations_off() && !__kpti_forced) {
++ str = "mitigations=off";
++ __kpti_forced = -1;
++ }
++
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+ return false;
--- /dev/null
+From 517953c2c47f9c00a002f588ac856a5bc70cede3 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Mon, 15 Apr 2019 16:21:24 -0500
+Subject: arm64: Use firmware to detect CPUs that are not affected by Spectre-v2
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 517953c2c47f9c00a002f588ac856a5bc70cede3 upstream.
+
+The SMCCC ARCH_WORKAROUND_1 service can indicate that although the
+firmware knows about the Spectre-v2 mitigation, this particular
+CPU is not vulnerable, and it is thus not necessary to call
+the firmware on this CPU.
+
+Let's use this information to our benefit.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 32 +++++++++++++++++++++++---------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -198,22 +198,36 @@ static int detect_harden_bp_fw(void)
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if ((int)res.a0 < 0)
++ switch ((int)res.a0) {
++ case 1:
++ /* Firmware says we're just fine */
++ return 0;
++ case 0:
++ cb = call_hvc_arch_workaround_1;
++ /* This is a guest, no need to patch KVM vectors */
++ smccc_start = NULL;
++ smccc_end = NULL;
++ break;
++ default:
+ return -1;
+- cb = call_hvc_arch_workaround_1;
+- /* This is a guest, no need to patch KVM vectors */
+- smccc_start = NULL;
+- smccc_end = NULL;
++ }
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if ((int)res.a0 < 0)
++ switch ((int)res.a0) {
++ case 1:
++ /* Firmware says we're just fine */
++ return 0;
++ case 0:
++ cb = call_smc_arch_workaround_1;
++ smccc_start = __smccc_workaround_1_smc_start;
++ smccc_end = __smccc_workaround_1_smc_end;
++ break;
++ default:
+ return -1;
+- cb = call_smc_arch_workaround_1;
+- smccc_start = __smccc_workaround_1_smc_start;
+- smccc_end = __smccc_workaround_1_smc_end;
++ }
+ break;
+
+ default:
--- /dev/null
+From 1004ce4c255fc3eb3ad9145ddd53547d1b7ce327 Mon Sep 17 00:00:00 2001
+From: Andrew Murray <andrew.murray@arm.com>
+Date: Thu, 29 Aug 2019 14:28:35 -0600
+Subject: coresight: etm4x: Use explicit barriers on enable/disable
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit 1004ce4c255fc3eb3ad9145ddd53547d1b7ce327 upstream.
+
+Synchronization is recommended before disabling the trace registers
+to prevent any start or stop points being speculative at the point
+of disabling the unit (section 7.3.77 of ARM IHI 0064D).
+
+Synchronization is also recommended after programming the trace
+registers to ensure all updates are committed prior to normal code
+resuming (section 4.3.7 of ARM IHI 0064D).
+
+Let's ensure these synchronization points are present in the code
+and clearly commented.
+
+Note that we could rely on the barriers in CS_LOCK and
+coresight_disclaim_device_unlocked or the context switch to user
+space - however coresight may be of use in the kernel.
+
+On armv8 the mb macro is defined as dsb(sy) - Given that the etm4x is
+only used on armv8 let's directly use dsb(sy) instead of mb(). This
+removes some ambiguity and makes it easier to correlate the code with
+the TRM.
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[Fixed capital letter for "use" in title]
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Link: https://lore.kernel.org/r/20190829202842.580-11-mathieu.poirier@linaro.org
+Cc: stable@vger.kernel.org # 4.9+
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwtracing/coresight/coresight-etm4x.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-etm4x.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
+@@ -174,6 +174,12 @@ static void etm4_enable_hw(void *info)
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ dev_err(drvdata->dev,
+ "timeout while waiting for Idle Trace Status\n");
++ /*
++ * As recommended by section 4.3.7 ("Synchronization when using the
++ * memory-mapped interface") of ARM IHI 0064D
++ */
++ dsb(sy);
++ isb();
+
+ CS_LOCK(drvdata->base);
+
+@@ -324,8 +330,12 @@ static void etm4_disable_hw(void *info)
+ /* EN, bit[0] Trace unit enable bit */
+ control &= ~0x1;
+
+- /* make sure everything completes before disabling */
+- mb();
++ /*
++ * Make sure everything completes before disabling, as recommended
++ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
++ * SSTATUS") of ARM IHI 0064D
++ */
++ dsb(sy);
+ isb();
+ writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+
arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
arm64-force-ssbs-on-context-switch.patch
+arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch
+arm64-speculation-support-mitigations-cmdline-option.patch
+vfs-fix-eoverflow-testing-in-put_compat_statfs64.patch
+coresight-etm4x-use-explicit-barriers-on-enable-disable.patch
--- /dev/null
+From cc3a7bfe62b947b423fcb2cfe89fcba92bf48fa3 Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Wed, 2 Oct 2019 16:17:54 -0500
+Subject: vfs: Fix EOVERFLOW testing in put_compat_statfs64
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit cc3a7bfe62b947b423fcb2cfe89fcba92bf48fa3 upstream.
+
+Today, put_compat_statfs64() disallows nearly any field value over
+2^32 if f_bsize is only 32 bits, but that makes no sense.
+compat_statfs64 is there for the explicit purpose of providing 64-bit
+fields for f_files, f_ffree, etc. And f_bsize is always only 32 bits.
+
+As a result, 32-bit userspace gets -EOVERFLOW for, e.g., large file
+counts even with -D_FILE_OFFSET_BITS=64 set.
+
+In reality, only f_bsize and f_frsize can legitimately overflow
+(fields like f_type and f_namelen should never be large), so test
+only those fields.
+
+This bug was discussed at length some time ago, and this is the proposal
+Al suggested at https://lkml.org/lkml/2018/8/6/640. It seemed to get
+dropped amid the discussion of other related changes, but this
+part seems obviously correct on its own, so I've picked it up and
+sent it, for expediency.
+
+Fixes: 64d2ab32efe3 ("vfs: fix put_compat_statfs64() does not handle errors")
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/statfs.c | 17 ++++-------------
+ 1 file changed, 4 insertions(+), 13 deletions(-)
+
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned
+ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
+ {
+ struct compat_statfs64 buf;
+- if (sizeof(ubuf->f_bsize) == 4) {
+- if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
+- kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
+- return -EOVERFLOW;
+- /* f_files and f_ffree may be -1; it's okay
+- * to stuff that into 32 bits */
+- if (kbuf->f_files != 0xffffffffffffffffULL
+- && (kbuf->f_files & 0xffffffff00000000ULL))
+- return -EOVERFLOW;
+- if (kbuf->f_ffree != 0xffffffffffffffffULL
+- && (kbuf->f_ffree & 0xffffffff00000000ULL))
+- return -EOVERFLOW;
+- }
++
++ if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
++ return -EOVERFLOW;
++
+ memset(&buf, 0, sizeof(struct compat_statfs64));
+ buf.f_type = kbuf->f_type;
+ buf.f_bsize = kbuf->f_bsize;