4.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Feb 2018 16:31:19 +0000 (17:31 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Feb 2018 16:31:19 +0000 (17:31 +0100)
added patches:
arm-arm64-kvm-add-psci_version-helper.patch
arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
arm-arm64-kvm-advertise-smccc-v1.1.patch
arm-arm64-kvm-consolidate-the-psci-include-files.patch
arm-arm64-kvm-implement-psci-1.0-support.patch
arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
arm64-futex-mask-__user-pointers-prior-to-dereference.patch
arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
arm64-implement-array_index_mask_nospec.patch
arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
arm64-implement-branch-predictor-hardening-for-falkor.patch
arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
arm64-kconfig-add-config_unmap_kernel_at_el0.patch
arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
arm64-kvm-make-psci_version-a-fast-path.patch
arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
arm64-make-user_ds-an-inclusive-limit.patch
arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
arm64-mm-allocate-asids-in-pairs.patch
arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
arm64-mm-rename-post_ttbr0_update_workaround.patch
arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
arm64-mm-use-non-global-mappings-for-kernel-space.patch
arm64-move-bp-hardening-to-check_and_switch_context.patch
arm64-move-post_ttbr_update_workaround-to-c-code.patch
arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
arm64-use-ret-instruction-for-exiting-the-trampoline.patch
cifs-fix-autonegotiate-security-settings-mismatch.patch
cifs-fix-missing-put_xid-in-cifs_file_strict_mmap.patch
cifs-zero-sensitive-data-when-freeing.patch
cpufreq-mediatek-add-mediatek-related-projects-into-blacklist.patch
dmaengine-dmatest-fix-container_of-member-in-dmatest_callback.patch
drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
firmware-psci-expose-psci-conduit.patch
firmware-psci-expose-smccc-version-through-psci_ops.patch
media-dvb-usb-v2-lmedm04-improve-logic-checking-of-warm-start.patch
media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_lme2510_tuner.patch
media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch
perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch
revert-drm-i915-mark-all-device-info-struct-with-__initconst.patch
sched-rt-up-the-root-domain-ref-count-when-passing-it-around-via-ipis.patch
sched-rt-use-container_of-to-get-root-domain-in-rto_push_irq_work_func.patch
ssb-do-not-disable-pci-host-on-non-mips.patch
watchdog-gpio_wdt-set-wdog_hw_running-in-gpio_wdt_stop.patch

87 files changed:
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch [new file with mode: 0644]
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch [new file with mode: 0644]
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch [new file with mode: 0644]
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch [new file with mode: 0644]
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch [new file with mode: 0644]
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch [new file with mode: 0644]
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch [new file with mode: 0644]
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch [new file with mode: 0644]
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch [new file with mode: 0644]
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch [new file with mode: 0644]
queue-4.15/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch [new file with mode: 0644]
queue-4.15/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch [new file with mode: 0644]
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch [new file with mode: 0644]
queue-4.15/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch [new file with mode: 0644]
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch [new file with mode: 0644]
queue-4.15/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch [new file with mode: 0644]
queue-4.15/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch [new file with mode: 0644]
queue-4.15/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch [new file with mode: 0644]
queue-4.15/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch [new file with mode: 0644]
queue-4.15/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch [new file with mode: 0644]
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch [new file with mode: 0644]
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch [new file with mode: 0644]
queue-4.15/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch [new file with mode: 0644]
queue-4.15/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch [new file with mode: 0644]
queue-4.15/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch [new file with mode: 0644]
queue-4.15/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch [new file with mode: 0644]
queue-4.15/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch [new file with mode: 0644]
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch [new file with mode: 0644]
queue-4.15/arm64-futex-mask-__user-pointers-prior-to-dereference.patch [new file with mode: 0644]
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch [new file with mode: 0644]
queue-4.15/arm64-implement-array_index_mask_nospec.patch [new file with mode: 0644]
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch [new file with mode: 0644]
queue-4.15/arm64-implement-branch-predictor-hardening-for-falkor.patch [new file with mode: 0644]
queue-4.15/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch [new file with mode: 0644]
queue-4.15/arm64-kconfig-add-config_unmap_kernel_at_el0.patch [new file with mode: 0644]
queue-4.15/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch [new file with mode: 0644]
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch [new file with mode: 0644]
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch [new file with mode: 0644]
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch [new file with mode: 0644]
queue-4.15/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch [new file with mode: 0644]
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch [new file with mode: 0644]
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch [new file with mode: 0644]
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch [new file with mode: 0644]
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch [new file with mode: 0644]
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch [new file with mode: 0644]
queue-4.15/arm64-make-user_ds-an-inclusive-limit.patch [new file with mode: 0644]
queue-4.15/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch [new file with mode: 0644]
queue-4.15/arm64-mm-allocate-asids-in-pairs.patch [new file with mode: 0644]
queue-4.15/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch [new file with mode: 0644]
queue-4.15/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch [new file with mode: 0644]
queue-4.15/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch [new file with mode: 0644]
queue-4.15/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch [new file with mode: 0644]
queue-4.15/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch [new file with mode: 0644]
queue-4.15/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch [new file with mode: 0644]
queue-4.15/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch [new file with mode: 0644]
queue-4.15/arm64-mm-rename-post_ttbr0_update_workaround.patch [new file with mode: 0644]
queue-4.15/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch [new file with mode: 0644]
queue-4.15/arm64-mm-use-non-global-mappings-for-kernel-space.patch [new file with mode: 0644]
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch [new file with mode: 0644]
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch [new file with mode: 0644]
queue-4.15/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch [new file with mode: 0644]
queue-4.15/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch [new file with mode: 0644]
queue-4.15/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch [new file with mode: 0644]
queue-4.15/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch [new file with mode: 0644]
queue-4.15/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch [new file with mode: 0644]
queue-4.15/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch [new file with mode: 0644]
queue-4.15/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch [new file with mode: 0644]
queue-4.15/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch [new file with mode: 0644]
queue-4.15/arm64-use-ret-instruction-for-exiting-the-trampoline.patch [new file with mode: 0644]
queue-4.15/cifs-fix-autonegotiate-security-settings-mismatch.patch [new file with mode: 0644]
queue-4.15/cifs-fix-missing-put_xid-in-cifs_file_strict_mmap.patch [new file with mode: 0644]
queue-4.15/cifs-zero-sensitive-data-when-freeing.patch [new file with mode: 0644]
queue-4.15/cpufreq-mediatek-add-mediatek-related-projects-into-blacklist.patch [new file with mode: 0644]
queue-4.15/dmaengine-dmatest-fix-container_of-member-in-dmatest_callback.patch [new file with mode: 0644]
queue-4.15/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch [new file with mode: 0644]
queue-4.15/firmware-psci-expose-psci-conduit.patch [new file with mode: 0644]
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch [new file with mode: 0644]
queue-4.15/media-dvb-usb-v2-lmedm04-improve-logic-checking-of-warm-start.patch [new file with mode: 0644]
queue-4.15/media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_lme2510_tuner.patch [new file with mode: 0644]
queue-4.15/media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch [new file with mode: 0644]
queue-4.15/perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch [new file with mode: 0644]
queue-4.15/revert-drm-i915-mark-all-device-info-struct-with-__initconst.patch [new file with mode: 0644]
queue-4.15/sched-rt-up-the-root-domain-ref-count-when-passing-it-around-via-ipis.patch [new file with mode: 0644]
queue-4.15/sched-rt-use-container_of-to-get-root-domain-in-rto_push_irq_work_func.patch [new file with mode: 0644]
queue-4.15/series
queue-4.15/ssb-do-not-disable-pci-host-on-non-mips.patch [new file with mode: 0644]
queue-4.15/watchdog-gpio_wdt-set-wdog_hw_running-in-gpio_wdt_stop.patch [new file with mode: 0644]

diff --git a/queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch b/queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
new file mode 100644
index 0000000..5c63a20
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
@@ -0,0 +1,73 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:09 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Add PSCI_VERSION helper
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit d0a144f12a7c upstream.
+
+As we're about to trigger a PSCI version explosion, it doesn't
+hurt to introduce a PSCI_VERSION helper that is going to be
+used everywhere.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/kvm/arm_psci.h    |    6 ++++--
+ include/uapi/linux/psci.h |    3 +++
+ virt/kvm/arm/psci.c       |    4 +---
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -18,8 +18,10 @@
+ #ifndef __KVM_ARM_PSCI_H__
+ #define __KVM_ARM_PSCI_H__
+-#define KVM_ARM_PSCI_0_1      1
+-#define KVM_ARM_PSCI_0_2      2
++#include <uapi/linux/psci.h>
++
++#define KVM_ARM_PSCI_0_1      PSCI_VERSION(0, 1)
++#define KVM_ARM_PSCI_0_2      PSCI_VERSION(0, 2)
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+ int kvm_psci_call(struct kvm_vcpu *vcpu);
+--- a/include/uapi/linux/psci.h
++++ b/include/uapi/linux/psci.h
+@@ -88,6 +88,9 @@
+               (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
+ #define PSCI_VERSION_MINOR(ver)                       \
+               ((ver) & PSCI_VERSION_MINOR_MASK)
++#define PSCI_VERSION(maj, min)                                                \
++      ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
++       ((min) & PSCI_VERSION_MINOR_MASK))
+ /* PSCI features decoding (>=1.0) */
+ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT        1
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -25,8 +25,6 @@
+ #include <kvm/arm_psci.h>
+-#include <uapi/linux/psci.h>
+-
+ /*
+  * This is an implementation of the Power State Coordination Interface
+  * as described in ARM document number ARM DEN 0022A.
+@@ -222,7 +220,7 @@ static int kvm_psci_0_2_call(struct kvm_
+                * Bits[31:16] = Major Version = 0
+                * Bits[15:0] = Minor Version = 2
+                */
+-              val = 2;
++              val = KVM_ARM_PSCI_0_2;
+               break;
+       case PSCI_0_2_FN_CPU_SUSPEND:
+       case PSCI_0_2_FN64_CPU_SUSPEND:
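
To see what the new encoding yields, here is a standalone user-space
sketch (illustrative only, not part of the queued patch; the shift and
mask values mirror include/uapi/linux/psci.h). PSCI_VERSION(0, 2) still
evaluates to 2, which is why replacing the bare literal in the hunk
above is behaviour-neutral:

    #include <stdio.h>

    /* values as defined in include/uapi/linux/psci.h */
    #define PSCI_VERSION_MAJOR_SHIFT  16
    #define PSCI_VERSION_MAJOR_MASK   (0xffffU << PSCI_VERSION_MAJOR_SHIFT)
    #define PSCI_VERSION_MINOR_MASK   0xffffU
    #define PSCI_VERSION(maj, min) \
            ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
             ((min) & PSCI_VERSION_MINOR_MASK))

    int main(void)
    {
            printf("PSCI 0.2 -> %#x\n", PSCI_VERSION(0, 2)); /* 0x2     */
            printf("PSCI 1.0 -> %#x\n", PSCI_VERSION(1, 0)); /* 0x10000 */
            return 0;
    }
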
diff --git a/queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch b/queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
new file mode 100644
index 0000000..440eaf7
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
@@ -0,0 +1,139 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:10 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Add smccc accessors to PSCI code
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 84684fecd7ea upstream.
+
+Instead of open coding the accesses to the various registers,
+let's add explicit SMCCC accessors.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/arm/psci.c |   52 ++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 42 insertions(+), 10 deletions(-)
+
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -32,6 +32,38 @@
+ #define AFFINITY_MASK(level)  ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
++static u32 smccc_get_function(struct kvm_vcpu *vcpu)
++{
++      return vcpu_get_reg(vcpu, 0);
++}
++
++static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
++{
++      return vcpu_get_reg(vcpu, 1);
++}
++
++static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
++{
++      return vcpu_get_reg(vcpu, 2);
++}
++
++static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
++{
++      return vcpu_get_reg(vcpu, 3);
++}
++
++static void smccc_set_retval(struct kvm_vcpu *vcpu,
++                           unsigned long a0,
++                           unsigned long a1,
++                           unsigned long a2,
++                           unsigned long a3)
++{
++      vcpu_set_reg(vcpu, 0, a0);
++      vcpu_set_reg(vcpu, 1, a1);
++      vcpu_set_reg(vcpu, 2, a2);
++      vcpu_set_reg(vcpu, 3, a3);
++}
++
+ static unsigned long psci_affinity_mask(unsigned long affinity_level)
+ {
+       if (affinity_level <= 3)
+@@ -77,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(st
+       unsigned long context_id;
+       phys_addr_t target_pc;
+-      cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
++      cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
+       if (vcpu_mode_is_32bit(source_vcpu))
+               cpu_id &= ~((u32) 0);
+@@ -96,8 +128,8 @@ static unsigned long kvm_psci_vcpu_on(st
+                       return PSCI_RET_INVALID_PARAMS;
+       }
+-      target_pc = vcpu_get_reg(source_vcpu, 2);
+-      context_id = vcpu_get_reg(source_vcpu, 3);
++      target_pc = smccc_get_arg2(source_vcpu);
++      context_id = smccc_get_arg3(source_vcpu);
+       kvm_reset_vcpu(vcpu);
+@@ -116,7 +148,7 @@ static unsigned long kvm_psci_vcpu_on(st
+        * NOTE: We always update r0 (or x0) because for PSCI v0.1
+        * the general puspose registers are undefined upon CPU_ON.
+        */
+-      vcpu_set_reg(vcpu, 0, context_id);
++      smccc_set_retval(vcpu, context_id, 0, 0, 0);
+       vcpu->arch.power_off = false;
+       smp_mb();               /* Make sure the above is visible */
+@@ -136,8 +168,8 @@ static unsigned long kvm_psci_vcpu_affin
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tmp;
+-      target_affinity = vcpu_get_reg(vcpu, 1);
+-      lowest_affinity_level = vcpu_get_reg(vcpu, 2);
++      target_affinity = smccc_get_arg1(vcpu);
++      lowest_affinity_level = smccc_get_arg2(vcpu);
+       /* Determine target affinity mask */
+       target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+@@ -210,7 +242,7 @@ int kvm_psci_version(struct kvm_vcpu *vc
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+-      unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
++      u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+       int ret = 1;
+@@ -277,14 +309,14 @@ static int kvm_psci_0_2_call(struct kvm_
+               break;
+       }
+-      vcpu_set_reg(vcpu, 0, val);
++      smccc_set_retval(vcpu, val, 0, 0, 0);
+       return ret;
+ }
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+-      unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
++      u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+       switch (psci_fn) {
+@@ -302,7 +334,7 @@ static int kvm_psci_0_1_call(struct kvm_
+               break;
+       }
+-      vcpu_set_reg(vcpu, 0, val);
++      smccc_set_retval(vcpu, val, 0, 0, 0);
+       return 1;
+ }
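
The point of the accessors is that handlers no longer need to know
which vcpu register backs which SMCCC slot. A hypothetical handler
written against the new helpers, purely to show the intended shape
(not code from the patch):

    static int handle_example_call(struct kvm_vcpu *vcpu)
    {
            u32 fn = smccc_get_function(vcpu); /* function ID is in r0/x0 */
            unsigned long val = PSCI_RET_NOT_SUPPORTED;

            if (fn == PSCI_0_2_FN_PSCI_VERSION)
                    val = KVM_ARM_PSCI_0_2;

            /* per SMCCC, results travel back in registers 0-3 */
            smccc_set_retval(vcpu, val, 0, 0, 0);
            return 1;
    }
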
diff --git a/queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch b/queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
new file mode 100644
index 0000000..6b31f03
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
@@ -0,0 +1,135 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:12 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Advertise SMCCC v1.1
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 09e6be12effd upstream.
+
+The new SMC Calling Convention (v1.1) allows for a reduced overhead
+when calling into the firmware, and provides a new feature discovery
+mechanism.
+
+Make it visible to KVM guests.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/handle_exit.c   |    2 +-
+ arch/arm64/kvm/handle_exit.c |    2 +-
+ include/kvm/arm_psci.h       |    2 +-
+ include/linux/arm-smccc.h    |   13 +++++++++++++
+ virt/kvm/arm/psci.c          |   24 +++++++++++++++++++++++-
+ 5 files changed, 39 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -36,7 +36,7 @@ static int handle_hvc(struct kvm_vcpu *v
+                     kvm_vcpu_hvc_get_imm(vcpu));
+       vcpu->stat.hvc_exit_stat++;
+-      ret = kvm_psci_call(vcpu);
++      ret = kvm_hvc_call_handler(vcpu);
+       if (ret < 0) {
+               kvm_inject_undefined(vcpu);
+               return 1;
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *v
+                           kvm_vcpu_hvc_get_imm(vcpu));
+       vcpu->stat.hvc_exit_stat++;
+-      ret = kvm_psci_call(vcpu);
++      ret = kvm_hvc_call_handler(vcpu);
+       if (ret < 0) {
+               vcpu_set_reg(vcpu, 0, ~0UL);
+               return 1;
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -27,6 +27,6 @@
+ #define KVM_ARM_PSCI_LATEST   KVM_ARM_PSCI_1_0
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+ #endif /* __KVM_ARM_PSCI_H__ */
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -60,6 +60,19 @@
+ #define ARM_SMCCC_QUIRK_NONE          0
+ #define ARM_SMCCC_QUIRK_QCOM_A6               1 /* Save/restore register a6 */
++#define ARM_SMCCC_VERSION_1_0         0x10000
++#define ARM_SMCCC_VERSION_1_1         0x10001
++
++#define ARM_SMCCC_VERSION_FUNC_ID                                     \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0)
++
++#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID                                       \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 1)
++
+ #ifndef __ASSEMBLY__
+ #include <linux/linkage.h>
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -15,6 +15,7 @@
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/arm-smccc.h>
+ #include <linux/preempt.h>
+ #include <linux/kvm_host.h>
+ #include <linux/wait.h>
+@@ -339,6 +340,7 @@ static int kvm_psci_1_0_call(struct kvm_
+               case PSCI_0_2_FN_SYSTEM_OFF:
+               case PSCI_0_2_FN_SYSTEM_RESET:
+               case PSCI_1_0_FN_PSCI_FEATURES:
++              case ARM_SMCCC_VERSION_FUNC_ID:
+                       val = 0;
+                       break;
+               default:
+@@ -393,7 +395,7 @@ static int kvm_psci_0_1_call(struct kvm_
+  * Errors:
+  * -EINVAL: Unrecognized PSCI function
+  */
+-int kvm_psci_call(struct kvm_vcpu *vcpu)
++static int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+       switch (kvm_psci_version(vcpu)) {
+       case KVM_ARM_PSCI_1_0:
+@@ -406,3 +408,23 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
+               return -EINVAL;
+       };
+ }
++
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
++{
++      u32 func_id = smccc_get_function(vcpu);
++      u32 val = PSCI_RET_NOT_SUPPORTED;
++
++      switch (func_id) {
++      case ARM_SMCCC_VERSION_FUNC_ID:
++              val = ARM_SMCCC_VERSION_1_1;
++              break;
++      case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
++              /* Nothing supported yet */
++              break;
++      default:
++              return kvm_psci_call(vcpu);
++      }
++
++      smccc_set_retval(vcpu, val, 0, 0, 0);
++      return 1;
++}
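
With this advertised, a guest can probe the hypervisor before relying
on v1.1-only behaviour. A sketch of the guest-side flow (it assumes the
arm_smccc_1_1_hvc() primitive and the ARM_SMCCC_ARCH_WORKAROUND_1
identifier that later patches in this series introduce; at this point
in the series the FEATURES call still answers "not supported" for
everything):

    struct arm_smccc_res res;
    bool have_wa1 = false;

    arm_smccc_1_1_hvc(ARM_SMCCC_VERSION_FUNC_ID, &res);
    if ((int)res.a0 >= ARM_SMCCC_VERSION_1_1) {
            /* v1.1 or later: the feature-discovery call exists */
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
            have_wa1 = (int)res.a0 >= 0; /* 0 or positive: implemented */
    }
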
diff --git a/queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch b/queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
new file mode 100644
index 0000000..01f2a86
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
@@ -0,0 +1,182 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:08 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Consolidate the PSCI include files
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 1a2fb94e6a77 upstream.
+
+As we're about to update the PSCI support, and because I'm lazy,
+let's move the PSCI include file to include/kvm so that both
+ARM architectures can find it.
+
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_psci.h   |   27 ---------------------------
+ arch/arm/kvm/handle_exit.c        |    2 +-
+ arch/arm64/include/asm/kvm_psci.h |   27 ---------------------------
+ arch/arm64/kvm/handle_exit.c      |    3 ++-
+ include/kvm/arm_psci.h            |   27 +++++++++++++++++++++++++++
+ virt/kvm/arm/arm.c                |    2 +-
+ virt/kvm/arm/psci.c               |    3 ++-
+ 7 files changed, 33 insertions(+), 58 deletions(-)
+ delete mode 100644 arch/arm/include/asm/kvm_psci.h
+ rename arch/arm64/include/asm/kvm_psci.h => include/kvm/arm_psci.h (89%)
+
+--- a/arch/arm/include/asm/kvm_psci.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2012 - ARM Ltd
+- * Author: Marc Zyngier <marc.zyngier@arm.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#ifndef __ARM_KVM_PSCI_H__
+-#define __ARM_KVM_PSCI_H__
+-
+-#define KVM_ARM_PSCI_0_1      1
+-#define KVM_ARM_PSCI_0_2      2
+-
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
+-
+-#endif /* __ARM_KVM_PSCI_H__ */
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -21,7 +21,7 @@
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
+ #include <asm/kvm_mmu.h>
+-#include <asm/kvm_psci.h>
++#include <kvm/arm_psci.h>
+ #include <trace/events/kvm.h>
+ #include "trace.h"
+--- a/arch/arm64/include/asm/kvm_psci.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2012,2013 - ARM Ltd
+- * Author: Marc Zyngier <marc.zyngier@arm.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#ifndef __ARM64_KVM_PSCI_H__
+-#define __ARM64_KVM_PSCI_H__
+-
+-#define KVM_ARM_PSCI_0_1      1
+-#define KVM_ARM_PSCI_0_2      2
+-
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
+-
+-#endif /* __ARM64_KVM_PSCI_H__ */
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -22,12 +22,13 @@
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
++#include <kvm/arm_psci.h>
++
+ #include <asm/esr.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_coproc.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_mmu.h>
+-#include <asm/kvm_psci.h>
+ #include <asm/debug-monitors.h>
+ #define CREATE_TRACE_POINTS
+--- /dev/null
++++ b/include/kvm/arm_psci.h
+@@ -0,0 +1,27 @@
++/*
++ * Copyright (C) 2012,2013 - ARM Ltd
++ * Author: Marc Zyngier <marc.zyngier@arm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef __KVM_ARM_PSCI_H__
++#define __KVM_ARM_PSCI_H__
++
++#define KVM_ARM_PSCI_0_1      1
++#define KVM_ARM_PSCI_0_2      2
++
++int kvm_psci_version(struct kvm_vcpu *vcpu);
++int kvm_psci_call(struct kvm_vcpu *vcpu);
++
++#endif /* __KVM_ARM_PSCI_H__ */
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -31,6 +31,7 @@
+ #include <linux/irqbypass.h>
+ #include <trace/events/kvm.h>
+ #include <kvm/arm_pmu.h>
++#include <kvm/arm_psci.h>
+ #define CREATE_TRACE_POINTS
+ #include "trace.h"
+@@ -46,7 +47,6 @@
+ #include <asm/kvm_mmu.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
+-#include <asm/kvm_psci.h>
+ #include <asm/sections.h>
+ #ifdef REQUIRES_VIRT
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -21,9 +21,10 @@
+ #include <asm/cputype.h>
+ #include <asm/kvm_emulate.h>
+-#include <asm/kvm_psci.h>
+ #include <asm/kvm_host.h>
++#include <kvm/arm_psci.h>
++
+ #include <uapi/linux/psci.h>
+ /*
diff --git a/queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch b/queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
new file mode 100644
index 0000000..989637c
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
@@ -0,0 +1,110 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:11 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Implement PSCI 1.0 support
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 58e0b2239a4d upstream.
+
+PSCI 1.0 can be trivially implemented by providing the FEATURES
+call on top of PSCI 0.2 and returning 1.0 as the PSCI version.
+
+We happily ignore everything else, as they are either optional or
+are clarifications that do not require any additional change.
+
+PSCI 1.0 is now the default until we decide to add a userspace
+selection API.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/kvm/arm_psci.h |    3 +++
+ virt/kvm/arm/psci.c    |   45 ++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 47 insertions(+), 1 deletion(-)
+
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -22,6 +22,9 @@
+ #define KVM_ARM_PSCI_0_1      PSCI_VERSION(0, 1)
+ #define KVM_ARM_PSCI_0_2      PSCI_VERSION(0, 2)
++#define KVM_ARM_PSCI_1_0      PSCI_VERSION(1, 0)
++
++#define KVM_ARM_PSCI_LATEST   KVM_ARM_PSCI_1_0
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+ int kvm_psci_call(struct kvm_vcpu *vcpu);
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -234,7 +234,7 @@ static void kvm_psci_system_reset(struct
+ int kvm_psci_version(struct kvm_vcpu *vcpu)
+ {
+       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+-              return KVM_ARM_PSCI_0_2;
++              return KVM_ARM_PSCI_LATEST;
+       return KVM_ARM_PSCI_0_1;
+ }
+@@ -313,6 +313,47 @@ static int kvm_psci_0_2_call(struct kvm_
+       return ret;
+ }
++static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
++{
++      u32 psci_fn = smccc_get_function(vcpu);
++      u32 feature;
++      unsigned long val;
++      int ret = 1;
++
++      switch(psci_fn) {
++      case PSCI_0_2_FN_PSCI_VERSION:
++              val = KVM_ARM_PSCI_1_0;
++              break;
++      case PSCI_1_0_FN_PSCI_FEATURES:
++              feature = smccc_get_arg1(vcpu);
++              switch(feature) {
++              case PSCI_0_2_FN_PSCI_VERSION:
++              case PSCI_0_2_FN_CPU_SUSPEND:
++              case PSCI_0_2_FN64_CPU_SUSPEND:
++              case PSCI_0_2_FN_CPU_OFF:
++              case PSCI_0_2_FN_CPU_ON:
++              case PSCI_0_2_FN64_CPU_ON:
++              case PSCI_0_2_FN_AFFINITY_INFO:
++              case PSCI_0_2_FN64_AFFINITY_INFO:
++              case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
++              case PSCI_0_2_FN_SYSTEM_OFF:
++              case PSCI_0_2_FN_SYSTEM_RESET:
++              case PSCI_1_0_FN_PSCI_FEATURES:
++                      val = 0;
++                      break;
++              default:
++                      val = PSCI_RET_NOT_SUPPORTED;
++                      break;
++              }
++              break;
++      default:
++              return kvm_psci_0_2_call(vcpu);
++      }
++
++      smccc_set_retval(vcpu, val, 0, 0, 0);
++      return ret;
++}
++
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+@@ -355,6 +396,8 @@ static int kvm_psci_0_1_call(struct kvm_
+ int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+       switch (kvm_psci_version(vcpu)) {
++      case KVM_ARM_PSCI_1_0:
++              return kvm_psci_1_0_call(vcpu);
+       case KVM_ARM_PSCI_0_2:
+               return kvm_psci_0_2_call(vcpu);
+       case KVM_ARM_PSCI_0_1:
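
From the guest's point of view, PSCI_VERSION now reports 1.0 and the
newly mandatory FEATURES call returns 0 for everything the 0.2
implementation already handled. A guest-side sketch, where
invoke_psci_fn() is a stand-in for whichever SMC/HVC conduit the guest
uses (mirroring the indirection in drivers/firmware/psci.c):

    static bool psci_has_feature(u32 fn)
    {
            /* 0 means implemented; PSCI_RET_NOT_SUPPORTED (-1) otherwise */
            return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, fn, 0, 0) == 0;
    }

    /* e.g. psci_has_feature(PSCI_0_2_FN64_CPU_ON) is now true under KVM */
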
diff --git a/queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch b/queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
new file mode 100644
index 0000000..2fe72f5
--- /dev/null
+++ b/queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
@@ -0,0 +1,133 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:13 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: KVM: Turn kvm_psci_version into a static inline
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit a4097b351118 upstream.
+
+We're about to need kvm_psci_version in HYP too. So let's turn it
+into a static inline, and pass the kvm structure as a second
+parameter (so that HYP can do a kern_hyp_va on it).
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/switch.c |   18 +++++++++++-------
+ include/kvm/arm_psci.h      |   21 ++++++++++++++++++++-
+ virt/kvm/arm/psci.c         |   12 ++----------
+ 3 files changed, 33 insertions(+), 18 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -19,6 +19,8 @@
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
++#include <kvm/arm_psci.h>
++
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
+@@ -344,14 +346,16 @@ again:
+       if (exit_code == ARM_EXCEPTION_TRAP &&
+           (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+-           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+-          vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+-              u64 val = PSCI_RET_NOT_SUPPORTED;
+-              if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+-                      val = 2;
++           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) {
++              u32 val = vcpu_get_reg(vcpu, 0);
+-              vcpu_set_reg(vcpu, 0, val);
+-              goto again;
++              if (val == PSCI_0_2_FN_PSCI_VERSION) {
++                      val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm));
++                      if (unlikely(val == KVM_ARM_PSCI_0_1))
++                              val = PSCI_RET_NOT_SUPPORTED;
++                      vcpu_set_reg(vcpu, 0, val);
++                      goto again;
++              }
+       }
+       if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -18,6 +18,7 @@
+ #ifndef __KVM_ARM_PSCI_H__
+ #define __KVM_ARM_PSCI_H__
++#include <linux/kvm_host.h>
+ #include <uapi/linux/psci.h>
+ #define KVM_ARM_PSCI_0_1      PSCI_VERSION(0, 1)
+@@ -26,7 +27,25 @@
+ #define KVM_ARM_PSCI_LATEST   KVM_ARM_PSCI_1_0
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
++/*
++ * We need the KVM pointer independently from the vcpu as we can call
++ * this from HYP, and need to apply kern_hyp_va on it...
++ */
++static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
++{
++      /*
++       * Our PSCI implementation stays the same across versions from
++       * v0.2 onward, only adding the few mandatory functions (such
++       * as FEATURES with 1.0) that are required by newer
++       * revisions. It is thus safe to return the latest.
++       */
++      if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
++              return KVM_ARM_PSCI_LATEST;
++
++      return KVM_ARM_PSCI_0_1;
++}
++
++
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+ #endif /* __KVM_ARM_PSCI_H__ */
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -123,7 +123,7 @@ static unsigned long kvm_psci_vcpu_on(st
+       if (!vcpu)
+               return PSCI_RET_INVALID_PARAMS;
+       if (!vcpu->arch.power_off) {
+-              if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
++              if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
+                       return PSCI_RET_ALREADY_ON;
+               else
+                       return PSCI_RET_INVALID_PARAMS;
+@@ -232,14 +232,6 @@ static void kvm_psci_system_reset(struct
+       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+ }
+-int kvm_psci_version(struct kvm_vcpu *vcpu)
+-{
+-      if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+-              return KVM_ARM_PSCI_LATEST;
+-
+-      return KVM_ARM_PSCI_0_1;
+-}
+-
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+@@ -397,7 +389,7 @@ static int kvm_psci_0_1_call(struct kvm_
+  */
+ static int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+-      switch (kvm_psci_version(vcpu)) {
++      switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+       case KVM_ARM_PSCI_1_0:
+               return kvm_psci_1_0_call(vcpu);
+       case KVM_ARM_PSCI_0_2:
diff --git a/queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch b/queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
new file mode 100644
index 0000000..50ce683
--- /dev/null
+++ b/queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
@@ -0,0 +1,175 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:19 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit f2d3b2e8759a upstream.
+
+One of the major improvements of SMCCC v1.1 is that it clobbers only
+the first 4 registers, both on 32 and 64bit. This means that it
+becomes very easy to provide an inline version of the SMC call
+primitive, and to avoid a function call to stash the registers that
+would otherwise be clobbered under SMCCC v1.0.
+
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |  141 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 141 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -150,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned
+ #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
++/* SMCCC v1.1 implementation madness follows */
++#ifdef CONFIG_ARM64
++
++#define SMCCC_SMC_INST        "smc    #0"
++#define SMCCC_HVC_INST        "hvc    #0"
++
++#elif defined(CONFIG_ARM)
++#include <asm/opcodes-sec.h>
++#include <asm/opcodes-virt.h>
++
++#define SMCCC_SMC_INST        __SMC(0)
++#define SMCCC_HVC_INST        __HVC(0)
++
++#endif
++
++#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
++
++#define __count_args(...)                                             \
++      ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
++
++#define __constraint_write_0                                          \
++      "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_1                                          \
++      "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_2                                          \
++      "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
++#define __constraint_write_3                                          \
++      "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
++#define __constraint_write_4  __constraint_write_3
++#define __constraint_write_5  __constraint_write_4
++#define __constraint_write_6  __constraint_write_5
++#define __constraint_write_7  __constraint_write_6
++
++#define __constraint_read_0
++#define __constraint_read_1
++#define __constraint_read_2
++#define __constraint_read_3
++#define __constraint_read_4   "r" (r4)
++#define __constraint_read_5   __constraint_read_4, "r" (r5)
++#define __constraint_read_6   __constraint_read_5, "r" (r6)
++#define __constraint_read_7   __constraint_read_6, "r" (r7)
++
++#define __declare_arg_0(a0, res)                                      \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register unsigned long r1 asm("r1");                            \
++      register unsigned long r2 asm("r2");                            \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_1(a0, a1, res)                                  \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register unsigned long r2 asm("r2");                            \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_2(a0, a1, a2, res)                              \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register typeof(a2)    r2 asm("r2") = a2;                       \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_3(a0, a1, a2, a3, res)                          \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register typeof(a2)    r2 asm("r2") = a2;                       \
++      register typeof(a3)    r3 asm("r3") = a3
++
++#define __declare_arg_4(a0, a1, a2, a3, a4, res)                      \
++      __declare_arg_3(a0, a1, a2, a3, res);                           \
++      register typeof(a4) r4 asm("r4") = a4
++
++#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)                  \
++      __declare_arg_4(a0, a1, a2, a3, a4, res);                       \
++      register typeof(a5) r5 asm("r5") = a5
++
++#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)              \
++      __declare_arg_5(a0, a1, a2, a3, a4, a5, res);                   \
++      register typeof(a6) r6 asm("r6") = a6
++
++#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)          \
++      __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);               \
++      register typeof(a7) r7 asm("r7") = a7
++
++#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
++#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
++
++#define ___constraints(count)                                         \
++      : __constraint_write_ ## count                                  \
++      : __constraint_read_ ## count                                   \
++      : "memory"
++#define __constraints(count)  ___constraints(count)
++
++/*
++ * We have an output list that is not necessarily used, and GCC feels
++ * entitled to optimise the whole sequence away. "volatile" is what
++ * makes it stick.
++ */
++#define __arm_smccc_1_1(inst, ...)                                    \
++      do {                                                            \
++              __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
++              asm volatile(inst "\n"                                  \
++                           __constraints(__count_args(__VA_ARGS__))); \
++              if (___res)                                             \
++                      *___res = (typeof(*___res)){r0, r1, r2, r3};    \
++      } while (0)
++
++/*
++ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
++ * The content of the supplied param are copied to registers 0 to 7 prior
++ * to the SMC instruction. The return values are updated with the content
++ * from register 0 to 3 on return from the SMC instruction if not NULL.
++ */
++#define arm_smccc_1_1_smc(...)        __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
++
++/*
++ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
++ * The content of the supplied param are copied to registers 0 to 7 prior
++ * to the HVC instruction. The return values are updated with the content
++ * from register 0 to 3 on return from the HVC instruction if not NULL.
++ */
++#define arm_smccc_1_1_hvc(...)        __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
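
The __count_args() machinery above works by appending the literals
7..0 to the argument list and selecting the tenth element, so the
result is the number of arguments supplied beyond the function ID and
the result pointer; that count then picks the matching __declare_arg_N
and __constraint_*_N pair. A usage sketch (kernel context and a
v1.1-capable SMC conduit assumed, not code from the patch):

    struct arm_smccc_res res;

    /* expands to one inline SMC; only r0-r3 are clobbered, per v1.1 */
    arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);

    if ((int)res.a0 >= ARM_SMCCC_VERSION_1_1)
            pr_info("SMCCC v%lu.%lu\n", res.a0 >> 16, res.a0 & 0xffff);
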
diff --git a/queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch b/queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
new file mode 100644
index 0000000..0cb2c12
--- /dev/null
+++ b/queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
@@ -0,0 +1,52 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:18 +0000
+Subject: [Variant 2/Spectre-v2] arm/arm64: smccc: Make function identifiers an unsigned quantity
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit ded4c39e93f3 upstream.
+
+Function identifiers are a 32bit, unsigned quantity. But we never
+tell the compiler so, resulting in the following:
+
+ 4ac:   b26187e0        mov     x0, #0xffffffff80000001
+
+We thus rely on the firmware narrowing it for us, which is not
+always a reasonable expectation.
+
+Cc: stable@vger.kernel.org
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -14,14 +14,16 @@
+ #ifndef __LINUX_ARM_SMCCC_H
+ #define __LINUX_ARM_SMCCC_H
++#include <uapi/linux/const.h>
++
+ /*
+  * This file provides common defines for ARM SMC Calling Convention as
+  * specified in
+  * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+  */
+-#define ARM_SMCCC_STD_CALL            0
+-#define ARM_SMCCC_FAST_CALL           1
++#define ARM_SMCCC_STD_CALL            _AC(0,U)
++#define ARM_SMCCC_FAST_CALL           _AC(1,U)
+ #define ARM_SMCCC_TYPE_SHIFT          31
+ #define ARM_SMCCC_SMC_32              0
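
The disassembly quoted in the changelog is the whole bug:
ARM_SMCCC_CALL_VAL() sets bit 31, and with a signed 1 the resulting
int is negative, so widening it into a 64-bit register sign-extends
it. A standalone user-space illustration (LP64 assumed; strictly,
shifting a signed 1 into the sign bit is undefined in ISO C, which is
part of the problem):

    #include <stdio.h>

    #define ARM_SMCCC_TYPE_SHIFT 31

    int main(void)
    {
            /* before: ARM_SMCCC_FAST_CALL was a plain (signed) 1 */
            unsigned long bad  = (unsigned long)((1  << ARM_SMCCC_TYPE_SHIFT) | 1);
            /* after: _AC(1,U) makes it 1U, so no sign extension occurs */
            unsigned long good = (unsigned long)((1U << ARM_SMCCC_TYPE_SHIFT) | 1);

            printf("bad  = %#lx\n", bad);  /* 0xffffffff80000001 */
            printf("good = %#lx\n", good); /* 0x80000001 */
            return 0;
    }
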
diff --git a/queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch b/queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
new file mode 100644
index 0000000..0f686f3
--- /dev/null
+++ b/queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
@@ -0,0 +1,157 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:20 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit b092201e0020 upstream.
+
+Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
+It is lovely. Really.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/bpi.S        |   20 ++++++++++++
+ arch/arm64/kernel/cpu_errata.c |   68 ++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 87 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -17,6 +17,7 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/arm-smccc.h>
+ .macro ventry target
+       .rept 31
+@@ -85,3 +86,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_sta
+       .endr
+       ldp     x29, x30, [sp], #16
+ ENTRY(__qcom_hyp_sanitize_link_stack_end)
++
++.macro smccc_workaround_1 inst
++      sub     sp, sp, #(8 * 4)
++      stp     x2, x3, [sp, #(8 * 0)]
++      stp     x0, x1, [sp, #(8 * 2)]
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
++      \inst   #0
++      ldp     x2, x3, [sp, #(8 * 0)]
++      ldp     x0, x1, [sp, #(8 * 2)]
++      add     sp, sp, #(8 * 4)
++.endm
++
++ENTRY(__smccc_workaround_1_smc_start)
++      smccc_workaround_1      smc
++ENTRY(__smccc_workaround_1_smc_end)
++
++ENTRY(__smccc_workaround_1_hvc_start)
++      smccc_workaround_1      hvc
++ENTRY(__smccc_workaround_1_hvc_end)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -70,6 +70,10 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_har
+ extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+ extern char __qcom_hyp_sanitize_link_stack_start[];
+ extern char __qcom_hyp_sanitize_link_stack_end[];
++extern char __smccc_workaround_1_smc_start[];
++extern char __smccc_workaround_1_smc_end[];
++extern char __smccc_workaround_1_hvc_start[];
++extern char __smccc_workaround_1_hvc_end[];
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+@@ -116,6 +120,10 @@ static void __install_bp_hardening_cb(bp
+ #define __psci_hyp_bp_inval_end                       NULL
+ #define __qcom_hyp_sanitize_link_stack_start  NULL
+ #define __qcom_hyp_sanitize_link_stack_end    NULL
++#define __smccc_workaround_1_smc_start                NULL
++#define __smccc_workaround_1_smc_end          NULL
++#define __smccc_workaround_1_hvc_start                NULL
++#define __smccc_workaround_1_hvc_end          NULL
+ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+@@ -142,17 +150,75 @@ static void  install_bp_hardening_cb(con
+       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+ }
++#include <uapi/linux/psci.h>
++#include <linux/arm-smccc.h>
+ #include <linux/psci.h>
++static void call_smc_arch_workaround_1(void)
++{
++      arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void call_hvc_arch_workaround_1(void)
++{
++      arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++{
++      bp_hardening_cb_t cb;
++      void *smccc_start, *smccc_end;
++      struct arm_smccc_res res;
++
++      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
++              return false;
++
++      if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++              return false;
++
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_HVC:
++              arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++              if (res.a0)
++                      return false;
++              cb = call_hvc_arch_workaround_1;
++              smccc_start = __smccc_workaround_1_hvc_start;
++              smccc_end = __smccc_workaround_1_hvc_end;
++              break;
++
++      case PSCI_CONDUIT_SMC:
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++              if (res.a0)
++                      return false;
++              cb = call_smc_arch_workaround_1;
++              smccc_start = __smccc_workaround_1_smc_start;
++              smccc_end = __smccc_workaround_1_smc_end;
++              break;
++
++      default:
++              return false;
++      }
++
++      install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
++
++      return true;
++}
++
+ static int enable_psci_bp_hardening(void *data)
+ {
+       const struct arm64_cpu_capabilities *entry = data;
+-      if (psci_ops.get_version)
++      if (psci_ops.get_version) {
++              if (check_smccc_arch_workaround_1(entry))
++                      return 0;
++
+               install_bp_hardening_cb(entry,
+                                      (bp_hardening_cb_t)psci_ops.get_version,
+                                      __psci_hyp_bp_inval_start,
+                                      __psci_hyp_bp_inval_end);
++      }
+       return 0;
+ }
diff --git a/queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch b/queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
new file mode 100644 (file)
index 0000000..7a26abc
--- /dev/null
@@ -0,0 +1,355 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 3 Jan 2018 11:17:58 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Add skeleton to harden the branch predictor against aliasing attacks
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 0f15adbb2861 upstream.
+
+Aliasing attacks against CPU branch predictors can allow an attacker to
+redirect speculative control flow on some CPUs and potentially divulge
+information from one context to another.
+
+This patch adds initial skeleton code behind a new Kconfig option to
+enable implementation-specific mitigations against these attacks for
+CPUs that are affected.
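+
+As a rough sketch of how the skeleton is meant to be used (modelled on
+later patches in this series; the callback body and the vector symbols
+below are placeholders), an implementation-specific mitigation
+registers a per-CPU callback from its .enable hook:
+
+	static void my_bp_invalidate(void)
+	{
+		/* placeholder: CPU-specific predictor invalidation */
+	}
+
+	static int enable_my_bp_hardening(void *data)
+	{
+		const struct arm64_cpu_capabilities *entry = data;
+
+		/* my_hyp_vecs_{start,end} delimit replacement KVM vectors */
+		install_bp_hardening_cb(entry, my_bp_invalidate,
+					my_hyp_vecs_start, my_hyp_vecs_end);
+		return 0;
+	}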
+
+Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+Conflicts:
+       arch/arm64/kernel/cpufeature.c
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig               |   17 ++++++++
+ arch/arm64/include/asm/cpucaps.h |    3 +
+ arch/arm64/include/asm/mmu.h     |   37 +++++++++++++++++++
+ arch/arm64/include/asm/sysreg.h  |    1 
+ arch/arm64/kernel/Makefile       |    4 ++
+ arch/arm64/kernel/bpi.S          |   55 ++++++++++++++++++++++++++++
+ arch/arm64/kernel/cpu_errata.c   |   74 +++++++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/cpufeature.c   |    1 
+ arch/arm64/kernel/entry.S        |    7 ++-
+ arch/arm64/mm/context.c          |    2 +
+ arch/arm64/mm/fault.c            |   17 ++++++++
+ 11 files changed, 215 insertions(+), 3 deletions(-)
+ create mode 100644 arch/arm64/kernel/bpi.S
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -855,6 +855,23 @@ config UNMAP_KERNEL_AT_EL0
+         If unsure, say Y.
++config HARDEN_BRANCH_PREDICTOR
++      bool "Harden the branch predictor against aliasing attacks" if EXPERT
++      default y
++      help
++        Speculation attacks against some high-performance processors rely on
++        being able to manipulate the branch predictor for a victim context by
++        executing aliasing branches in the attacker context.  Such attacks
++        can be partially mitigated against by clearing internal branch
++        predictor state and limiting the prediction logic in some situations.
++
++        This config option will take CPU-specific actions to harden the
++        branch predictor against aliasing attacks and may rely on specific
++        instruction sequences or control bits being set by the system
++        firmware.
++
++        If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+       bool "Emulate deprecated/obsolete ARMv8 instructions"
+       depends on COMPAT
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -42,7 +42,8 @@
+ #define ARM64_HAS_DCPOP                               21
+ #define ARM64_SVE                             22
+ #define ARM64_UNMAP_KERNEL_AT_EL0             23
++#define ARM64_HARDEN_BRANCH_PREDICTOR         24
+-#define ARM64_NCAPS                           24
++#define ARM64_NCAPS                           25
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -41,6 +41,43 @@ static inline bool arm64_kernel_unmapped
+              cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ }
++typedef void (*bp_hardening_cb_t)(void);
++
++struct bp_hardening_data {
++      int                     hyp_vectors_slot;
++      bp_hardening_cb_t       fn;
++};
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
++
++DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
++
++static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
++{
++      return this_cpu_ptr(&bp_hardening_data);
++}
++
++static inline void arm64_apply_bp_hardening(void)
++{
++      struct bp_hardening_data *d;
++
++      if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
++              return;
++
++      d = arm64_get_bp_hardening_data();
++      if (d->fn)
++              d->fn();
++}
++#else
++static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
++{
++      return NULL;
++}
++
++static inline void arm64_apply_bp_hardening(void)     { }
++#endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
++
+ extern void paging_init(void);
+ extern void bootmem_init(void);
+ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -438,6 +438,7 @@
+ /* id_aa64pfr0 */
+ #define ID_AA64PFR0_CSV3_SHIFT                60
++#define ID_AA64PFR0_CSV2_SHIFT                56
+ #define ID_AA64PFR0_SVE_SHIFT         32
+ #define ID_AA64PFR0_GIC_SHIFT         24
+ #define ID_AA64PFR0_ASIMD_SHIFT               20
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -53,6 +53,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST)        +=
+ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+ arm64-obj-$(CONFIG_CRASH_DUMP)                += crash_dump.o
++ifeq ($(CONFIG_KVM),y)
++arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)   += bpi.o
++endif
++
+ obj-y                                 += $(arm64-obj-y) vdso/ probes/
+ obj-m                                 += $(arm64-obj-m)
+ head-y                                        := head.o
+--- /dev/null
++++ b/arch/arm64/kernel/bpi.S
+@@ -0,0 +1,55 @@
++/*
++ * Contains CPU specific branch predictor invalidation sequences
++ *
++ * Copyright (C) 2018 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/linkage.h>
++
++.macro ventry target
++      .rept 31
++      nop
++      .endr
++      b       \target
++.endm
++
++.macro vectors target
++      ventry \target + 0x000
++      ventry \target + 0x080
++      ventry \target + 0x100
++      ventry \target + 0x180
++
++      ventry \target + 0x200
++      ventry \target + 0x280
++      ventry \target + 0x300
++      ventry \target + 0x380
++
++      ventry \target + 0x400
++      ventry \target + 0x480
++      ventry \target + 0x500
++      ventry \target + 0x580
++
++      ventry \target + 0x600
++      ventry \target + 0x680
++      ventry \target + 0x700
++      ventry \target + 0x780
++.endm
++
++      .align  11
++ENTRY(__bp_harden_hyp_vecs_start)
++      .rept 4
++      vectors __kvm_hyp_vector
++      .endr
++ENTRY(__bp_harden_hyp_vecs_end)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -60,6 +60,80 @@ static int cpu_enable_trap_ctr_access(vo
+       return 0;
+ }
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++#include <asm/mmu_context.h>
++#include <asm/cacheflush.h>
++
++DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
++
++#ifdef CONFIG_KVM
++static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
++                              const char *hyp_vecs_end)
++{
++      void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
++      int i;
++
++      for (i = 0; i < SZ_2K; i += 0x80)
++              memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
++
++      flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
++}
++
++static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
++                                    const char *hyp_vecs_start,
++                                    const char *hyp_vecs_end)
++{
++      static int last_slot = -1;
++      static DEFINE_SPINLOCK(bp_lock);
++      int cpu, slot = -1;
++
++      spin_lock(&bp_lock);
++      for_each_possible_cpu(cpu) {
++              if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
++                      slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
++                      break;
++              }
++      }
++
++      if (slot == -1) {
++              last_slot++;
++              BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
++                      / SZ_2K) <= last_slot);
++              slot = last_slot;
++              __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
++      }
++
++      __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
++      __this_cpu_write(bp_hardening_data.fn, fn);
++      spin_unlock(&bp_lock);
++}
++#else
++static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
++                                    const char *hyp_vecs_start,
++                                    const char *hyp_vecs_end)
++{
++      __this_cpu_write(bp_hardening_data.fn, fn);
++}
++#endif        /* CONFIG_KVM */
++
++static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
++                                   bp_hardening_cb_t fn,
++                                   const char *hyp_vecs_start,
++                                   const char *hyp_vecs_end)
++{
++      u64 pfr0;
++
++      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
++              return;
++
++      pfr0 = read_cpuid(ID_AA64PFR0_EL1);
++      if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
++              return;
++
++      __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
++}
++#endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
++
+ #define MIDR_RANGE(model, min, max) \
+       .def_scope = SCOPE_LOCAL_CPU, \
+       .matches = is_affected_midr_range, \
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -146,6 +146,7 @@ static const struct arm64_ftr_bits ftr_i
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                                  FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -722,12 +722,15 @@ el0_ia:
+        * Instruction abort handling
+        */
+       mrs     x26, far_el1
+-      enable_daif
++      enable_da_f
++#ifdef CONFIG_TRACE_IRQFLAGS
++      bl      trace_hardirqs_off
++#endif
+       ct_user_exit
+       mov     x0, x26
+       mov     x1, x25
+       mov     x2, sp
+-      bl      do_mem_abort
++      bl      do_el0_ia_bp_hardening
+       b       ret_to_user
+ el0_fpsimd_acc:
+       /*
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -246,6 +246,8 @@ asmlinkage void post_ttbr_update_workaro
+                       "ic iallu; dsb nsh; isb",
+                       ARM64_WORKAROUND_CAVIUM_27456,
+                       CONFIG_CAVIUM_ERRATUM_27456));
++
++      arm64_apply_bp_hardening();
+ }
+ static int asids_init(void)
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -707,6 +707,23 @@ asmlinkage void __exception do_mem_abort
+       arm64_notify_die("", regs, &info, esr);
+ }
++asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
++                                                 unsigned int esr,
++                                                 struct pt_regs *regs)
++{
++      /*
++       * We've taken an instruction abort from userspace and not yet
++       * re-enabled IRQs. If the address is a kernel address, apply
++       * BP hardening prior to enabling IRQs and pre-emption.
++       */
++      if (addr > TASK_SIZE)
++              arm64_apply_bp_hardening();
++
++      local_irq_enable();
++      do_mem_abort(addr, esr, regs);
++}
++
++
+ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
+                                          unsigned int esr,
+                                          struct pt_regs *regs)
diff --git a/queue-4.15/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch b/queue-4.15/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
new file mode 100644 (file)
index 0000000..d6ad4e5
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:16 +0000
+Subject: [Variant 1/Spectre-v1] arm64: barrier: Add CSDB macros to control data-value prediction
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 669474e772b9 upstream.
+
+For CPUs capable of data value prediction, CSDB waits for any outstanding
+predictions to architecturally resolve before allowing speculative execution
+to continue. Provide macros to expose it to the arch code.
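+
+A hedged sketch of how the C-side csdb() barrier is meant to be used
+(load_bounded() is illustrative only; the bounding arithmetic matches
+the nospec pattern adopted elsewhere in this series):
+
+	/* csdb() comes from <asm/barrier.h> */
+	unsigned long load_bounded(unsigned long *array, unsigned long idx,
+				   unsigned long nr)
+	{
+		unsigned long mask = (idx - nr) & ~idx;
+
+		mask = (long)mask >> 63;  /* all ones if idx < nr, else 0 */
+		idx &= mask;              /* out-of-range indices become 0 */
+		csdb();                   /* outstanding predictions resolve */
+		return array[idx];
+	}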
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+Conflicts:
+       arch/arm64/include/asm/assembler.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h |    7 +++++++
+ arch/arm64/include/asm/barrier.h   |    1 +
+ 2 files changed, 8 insertions(+)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -109,6 +109,13 @@
+       .endm
+ /*
++ * Value prediction barrier
++ */
++      .macro  csdb
++      hint    #20
++      .endm
++
++/*
+  * NOP sequence
+  */
+       .macro  nops, num
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -32,6 +32,7 @@
+ #define dsb(opt)      asm volatile("dsb " #opt : : : "memory")
+ #define psb_csync()   asm volatile("hint #17" : : : "memory")
++#define csdb()                asm volatile("hint #20" : : : "memory")
+ #define mb()          dsb(sy)
+ #define rmb()         dsb(ld)
diff --git a/queue-4.15/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch b/queue-4.15/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
new file mode 100644 (file)
index 0000000..6bd29f7
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Jayachandran C <jnair@caviumnetworks.com>
+Date: Fri, 19 Jan 2018 04:22:47 -0800
+Subject: [Variant 2/Spectre-v2] arm64: Branch predictor hardening for Cavium ThunderX2
+
+From: Jayachandran C <jnair@caviumnetworks.com>
+
+
+Commit f3d795d9b360 upstream.
+
+Use a PSCI-based mitigation for speculative execution attacks targeting
+the branch predictor. We use the same mechanism as the one used for
+Cortex-A CPUs, and we expect the PSCI version call to have the side
+effect of clearing the BTBs.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Jayachandran C <jnair@caviumnetworks.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -359,6 +359,16 @@ const struct arm64_cpu_capabilities arm6
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+       },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++              .enable = enable_psci_bp_hardening,
++      },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++              .enable = enable_psci_bp_hardening,
++      },
+ #endif
+       {
+       }
diff --git a/queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch b/queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
new file mode 100644 (file)
index 0000000..76f55ae
--- /dev/null
@@ -0,0 +1,106 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 9 Jan 2018 16:12:18 +0000
+Subject: [Variant 3/Meltdown] arm64: capabilities: Handle duplicate entries for a capability
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+
+Commit 67948af41f2e upstream.
+
+Sometimes a single capability can be listed multiple times with
+differing matches(), e.g., CPU errata for different MIDR versions.
+This breaks verify_local_cpu_feature() and this_cpu_has_cap() as
+we stop checking for a capability on a CPU with the first
+entry in the given table, which is not sufficient. Make sure we
+run the checks for all entries of the same capability. We do
+this by fixing __this_cpu_has_cap() to run through all the
+entries in the given table for a match and reuse it for
+verify_local_cpu_feature().
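+
+For illustration, a capability can legitimately appear twice with
+different MIDR ranges (the capability name and ranges below are
+invented):
+
+	{
+		.desc = "Hypothetical erratum, rev 0 parts",
+		.capability = ARM64_WORKAROUND_HYPOTHETICAL,
+		MIDR_RANGE(MIDR_SOME_CPU, 0x00, 0x00),
+	},
+	{
+		.desc = "Hypothetical erratum, rev 2 parts",
+		.capability = ARM64_WORKAROUND_HYPOTHETICAL,
+		MIDR_RANGE(MIDR_SOME_CPU, 0x20, 0x2f),
+	},
+
+Stopping at the first entry with a matching capability number would
+miss a CPU that only matches the second entry.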
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   44 +++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1118,6 +1118,26 @@ static void __init setup_elf_hwcaps(cons
+                       cap_set_elf_hwcap(hwcaps);
+ }
++/*
++ * Check if the current CPU has a given feature capability.
++ * Should be called from non-preemptible context.
++ */
++static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
++                             unsigned int cap)
++{
++      const struct arm64_cpu_capabilities *caps;
++
++      if (WARN_ON(preemptible()))
++              return false;
++
++      for (caps = cap_array; caps->desc; caps++)
++              if (caps->capability == cap &&
++                  caps->matches &&
++                  caps->matches(caps, SCOPE_LOCAL_CPU))
++                      return true;
++      return false;
++}
++
+ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+                           const char *info)
+ {
+@@ -1181,8 +1201,9 @@ verify_local_elf_hwcaps(const struct arm
+ }
+ static void
+-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
++verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
+ {
++      const struct arm64_cpu_capabilities *caps = caps_list;
+       for (; caps->matches; caps++) {
+               if (!cpus_have_cap(caps->capability))
+                       continue;
+@@ -1190,7 +1211,7 @@ verify_local_cpu_features(const struct a
+                * If the new CPU misses an advertised feature, we cannot proceed
+                * further, park the cpu.
+                */
+-              if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
++              if (!__this_cpu_has_cap(caps_list, caps->capability)) {
+                       pr_crit("CPU%d: missing feature: %s\n",
+                                       smp_processor_id(), caps->desc);
+                       cpu_die_early();
+@@ -1272,25 +1293,6 @@ static void __init mark_const_caps_ready
+       static_branch_enable(&arm64_const_caps_ready);
+ }
+-/*
+- * Check if the current CPU has a given feature capability.
+- * Should be called from non-preemptible context.
+- */
+-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+-                             unsigned int cap)
+-{
+-      const struct arm64_cpu_capabilities *caps;
+-
+-      if (WARN_ON(preemptible()))
+-              return false;
+-
+-      for (caps = cap_array; caps->desc; caps++)
+-              if (caps->capability == cap && caps->matches)
+-                      return caps->matches(caps, SCOPE_LOCAL_CPU);
+-
+-      return false;
+-}
+-
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+ bool this_cpu_has_cap(unsigned int cap)
diff --git a/queue-4.15/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch b/queue-4.15/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
new file mode 100644 (file)
index 0000000..7986400
--- /dev/null
@@ -0,0 +1,99 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Wed, 13 Dec 2017 14:19:37 -0800
+Subject: [Variant 3/Meltdown] arm64: cpu_errata: Add Kryo to Falkor 1003 errata
+
+From: Stephen Boyd <sboyd@codeaurora.org>
+
+
+Commit bb48711800e6 upstream.
+
+The Kryo CPUs are also affected by the Falkor 1003 erratum, so we
+need to apply the same workaround on Kryo CPUs. The MIDR is slightly
+more complicated here: the PART number (bits 15 to 4) is not the same
+across all Kryo parts. Drop the lower 8 bits and just look at the top
+4: if they read '2', treat the CPU as a Kryo. This covers all the
+combinations without having to list them all out.
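+
+A worked example (the part number 0x205 is hypothetical; the masking
+mirrors the is_kryo_midr() hunk below):
+
+	u32 model = read_cpuid_id();       /* e.g. PART = 0x205 */
+
+	model &= MIDR_IMPLEMENTOR_MASK |
+		 (0xf00 << MIDR_PARTNUM_SHIFT) |  /* keep top 4 PART bits */
+		 MIDR_ARCHITECTURE_MASK;
+
+	/* model now reads PART = 0x200, i.e. MIDR_QCOM_KRYO */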
+
+Fixes: 38fd94b0275c ("arm64: Work around Falkor erratum 1003")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Conflicts:
+       arch/arm64/include/asm/cputype.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/silicon-errata.txt |    2 +-
+ arch/arm64/include/asm/cputype.h       |    2 ++
+ arch/arm64/kernel/cpu_errata.c         |   21 +++++++++++++++++++++
+ 3 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -72,7 +72,7 @@ stable kernels.
+ | Hisilicon      | Hip0{6,7}       | #161010701      | N/A                         |
+ | Hisilicon      | Hip07           | #161600802      | HISILICON_ERRATUM_161600802 |
+ |                |                 |                 |                             |
+-| Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
++| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+ | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+ | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+ | Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -92,6 +92,7 @@
+ #define QCOM_CPU_PART_FALKOR_V1               0x800
+ #define QCOM_CPU_PART_FALKOR          0xC00
++#define QCOM_CPU_PART_KRYO            0x200
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+@@ -101,6 +102,7 @@
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+ #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
++#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+ #ifndef __ASSEMBLY__
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm6
+                                      entry->midr_range_max);
+ }
++static bool __maybe_unused
++is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
++{
++      u32 model;
++
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      model = read_cpuid_id();
++      model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
++               MIDR_ARCHITECTURE_MASK;
++
++      return model == entry->midr_model;
++}
++
+ static bool
+ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+                               int scope)
+@@ -169,6 +183,13 @@ const struct arm64_cpu_capabilities arm6
+                          MIDR_CPU_VAR_REV(0, 0),
+                          MIDR_CPU_VAR_REV(0, 0)),
+       },
++      {
++              .desc = "Qualcomm Technologies Kryo erratum 1003",
++              .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
++              .def_scope = SCOPE_LOCAL_CPU,
++              .midr_model = MIDR_QCOM_KRYO,
++              .matches = is_kryo_midr,
++      },
+ #endif
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+       {
diff --git a/queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch b/queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
new file mode 100644 (file)
index 0000000..ca4577a
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: James Morse <james.morse@arm.com>
+Date: Mon, 15 Jan 2018 19:38:54 +0000
+Subject: [Variant 2/Spectre-v2] arm64: cpufeature: __this_cpu_has_cap() shouldn't stop early
+
+From: James Morse <james.morse@arm.com>
+
+
+Commit edf298cfce47 upstream.
+
+this_cpu_has_cap() tests caps->desc, not caps->matches, so it stops
+walking the list when it finds a 'silent' feature, instead of
+walking to the end of the list.
+
+Prior to v4.6's 644c2ae198412 ("arm64: cpufeature: Test 'matches' pointer
+to find the end of the list") we always tested desc to find the end of
+a capability list. This was changed for dubious things like PAN_NOT_UAO.
+v4.7's e3661b128e53e ("arm64: Allow a capability to be checked on
+single CPU") added this_cpu_has_cap() using the old desc style test.
+
+CC: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1173,9 +1173,8 @@ static bool __this_cpu_has_cap(const str
+       if (WARN_ON(preemptible()))
+               return false;
+-      for (caps = cap_array; caps->desc; caps++)
++      for (caps = cap_array; caps->matches; caps++)
+               if (caps->capability == cap &&
+-                  caps->matches &&
+                   caps->matches(caps, SCOPE_LOCAL_CPU))
+                       return true;
+       return false;
diff --git a/queue-4.15/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch b/queue-4.15/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
new file mode 100644 (file)
index 0000000..657f662
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 2 Jan 2018 21:37:25 +0000
+Subject: [Variant 2/Spectre-v2] arm64: cpufeature: Pass capability structure to ->enable callback
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 0a0d111d40fd upstream.
+
+In order to invoke the CPU capability ->matches callback from the ->enable
+callback for applying local-CPU workarounds, we need a handle on the
+capability structure.
+
+This patch passes a pointer to the capability structure to the ->enable
+callback.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1215,7 +1215,7 @@ void __init enable_cpu_capabilities(cons
+                        * uses an IPI, giving us a PSTATE that disappears when
+                        * we return.
+                        */
+-                      stop_machine(caps->enable, NULL, cpu_online_mask);
++                      stop_machine(caps->enable, (void *)caps, cpu_online_mask);
+               }
+       }
+ }
+@@ -1259,7 +1259,7 @@ verify_local_cpu_features(const struct a
+                       cpu_die_early();
+               }
+               if (caps->enable)
+-                      caps->enable(NULL);
++                      caps->enable((void *)caps);
+       }
+ }
diff --git a/queue-4.15/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch b/queue-4.15/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
new file mode 100644 (file)
index 0000000..bc43da4
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Jayachandran C <jnair@caviumnetworks.com>
+Date: Sun, 7 Jan 2018 22:53:35 -0800
+Subject: [Variant 3/Meltdown] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs
+
+From: Jayachandran C <jnair@caviumnetworks.com>
+
+
+Commit 0d90718871fe upstream.
+
+Add the older Broadcom ID as well as the new Cavium ID for ThunderX2
+CPUs.
+
+Signed-off-by: Jayachandran C <jnair@caviumnetworks.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -87,6 +87,7 @@
+ #define CAVIUM_CPU_PART_THUNDERX      0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+ #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
++#define CAVIUM_CPU_PART_THUNDERX2     0x0AF
+ #define BRCM_CPU_PART_VULCAN          0x516
+@@ -100,6 +101,8 @@
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
++#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
++#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
+ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+ #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
diff --git a/queue-4.15/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch b/queue-4.15/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
new file mode 100644 (file)
index 0000000..2ecc098
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 3 Jan 2018 11:19:34 +0000
+Subject: [Variant 2/Spectre-v2] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit a65d219fe5dc upstream.
+
+Hook up MIDR values for the Cortex-A72 and Cortex-A75 CPUs, since they
+will soon need MIDR matches for hardening the branch predictor.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -79,8 +79,10 @@
+ #define ARM_CPU_PART_AEM_V8           0xD0F
+ #define ARM_CPU_PART_FOUNDATION               0xD00
+ #define ARM_CPU_PART_CORTEX_A57               0xD07
++#define ARM_CPU_PART_CORTEX_A72               0xD08
+ #define ARM_CPU_PART_CORTEX_A53               0xD03
+ #define ARM_CPU_PART_CORTEX_A73               0xD09
++#define ARM_CPU_PART_CORTEX_A75               0xD0A
+ #define APM_CPU_PART_POTENZA          0x000
+@@ -97,7 +99,9 @@
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
++#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+ #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
++#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/queue-4.15/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch b/queue-4.15/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
new file mode 100644 (file)
index 0000000..2f7abeb
--- /dev/null
@@ -0,0 +1,169 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:07:40 +0000
+Subject: [Variant 3/Meltdown] arm64: entry: Add exception trampoline page for exceptions from EL0
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit c7b9adaf85f8 upstream.
+
+To allow unmapping of the kernel whilst running at EL0, we need to
+point the exception vectors at an entry trampoline that can map/unmap
+the kernel on entry/exit respectively.
+
+This patch adds the trampoline page, although it is not yet plugged
+into the vector table and is therefore unused.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S       |   86 ++++++++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/vmlinux.lds.S |   17 +++++++
+ 2 files changed, 103 insertions(+)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -28,6 +28,8 @@
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/irq.h>
++#include <asm/memory.h>
++#include <asm/mmu.h>
+ #include <asm/processor.h>
+ #include <asm/ptrace.h>
+ #include <asm/thread_info.h>
+@@ -943,6 +945,90 @@ __ni_sys_trace:
+       .popsection                             // .entry.text
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++/*
++ * Exception vectors trampoline.
++ */
++      .pushsection ".entry.tramp.text", "ax"
++
++      .macro tramp_map_kernel, tmp
++      mrs     \tmp, ttbr1_el1
++      sub     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
++      bic     \tmp, \tmp, #USER_ASID_FLAG
++      msr     ttbr1_el1, \tmp
++      .endm
++
++      .macro tramp_unmap_kernel, tmp
++      mrs     \tmp, ttbr1_el1
++      add     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
++      orr     \tmp, \tmp, #USER_ASID_FLAG
++      msr     ttbr1_el1, \tmp
++      /*
++       * We avoid running the post_ttbr_update_workaround here because the
++       * user and kernel ASIDs don't have conflicting mappings, so any
++       * "blessing" as described in:
++       *
++       *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
++       *
++       * will not hurt correctness. Whilst this may partially defeat the
++       * point of using split ASIDs in the first place, it avoids
++       * the hit of invalidating the entire I-cache on every return to
++       * userspace.
++       */
++      .endm
++
++      .macro tramp_ventry, regsize = 64
++      .align  7
++1:
++      .if     \regsize == 64
++      msr     tpidrro_el0, x30        // Restored in kernel_ventry
++      .endif
++      tramp_map_kernel        x30
++      ldr     x30, =vectors
++      prfm    plil1strm, [x30, #(1b - tramp_vectors)]
++      msr     vbar_el1, x30
++      add     x30, x30, #(1b - tramp_vectors)
++      isb
++      br      x30
++      .endm
++
++      .macro tramp_exit, regsize = 64
++      adr     x30, tramp_vectors
++      msr     vbar_el1, x30
++      tramp_unmap_kernel      x30
++      .if     \regsize == 64
++      mrs     x30, far_el1
++      .endif
++      eret
++      .endm
++
++      .align  11
++ENTRY(tramp_vectors)
++      .space  0x400
++
++      tramp_ventry
++      tramp_ventry
++      tramp_ventry
++      tramp_ventry
++
++      tramp_ventry    32
++      tramp_ventry    32
++      tramp_ventry    32
++      tramp_ventry    32
++END(tramp_vectors)
++
++ENTRY(tramp_exit_native)
++      tramp_exit
++END(tramp_exit_native)
++
++ENTRY(tramp_exit_compat)
++      tramp_exit      32
++END(tramp_exit_compat)
++
++      .ltorg
++      .popsection                             // .entry.tramp.text
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
++
+ /*
+  * Special system call wrappers.
+  */
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -57,6 +57,17 @@ jiffies = jiffies_64;
+ #define HIBERNATE_TEXT
+ #endif
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++#define TRAMP_TEXT                                    \
++      . = ALIGN(PAGE_SIZE);                           \
++      VMLINUX_SYMBOL(__entry_tramp_text_start) = .;   \
++      *(.entry.tramp.text)                            \
++      . = ALIGN(PAGE_SIZE);                           \
++      VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
++#else
++#define TRAMP_TEXT
++#endif
++
+ /*
+  * The size of the PE/COFF section that covers the kernel image, which
+  * runs from stext to _edata, must be a round multiple of the PE/COFF
+@@ -113,6 +124,7 @@ SECTIONS
+                       HYPERVISOR_TEXT
+                       IDMAP_TEXT
+                       HIBERNATE_TEXT
++                      TRAMP_TEXT
+                       *(.fixup)
+                       *(.gnu.warning)
+               . = ALIGN(16);
+@@ -214,6 +226,11 @@ SECTIONS
+       . += RESERVED_TTBR0_SIZE;
+ #endif
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      tramp_pg_dir = .;
++      . += PAGE_SIZE;
++#endif
++
+       __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
+       _end = .;
diff --git a/queue-4.15/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch b/queue-4.15/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
new file mode 100644 (file)
index 0000000..ab92fcf
--- /dev/null
@@ -0,0 +1,152 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:38:19 +0000
+Subject: [Variant 3/Meltdown] arm64: entry: Add fake CPU feature for unmapping the kernel at EL0
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit ea1e3de85e94 upstream.
+
+Allow explicit disabling of the entry trampoline on the kernel command
+line (kpti=off) by adding a fake CPU feature (ARM64_UNMAP_KERNEL_AT_EL0)
+that can be used to toggle the alternative sequences in our entry code and
+avoid use of the trampoline altogether if desired. This also allows us to
+make use of a static key in arm64_kernel_unmapped_at_el0().
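+
+The resulting tri-state behaviour (a summary of the code added below;
+strtobool accepts other spellings too):
+
+	kpti=off  ->  __kpti_forced = -1, capability stays off
+	kpti=on   ->  __kpti_forced =  1, capability forced on
+	(absent)  ->  __kpti_forced =  0, on when CONFIG_RANDOMIZE_BASE=y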
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpucaps.h |    3 +-
+ arch/arm64/include/asm/mmu.h     |    3 +-
+ arch/arm64/kernel/cpufeature.c   |   41 +++++++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/entry.S        |    9 ++++----
+ 4 files changed, 50 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -41,7 +41,8 @@
+ #define ARM64_WORKAROUND_CAVIUM_30115         20
+ #define ARM64_HAS_DCPOP                               21
+ #define ARM64_SVE                             22
++#define ARM64_UNMAP_KERNEL_AT_EL0             23
+-#define ARM64_NCAPS                           23
++#define ARM64_NCAPS                           24
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -36,7 +36,8 @@ typedef struct {
+ static inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+-      return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
++      return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
++             cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ }
+ extern void paging_init(void);
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -846,6 +846,40 @@ static bool has_no_fpsimd(const struct a
+                                       ID_AA64PFR0_FP_SHIFT) < 0;
+ }
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
++
++static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
++                              int __unused)
++{
++      /* Forced on command line? */
++      if (__kpti_forced) {
++              pr_info_once("kernel page table isolation forced %s by command line option\n",
++                           __kpti_forced > 0 ? "ON" : "OFF");
++              return __kpti_forced > 0;
++      }
++
++      /* Useful for KASLR robustness */
++      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
++              return true;
++
++      return false;
++}
++
++static int __init parse_kpti(char *str)
++{
++      bool enabled;
++      int ret = strtobool(str, &enabled);
++
++      if (ret)
++              return ret;
++
++      __kpti_forced = enabled ? 1 : -1;
++      return 0;
++}
++__setup("kpti=", parse_kpti);
++#endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+       {
+               .desc = "GIC system register CPU interface",
+@@ -932,6 +966,13 @@ static const struct arm64_cpu_capabiliti
+               .def_scope = SCOPE_SYSTEM,
+               .matches = hyp_offset_low,
+       },
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      {
++              .capability = ARM64_UNMAP_KERNEL_AT_EL0,
++              .def_scope = SCOPE_SYSTEM,
++              .matches = unmap_kernel_at_el0,
++      },
++#endif
+       {
+               /* FP/SIMD is not implemented */
+               .capability = ARM64_HAS_NO_FPSIMD,
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -74,6 +74,7 @@
+       .macro kernel_ventry, el, label, regsize = 64
+       .align 7
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+       .if     \el == 0
+       .if     \regsize == 64
+       mrs     x30, tpidrro_el0
+@@ -82,6 +83,7 @@
+       mov     x30, xzr
+       .endif
+       .endif
++alternative_else_nop_endif
+ #endif
+       sub     sp, sp, #S_FRAME_SIZE
+@@ -323,10 +325,9 @@ alternative_else_nop_endif
+       ldr     lr, [sp, #S_LR]
+       add     sp, sp, #S_FRAME_SIZE           // restore sp
+-#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
+-      eret
+-#else
+       .if     \el == 0
++alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       bne     4f
+       msr     far_el1, x30
+       tramp_alias     x30, tramp_exit_native
+@@ -334,10 +335,10 @@ alternative_else_nop_endif
+ 4:
+       tramp_alias     x30, tramp_exit_compat
+       br      x30
++#endif
+       .else
+       eret
+       .endif
+-#endif
+       .endm
+       .macro  irq_stack_entry
diff --git a/queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch b/queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
new file mode 100644 (file)
index 0000000..354c40f
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 2 Feb 2018 17:31:39 +0000
+Subject: [Variant 2/Spectre-v2] arm64: entry: Apply BP hardening for high-priority synchronous exceptions
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 5dfc6ed27710 upstream.
+
+Software-step and PC alignment fault exceptions have higher priority than
+instruction abort exceptions, so apply the BP hardening hooks there too
+if the user PC appears to reside in kernel space.
+
+Reported-by: Dan Hettena <dhettena@nvidia.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |    5 ++++-
+ arch/arm64/mm/fault.c     |    9 +++++++++
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -767,7 +767,10 @@ el0_sp_pc:
+        * Stack or PC alignment exception handling
+        */
+       mrs     x26, far_el1
+-      enable_daif
++      enable_da_f
++#ifdef CONFIG_TRACE_IRQFLAGS
++      bl      trace_hardirqs_off
++#endif
+       ct_user_exit
+       mov     x0, x26
+       mov     x1, x25
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -731,6 +731,12 @@ asmlinkage void __exception do_sp_pc_abo
+       struct siginfo info;
+       struct task_struct *tsk = current;
++      if (user_mode(regs)) {
++              if (instruction_pointer(regs) > TASK_SIZE)
++                      arm64_apply_bp_hardening();
++              local_irq_enable();
++      }
++
+       if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
+               pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
+                                   tsk->comm, task_pid_nr(tsk),
+@@ -790,6 +796,9 @@ asmlinkage int __exception do_debug_exce
+       if (interrupts_enabled(regs))
+               trace_hardirqs_off();
++      if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
++              arm64_apply_bp_hardening();
++
+       if (!inf->fn(addr, esr, regs)) {
+               rv = 1;
+       } else {
diff --git a/queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch b/queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
new file mode 100644 (file)
index 0000000..69f7419
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 2 Feb 2018 17:31:40 +0000
+Subject: [Variant 2/Spectre-v2] arm64: entry: Apply BP hardening for suspicious interrupts from EL0
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 30d88c0e3ace upstream.
+
+It is possible to take an IRQ from EL0 following a branch to a kernel
+address in such a way that the IRQ is prioritised over the instruction
+abort. Whilst an attacker would need to get the stars to align here,
+it might be achievable with enough calibration, so perform BP hardening
+in the rare case that we see a kernel address in the ELR when handling
+an IRQ from EL0.
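+
+The entry.S hunk below keys off bit 55 of the saved ELR: on arm64, bit
+55 of a virtual address distinguishes TTBR0 (user, clear) from TTBR1
+(kernel, set) mappings, so it is a cheap kernel-address test. Roughly,
+in C (a sketch only; the real check is the tbz in el0_irq_naked):
+
+	if (regs->pc & (1UL << 55))
+		do_el0_irq_bp_hardening();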
+
+Reported-by: Dan Hettena <dhettena@nvidia.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |    5 +++++
+ arch/arm64/mm/fault.c     |    6 ++++++
+ 2 files changed, 11 insertions(+)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -828,6 +828,11 @@ el0_irq_naked:
+ #endif
+       ct_user_exit
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++      tbz     x22, #55, 1f
++      bl      do_el0_irq_bp_hardening
++1:
++#endif
+       irq_handler
+ #ifdef CONFIG_TRACE_IRQFLAGS
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -707,6 +707,12 @@ asmlinkage void __exception do_mem_abort
+       arm64_notify_die("", regs, &info, esr);
+ }
++asmlinkage void __exception do_el0_irq_bp_hardening(void)
++{
++      /* PC has already been checked in entry.S */
++      arm64_apply_bp_hardening();
++}
++
+ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+                                                  unsigned int esr,
+                                                  struct pt_regs *regs)
diff --git a/queue-4.15/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch b/queue-4.15/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
new file mode 100644 (file)
index 0000000..b83c907
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:20 +0000
+Subject: [Variant 1/Spectre-v1] arm64: entry: Ensure branch through syscall table is bounded under speculation
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 6314d90e6493 upstream.
+
+In a similar manner to array_index_mask_nospec, this patch introduces an
+assembly macro (mask_nospec64) which can be used to bound a value under
+speculation. This macro is then used to ensure that the indirect branch
+through the syscall table is bounded under speculation, with out-of-range
+syscall numbers speculating as calls to sys_io_setup (syscall 0).
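+
+The arithmetic behind mask_nospec64, written out in C for clarity (a
+sketch only; the kernel uses the assembly macro so the sequence can be
+paired with csdb, and it assumes limit < 2^63):
+
+	/* returns idx if idx < limit, otherwise 0, without a branch */
+	unsigned long bound_idx(unsigned long idx, unsigned long limit)
+	{
+		unsigned long mask = (idx - limit) & ~idx;
+
+		/* sign bit of mask is set only when idx < limit */
+		mask = (long)mask >> 63;   /* all ones or all zeros */
+		return idx & mask;
+	}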
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h |   11 +++++++++++
+ arch/arm64/kernel/entry.S          |    2 ++
+ 2 files changed, 13 insertions(+)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -116,6 +116,17 @@
+       .endm
+ /*
++ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
++ * of bounds.
++ */
++      .macro  mask_nospec64, idx, limit, tmp
++      sub     \tmp, \idx, \limit
++      bic     \tmp, \tmp, \idx
++      and     \idx, \idx, \tmp, asr #63
++      csdb
++      .endm
++
++/*
+  * NOP sequence
+  */
+       .macro  nops, num
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -378,6 +378,7 @@ alternative_insn eret, nop, ARM64_UNMAP_
+  * x7 is reserved for the system call number in 32-bit mode.
+  */
+ wsc_nr        .req    w25             // number of system calls
++xsc_nr        .req    x25             // number of system calls (zero-extended)
+ wscno .req    w26             // syscall number
+ xscno .req    x26             // syscall number (zero-extended)
+ stbl  .req    x27             // syscall table pointer
+@@ -932,6 +933,7 @@ el0_svc_naked:                                     // compat entry point
+       b.ne    __sys_trace
+       cmp     wscno, wsc_nr                   // check upper syscall limit
+       b.hs    ni_sys
++      mask_nospec64 xscno, xsc_nr, x19        // enforce bounds for syscall number
+       ldr     x16, [stbl, xscno, lsl #3]      // address in the syscall table
+       blr     x16                             // call sys_* routine
+       b       ret_fast_syscall
diff --git a/queue-4.15/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch b/queue-4.15/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
new file mode 100644 (file)
index 0000000..534682c
--- /dev/null
@@ -0,0 +1,106 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:20:21 +0000
+Subject: [Variant 3/Meltdown] arm64: entry: Explicitly pass exception level to kernel_ventry macro
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 5b1f7fe41909 upstream.
+
+We will need to treat exceptions from EL0 differently in kernel_ventry,
+so rework the macro to take the exception level as an argument and
+construct the branch target using that.
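+
+For example, "kernel_ventry 0, sync" now assembles its branch as
+"b el0_sync", and "kernel_ventry 1, irq" as "b el1_irq".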
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |   50 +++++++++++++++++++++++-----------------------
+ 1 file changed, 25 insertions(+), 25 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -71,7 +71,7 @@
+ #define BAD_FIQ               2
+ #define BAD_ERROR     3
+-      .macro kernel_ventry    label
++      .macro kernel_ventry, el, label, regsize = 64
+       .align 7
+       sub     sp, sp, #S_FRAME_SIZE
+ #ifdef CONFIG_VMAP_STACK
+@@ -84,7 +84,7 @@
+       tbnz    x0, #THREAD_SHIFT, 0f
+       sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
+       sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
+-      b       \label
++      b       el\()\el\()_\label
+ 0:
+       /*
+@@ -116,7 +116,7 @@
+       sub     sp, sp, x0
+       mrs     x0, tpidrro_el0
+ #endif
+-      b       \label
++      b       el\()\el\()_\label
+       .endm
+       .macro  kernel_entry, el, regsize = 64
+@@ -369,31 +369,31 @@ tsk      .req    x28             // current thread_info
+       .align  11
+ ENTRY(vectors)
+-      kernel_ventry   el1_sync_invalid                // Synchronous EL1t
+-      kernel_ventry   el1_irq_invalid                 // IRQ EL1t
+-      kernel_ventry   el1_fiq_invalid                 // FIQ EL1t
+-      kernel_ventry   el1_error_invalid               // Error EL1t
+-
+-      kernel_ventry   el1_sync                        // Synchronous EL1h
+-      kernel_ventry   el1_irq                         // IRQ EL1h
+-      kernel_ventry   el1_fiq_invalid                 // FIQ EL1h
+-      kernel_ventry   el1_error                       // Error EL1h
+-
+-      kernel_ventry   el0_sync                        // Synchronous 64-bit EL0
+-      kernel_ventry   el0_irq                         // IRQ 64-bit EL0
+-      kernel_ventry   el0_fiq_invalid                 // FIQ 64-bit EL0
+-      kernel_ventry   el0_error                       // Error 64-bit EL0
++      kernel_ventry   1, sync_invalid                 // Synchronous EL1t
++      kernel_ventry   1, irq_invalid                  // IRQ EL1t
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1t
++      kernel_ventry   1, error_invalid                // Error EL1t
++
++      kernel_ventry   1, sync                         // Synchronous EL1h
++      kernel_ventry   1, irq                          // IRQ EL1h
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1h
++      kernel_ventry   1, error                        // Error EL1h
++
++      kernel_ventry   0, sync                         // Synchronous 64-bit EL0
++      kernel_ventry   0, irq                          // IRQ 64-bit EL0
++      kernel_ventry   0, fiq_invalid                  // FIQ 64-bit EL0
++      kernel_ventry   0, error                        // Error 64-bit EL0
+ #ifdef CONFIG_COMPAT
+-      kernel_ventry   el0_sync_compat                 // Synchronous 32-bit EL0
+-      kernel_ventry   el0_irq_compat                  // IRQ 32-bit EL0
+-      kernel_ventry   el0_fiq_invalid_compat          // FIQ 32-bit EL0
+-      kernel_ventry   el0_error_compat                // Error 32-bit EL0
++      kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
++      kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
++      kernel_ventry   0, fiq_invalid_compat, 32       // FIQ 32-bit EL0
++      kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
+ #else
+-      kernel_ventry   el0_sync_invalid                // Synchronous 32-bit EL0
+-      kernel_ventry   el0_irq_invalid                 // IRQ 32-bit EL0
+-      kernel_ventry   el0_fiq_invalid                 // FIQ 32-bit EL0
+-      kernel_ventry   el0_error_invalid               // Error 32-bit EL0
++      kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
++      kernel_ventry   0, irq_invalid, 32              // IRQ 32-bit EL0
++      kernel_ventry   0, fiq_invalid, 32              // FIQ 32-bit EL0
++      kernel_ventry   0, error_invalid, 32            // Error 32-bit EL0
+ #endif
+ END(vectors)
diff --git a/queue-4.15/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch b/queue-4.15/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
new file mode 100644 (file)
index 0000000..1fdcfff
--- /dev/null
@@ -0,0 +1,104 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:24:29 +0000
+Subject: [Variant 3/Meltdown] arm64: entry: Hook up entry trampoline to exception vectors
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 4bf3286d29f3 upstream.
+
+Hook up the entry trampoline to our exception vectors so that all
+exceptions from and returns to EL0 go via the trampoline, which swizzles
+the vector base register accordingly. Transitioning to and from the
+kernel clobbers x30, so we use tpidrro_el0 and far_el1 as scratch
+registers for native tasks.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |   39 ++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 36 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -73,6 +73,17 @@
+       .macro kernel_ventry, el, label, regsize = 64
+       .align 7
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      .if     \el == 0
++      .if     \regsize == 64
++      mrs     x30, tpidrro_el0
++      msr     tpidrro_el0, xzr
++      .else
++      mov     x30, xzr
++      .endif
++      .endif
++#endif
++
+       sub     sp, sp, #S_FRAME_SIZE
+ #ifdef CONFIG_VMAP_STACK
+       /*
+@@ -119,6 +130,11 @@
+       b       el\()\el\()_\label
+       .endm
++      .macro tramp_alias, dst, sym
++      mov_q   \dst, TRAMP_VALIAS
++      add     \dst, \dst, #(\sym - .entry.tramp.text)
++      .endm
++
+       .macro  kernel_entry, el, regsize = 64
+       .if     \regsize == 32
+       mov     w0, w0                          // zero upper 32 bits of x0
+@@ -271,18 +287,20 @@ alternative_else_nop_endif
+       .if     \el == 0
+       ldr     x23, [sp, #S_SP]                // load return stack pointer
+       msr     sp_el0, x23
++      tst     x22, #PSR_MODE32_BIT            // native task?
++      b.eq    3f
++
+ #ifdef CONFIG_ARM64_ERRATUM_845719
+ alternative_if ARM64_WORKAROUND_845719
+-      tbz     x22, #4, 1f
+ #ifdef CONFIG_PID_IN_CONTEXTIDR
+       mrs     x29, contextidr_el1
+       msr     contextidr_el1, x29
+ #else
+       msr contextidr_el1, xzr
+ #endif
+-1:
+ alternative_else_nop_endif
+ #endif
++3:
+       .endif
+       msr     elr_el1, x21                    // set up the return data
+@@ -304,7 +322,22 @@ alternative_else_nop_endif
+       ldp     x28, x29, [sp, #16 * 14]
+       ldr     lr, [sp, #S_LR]
+       add     sp, sp, #S_FRAME_SIZE           // restore sp
+-      eret                                    // return to kernel
++
++#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
++      eret
++#else
++      .if     \el == 0
++      bne     4f
++      msr     far_el1, x30
++      tramp_alias     x30, tramp_exit_native
++      br      x30
++4:
++      tramp_alias     x30, tramp_exit_compat
++      br      x30
++      .else
++      eret
++      .endif
++#endif
+       .endm
+       .macro  irq_stack_entry
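
The tramp_alias macro above is plain fixed-offset arithmetic: every symbol in .entry.tramp.text has an alias at the same offset from TRAMP_VALIAS, the fixmap address at which the trampoline page remains mapped while the rest of the kernel is unmapped. A minimal C sketch of that computation, using illustrative addresses rather than the kernel's real fixmap values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative placements; the real values come from the arm64 fixmap. */
    #define TRAMP_VALIAS     0xffffffffffe00000ULL /* always-mapped alias  */
    #define TRAMP_TEXT_START 0xffff000008082000ULL /* .entry.tramp.text VA */

    /* Alias of a trampoline symbol: same offset, different base. */
    static uint64_t tramp_alias(uint64_t sym)
    {
        return TRAMP_VALIAS + (sym - TRAMP_TEXT_START);
    }

    int main(void)
    {
        /* hypothetical offset of tramp_exit_native within the page */
        uint64_t sym = TRAMP_TEXT_START + 0x80;

        printf("alias: 0x%llx\n", (unsigned long long)tramp_alias(sym));
        return 0;
    }
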
diff --git a/queue-4.15/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch b/queue-4.15/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
new file mode 100644 (file)
index 0000000..3e07711
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 29 Jan 2018 11:59:58 +0000
+Subject: [Variant 3/Meltdown] arm64: entry: Reword comment about post_ttbr_update_workaround
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit f167211a93ac upstream.
+
+We don't fully understand the Cavium ThunderX erratum, but it appears
+that mapping the kernel as nG can lead to horrible consequences such as
+attempting to execute userspace from kernel context. Since kpti isn't
+enabled for these CPUs anyway, simplify the comment justifying the lack
+of post_ttbr_update_workaround in the exception trampoline.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |   13 +++----------
+ 1 file changed, 3 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1010,16 +1010,9 @@ alternative_else_nop_endif
+       orr     \tmp, \tmp, #USER_ASID_FLAG
+       msr     ttbr1_el1, \tmp
+       /*
+-       * We avoid running the post_ttbr_update_workaround here because the
+-       * user and kernel ASIDs don't have conflicting mappings, so any
+-       * "blessing" as described in:
+-       *
+-       *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+-       *
+-       * will not hurt correctness. Whilst this may partially defeat the
+-       * point of using split ASIDs in the first place, it avoids
+-       * the hit of invalidating the entire I-cache on every return to
+-       * userspace.
++       * We avoid running the post_ttbr_update_workaround here because
++       * it's only needed by Cavium ThunderX, which requires KPTI to be
++       * disabled.
+        */
+       .endm
diff --git a/queue-4.15/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch b/queue-4.15/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
new file mode 100644 (file)
index 0000000..3fd0a20
--- /dev/null
@@ -0,0 +1,77 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:29:19 +0000
+Subject: [Variant 3/Meltdown] arm64: erratum: Work around Falkor erratum #E1003 in trampoline code
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit d1777e686ad1 upstream.
+
+We rely on an atomic swizzling of TTBR1 when transitioning from the entry
+trampoline to the kernel proper on an exception. We can't rely on this
+atomicity in the face of Falkor erratum #E1003, so on affected cores we
+can issue a TLB invalidation to invalidate the walk cache prior to
+jumping into the kernel. There is still the possibility of a TLB conflict
+here due to conflicting walk cache entries prior to the invalidation, but
+this doesn't appear to be the case on these CPUs in practice.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig        |   17 +++++------------
+ arch/arm64/kernel/entry.S |   12 ++++++++++++
+ 2 files changed, 17 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -522,20 +522,13 @@ config CAVIUM_ERRATUM_30115
+ config QCOM_FALKOR_ERRATUM_1003
+       bool "Falkor E1003: Incorrect translation due to ASID change"
+       default y
+-      select ARM64_PAN if ARM64_SW_TTBR0_PAN
+       help
+         On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
+-        and BADDR are changed together in TTBRx_EL1. The workaround for this
+-        issue is to use a reserved ASID in cpu_do_switch_mm() before
+-        switching to the new ASID. Saying Y here selects ARM64_PAN if
+-        ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
+-        maintaining the E1003 workaround in the software PAN emulation code
+-        would be an unnecessary complication. The affected Falkor v1 CPU
+-        implements ARMv8.1 hardware PAN support and using hardware PAN
+-        support versus software PAN emulation is mutually exclusive at
+-        runtime.
+-
+-        If unsure, say Y.
++        and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
++        in TTBR1_EL1, this situation only occurs in the entry trampoline and
++        then only for entries in the walk cache, since the leaf translation
++        is unchanged. Work around the erratum by invalidating the walk cache
++        entries for the trampoline before entering the kernel proper.
+ config QCOM_FALKOR_ERRATUM_1009
+       bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -989,6 +989,18 @@ __ni_sys_trace:
+       sub     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+       bic     \tmp, \tmp, #USER_ASID_FLAG
+       msr     ttbr1_el1, \tmp
++#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
++alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
++      /* ASID already in \tmp[63:48] */
++      movk    \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
++      movk    \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
++      /* 2MB boundary containing the vectors, so we nobble the walk cache */
++      movk    \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
++      isb
++      tlbi    vae1, \tmp
++      dsb     nsh
++alternative_else_nop_endif
++#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+       .endm
+       .macro tramp_unmap_kernel, tmp
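
The movk sequence above builds the TLBI VAE1 operand by hand: the ASID is already sitting in bits [63:48] of \tmp, and the three movk instructions overwrite the low 48 bits with the page number of the trampoline's 2MB-aligned virtual address. A rough C model of the result, with an illustrative TRAMP_VALIAS:

    #include <stdint.h>

    #define SZ_2M        (2ULL * 1024 * 1024)
    #define TRAMP_VALIAS 0xffffffffffe00000ULL /* illustrative placement */

    /* Combine "ASID in bits [63:48]" with "VA page number in the low bits",
     * mirroring what the movk #:abs_gN_nc: relocations splice together. */
    static uint64_t tlbi_vae1_operand(uint64_t tmp_with_asid)
    {
        uint64_t page = (TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12;

        return (tmp_with_asid & 0xffff000000000000ULL)  /* keep the ASID   */
             | (page          & 0x0000ffffffffffffULL); /* replace VA bits */
    }
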
diff --git a/queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch b/queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
new file mode 100644 (file)
index 0000000..0c43d70
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 29 Jan 2018 11:59:56 +0000
+Subject: [Variant 3/Meltdown] arm64: Force KPTI to be disabled on Cavium ThunderX
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 6dc52b15c4a4 upstream.
+
+Cavium ThunderX's erratum 27456 results in a corruption of icache
+entries that are loaded from memory that is mapped as non-global
+(i.e. ASID-tagged).
+
+As KPTI is based on memory being mapped non-global, let's prevent
+it from kicking in if this erratum is detected.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+[will: Update comment]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -853,12 +853,23 @@ static int __kpti_forced; /* 0: not forc
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+                               int __unused)
+ {
++      char const *str = "command line option";
+       u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+-      /* Forced on command line? */
++      /*
++       * For reasons that aren't entirely clear, enabling KPTI on Cavium
++       * ThunderX leads to apparent I-cache corruption of kernel text, which
++       * ends as well as you might imagine. Don't even try.
++       */
++      if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
++              str = "ARM64_WORKAROUND_CAVIUM_27456";
++              __kpti_forced = -1;
++      }
++
++      /* Forced? */
+       if (__kpti_forced) {
+-              pr_info_once("kernel page table isolation forced %s by command line option\n",
+-                           __kpti_forced > 0 ? "ON" : "OFF");
++              pr_info_once("kernel page table isolation forced %s by %s\n",
++                           __kpti_forced > 0 ? "ON" : "OFF", str);
+               return __kpti_forced > 0;
+       }
diff --git a/queue-4.15/arm64-futex-mask-__user-pointers-prior-to-dereference.patch b/queue-4.15/arm64-futex-mask-__user-pointers-prior-to-dereference.patch
new file mode 100644 (file)
index 0000000..af0191c
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:24 +0000
+Subject: [Variant 1/Spectre-v1] arm64: futex: Mask __user pointers prior to dereference
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 91b2d3442f6a upstream.
+
+The arm64 futex code has some explicit dereferencing of user pointers
+when performing atomic operations in response to a futex command. This
+patch uses masking to limit any speculative futex operations to within
+the user address space.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/futex.h |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -48,9 +48,10 @@ do {                                                                        \
+ } while (0)
+ static inline int
+-arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+       int oldval = 0, ret, tmp;
++      u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+       pagefault_disable();
+@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int
+ }
+ static inline int
+-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
++futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
+                             u32 oldval, u32 newval)
+ {
+       int ret = 0;
+       u32 val, tmp;
++      u32 __user *uaddr;
+-      if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
++      if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
+               return -EFAULT;
++      uaddr = __uaccess_mask_ptr(_uaddr);
+       uaccess_enable();
+       asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ "     prfm    pstl1strm, %2\n"
diff --git a/queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch b/queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
new file mode 100644 (file)
index 0000000..1adb5d6
--- /dev/null
@@ -0,0 +1,97 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 29 Jan 2018 12:00:00 +0000
+Subject: [Variant 3/Meltdown] arm64: idmap: Use "awx" flags for .idmap.text .pushsection directives
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 439e70e27a51 upstream.
+
+The identity map is mapped as both writeable and executable by the
+SWAPPER_MM_MMUFLAGS and this is relied upon by the kpti code to manage
+a synchronisation flag. Update the .pushsection flags to reflect the
+actual mapping attributes.
+
+Reported-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu-reset.S |    2 +-
+ arch/arm64/kernel/head.S      |    2 +-
+ arch/arm64/kernel/sleep.S     |    2 +-
+ arch/arm64/mm/proc.S          |    8 ++++----
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/cpu-reset.S
++++ b/arch/arm64/kernel/cpu-reset.S
+@@ -16,7 +16,7 @@
+ #include <asm/virt.h>
+ .text
+-.pushsection    .idmap.text, "ax"
++.pushsection    .idmap.text, "awx"
+ /*
+  * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -371,7 +371,7 @@ ENDPROC(__primary_switched)
+  * end early head section, begin head code that is also used for
+  * hotplug and needs to have the same protections as the text region
+  */
+-      .section ".idmap.text","ax"
++      .section ".idmap.text","awx"
+ ENTRY(kimage_vaddr)
+       .quad           _text - TEXT_OFFSET
+--- a/arch/arm64/kernel/sleep.S
++++ b/arch/arm64/kernel/sleep.S
+@@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
+       ret
+ ENDPROC(__cpu_suspend_enter)
+-      .pushsection ".idmap.text", "ax"
++      .pushsection ".idmap.text", "awx"
+ ENTRY(cpu_resume)
+       bl      el2_setup               // if in EL2 drop to EL1 cleanly
+       bl      __cpu_setup
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend)
+  *
+  * x0: Address of context pointer
+  */
+-      .pushsection ".idmap.text", "ax"
++      .pushsection ".idmap.text", "awx"
+ ENTRY(cpu_do_resume)
+       ldp     x2, x3, [x0]
+       ldp     x4, x5, [x0, #16]
+@@ -152,7 +152,7 @@ ENTRY(cpu_do_switch_mm)
+       ret
+ ENDPROC(cpu_do_switch_mm)
+-      .pushsection ".idmap.text", "ax"
++      .pushsection ".idmap.text", "awx"
+ .macro        __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+       adrp    \tmp1, empty_zero_page
+@@ -184,7 +184,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
+       .popsection
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-      .pushsection ".idmap.text", "ax"
++      .pushsection ".idmap.text", "awx"
+       .macro  __idmap_kpti_get_pgtable_ent, type
+       dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
+@@ -373,7 +373,7 @@ ENDPROC(idmap_kpti_install_ng_mappings)
+  *    Initialise the processor for turning the MMU on.  Return in x0 the
+  *    value of the SCTLR_EL1 register.
+  */
+-      .pushsection ".idmap.text", "ax"
++      .pushsection ".idmap.text", "awx"
+ ENTRY(__cpu_setup)
+       tlbi    vmalle1                         // Invalidate local TLB
+       dsb     nsh
diff --git a/queue-4.15/arm64-implement-array_index_mask_nospec.patch b/queue-4.15/arm64-implement-array_index_mask_nospec.patch
new file mode 100644 (file)
index 0000000..05e6701
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Mon, 5 Feb 2018 15:34:17 +0000
+Subject: [Variant 1/Spectre-v1] arm64: Implement array_index_mask_nospec()
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+
+Commit 022620eed3d0 upstream.
+
+Provide an optimised, assembly implementation of array_index_mask_nospec()
+for arm64 so that the compiler is not in a position to transform the code
+in ways which affect its ability to inhibit speculation (e.g. by introducing
+conditional branches).
+
+This is similar to the sequence used by x86, modulo architectural differences
+in the carry/borrow flags.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/barrier.h |   21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -41,6 +41,27 @@
+ #define dma_rmb()     dmb(oshld)
+ #define dma_wmb()     dmb(oshst)
++/*
++ * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
++ * and 0 otherwise.
++ */
++#define array_index_mask_nospec array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long idx,
++                                                  unsigned long sz)
++{
++      unsigned long mask;
++
++      asm volatile(
++      "       cmp     %1, %2\n"
++      "       sbc     %0, xzr, xzr\n"
++      : "=r" (mask)
++      : "r" (idx), "Ir" (sz)
++      : "cc");
++
++      csdb();
++      return mask;
++}
++
+ #define __smp_mb()    dmb(ish)
+ #define __smp_rmb()   dmb(ishld)
+ #define __smp_wmb()   dmb(ishst)
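
To make the CMP/SBC idiom concrete: the comparison idx < sz either sets or clears the carry flag, and "sbc %0, xzr, xzr" then subtracts the borrow from zero, yielding all ones or all zeros. A portable C model of the same contract, with a couple of worked values; this is a behavioural sketch (using the GCC/Clang __int128 extension), not the arm64 code itself:

    #include <assert.h>

    /* ~0UL when idx < sz, 0 otherwise: the array_index_mask_nospec() contract. */
    static unsigned long mask_nospec_model(unsigned long idx, unsigned long sz)
    {
        /* idx < sz means idx - sz borrows, setting the high bits of the
         * widened difference; the shift smears that borrow across the word. */
        return (unsigned long)(((__int128)idx - sz) >> 64);
    }

    int main(void)
    {
        assert(mask_nospec_model(3, 8) == ~0UL); /* in bounds: index kept  */
        assert(mask_nospec_model(8, 8) == 0);    /* out of bounds: clamped */
        assert(mask_nospec_model(9, 8) == 0);
        return 0;
    }
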
diff --git a/queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch b/queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
new file mode 100644 (file)
index 0000000..6b3a036
--- /dev/null
@@ -0,0 +1,128 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 3 Jan 2018 12:46:21 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Implement branch predictor hardening for affected Cortex-A CPUs
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit aa6acde65e03 upstream.
+
+Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing
+and can theoretically be attacked by malicious code.
+
+This patch implements a PSCI-based mitigation for these CPUs when available.
+The call into firmware will invalidate the branch predictor state, preventing
+any malicious entries from affecting other victim contexts.
+
+Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/bpi.S        |   24 +++++++++++++++++++++++
+ arch/arm64/kernel/cpu_errata.c |   42 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 66 insertions(+)
+
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -53,3 +53,27 @@ ENTRY(__bp_harden_hyp_vecs_start)
+       vectors __kvm_hyp_vector
+       .endr
+ ENTRY(__bp_harden_hyp_vecs_end)
++ENTRY(__psci_hyp_bp_inval_start)
++      sub     sp, sp, #(8 * 18)
++      stp     x16, x17, [sp, #(16 * 0)]
++      stp     x14, x15, [sp, #(16 * 1)]
++      stp     x12, x13, [sp, #(16 * 2)]
++      stp     x10, x11, [sp, #(16 * 3)]
++      stp     x8, x9, [sp, #(16 * 4)]
++      stp     x6, x7, [sp, #(16 * 5)]
++      stp     x4, x5, [sp, #(16 * 6)]
++      stp     x2, x3, [sp, #(16 * 7)]
++      stp     x0, x1, [sp, #(16 * 8)]
++      mov     x0, #0x84000000
++      smc     #0
++      ldp     x16, x17, [sp, #(16 * 0)]
++      ldp     x14, x15, [sp, #(16 * 1)]
++      ldp     x12, x13, [sp, #(16 * 2)]
++      ldp     x10, x11, [sp, #(16 * 3)]
++      ldp     x8, x9, [sp, #(16 * 4)]
++      ldp     x6, x7, [sp, #(16 * 5)]
++      ldp     x4, x5, [sp, #(16 * 6)]
++      ldp     x2, x3, [sp, #(16 * 7)]
++      ldp     x0, x1, [sp, #(16 * 8)]
++      add     sp, sp, #(8 * 18)
++ENTRY(__psci_hyp_bp_inval_end)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -67,6 +67,8 @@ static int cpu_enable_trap_ctr_access(vo
+ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+ #ifdef CONFIG_KVM
++extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
++
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+ {
+@@ -108,6 +110,9 @@ static void __install_bp_hardening_cb(bp
+       spin_unlock(&bp_lock);
+ }
+ #else
++#define __psci_hyp_bp_inval_start     NULL
++#define __psci_hyp_bp_inval_end               NULL
++
+ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+                                     const char *hyp_vecs_end)
+@@ -132,6 +137,21 @@ static void  install_bp_hardening_cb(con
+       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+ }
++
++#include <linux/psci.h>
++
++static int enable_psci_bp_hardening(void *data)
++{
++      const struct arm64_cpu_capabilities *entry = data;
++
++      if (psci_ops.get_version)
++              install_bp_hardening_cb(entry,
++                                     (bp_hardening_cb_t)psci_ops.get_version,
++                                     __psci_hyp_bp_inval_start,
++                                     __psci_hyp_bp_inval_end);
++
++      return 0;
++}
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ #define MIDR_RANGE(model, min, max) \
+@@ -282,6 +302,28 @@ const struct arm64_cpu_capabilities arm6
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       },
+ #endif
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++              .enable = enable_psci_bp_hardening,
++      },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++              .enable = enable_psci_bp_hardening,
++      },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              .enable = enable_psci_bp_hardening,
++      },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++              .enable = enable_psci_bp_hardening,
++      },
++#endif
+       {
+       }
+ };
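
The shape of the mitigation is worth restating in plain C: the capability table matches on MIDR, the ->enable hook stashes a hardening callback in per-CPU data, and the exception entry path dispatches through that slot on every entry from user space. A stripped-down sketch of that registration and dispatch pattern follows; the names and the flat array are invented for illustration (the kernel uses DEFINE_PER_CPU_READ_MOSTLY and real PSCI plumbing):

    typedef void (*bp_hardening_cb_t)(void);

    #define NR_CPUS 8
    static bp_hardening_cb_t bp_hardening_cb[NR_CPUS]; /* one slot per CPU */

    static void firmware_bp_invalidate(void)
    {
        /* stand-in for the PSCI call that invalidates predictor state */
    }

    /* capability ->enable hook: runs once on each affected CPU */
    static void install_bp_hardening_cb(int cpu, bp_hardening_cb_t fn)
    {
        bp_hardening_cb[cpu] = fn;
    }

    /* exception entry from user space: dispatch through the per-CPU slot */
    static void arm64_apply_bp_hardening(int cpu)
    {
        bp_hardening_cb_t fn = bp_hardening_cb[cpu];

        if (fn)
            fn();
    }

    int main(void)
    {
        install_bp_hardening_cb(0, firmware_bp_invalidate);
        arm64_apply_bp_hardening(0); /* as if CPU 0 took an EL0 exception */
        return 0;
    }
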
diff --git a/queue-4.15/arm64-implement-branch-predictor-hardening-for-falkor.patch b/queue-4.15/arm64-implement-branch-predictor-hardening-for-falkor.patch
new file mode 100644 (file)
index 0000000..516a518
--- /dev/null
@@ -0,0 +1,172 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Shanker Donthineni <shankerd@codeaurora.org>
+Date: Fri, 5 Jan 2018 14:28:59 -0600
+Subject: [Variant 2/Spectre-v2] arm64: Implement branch predictor hardening for Falkor
+
+From: Shanker Donthineni <shankerd@codeaurora.org>
+
+
+Commit ec82b567a74f upstream.
+
+Falkor is susceptible to branch predictor aliasing and can
+theoretically be attacked by malicious code. This patch
+implements a mitigation for these attacks, preventing any
+malicious entries from affecting other victim contexts.
+
+Signed-off-by: Shanker Donthineni <shankerd@codeaurora.org>
+[will: fix label name when !CONFIG_KVM and remove references to MIDR_FALKOR]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpucaps.h |    3 +-
+ arch/arm64/include/asm/kvm_asm.h |    2 +
+ arch/arm64/kernel/bpi.S          |    8 +++++++
+ arch/arm64/kernel/cpu_errata.c   |   40 +++++++++++++++++++++++++++++++++++++--
+ arch/arm64/kvm/hyp/entry.S       |   12 +++++++++++
+ arch/arm64/kvm/hyp/switch.c      |    8 +++++++
+ 6 files changed, 70 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -43,7 +43,8 @@
+ #define ARM64_SVE                             22
+ #define ARM64_UNMAP_KERNEL_AT_EL0             23
+ #define ARM64_HARDEN_BRANCH_PREDICTOR         24
++#define ARM64_HARDEN_BP_POST_GUEST_EXIT               25
+-#define ARM64_NCAPS                           25
++#define ARM64_NCAPS                           26
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -68,6 +68,8 @@ extern u32 __kvm_get_mdcr_el2(void);
+ extern u32 __init_stage2_translation(void);
++extern void __qcom_hyp_sanitize_btac_predictors(void);
++
+ #endif
+ #endif /* __ARM_KVM_ASM_H__ */
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -77,3 +77,11 @@ ENTRY(__psci_hyp_bp_inval_start)
+       ldp     x0, x1, [sp, #(16 * 8)]
+       add     sp, sp, #(8 * 18)
+ ENTRY(__psci_hyp_bp_inval_end)
++
++ENTRY(__qcom_hyp_sanitize_link_stack_start)
++      stp     x29, x30, [sp, #-16]!
++      .rept   16
++      bl      . + 4
++      .endr
++      ldp     x29, x30, [sp], #16
++ENTRY(__qcom_hyp_sanitize_link_stack_end)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -68,6 +68,8 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_har
+ #ifdef CONFIG_KVM
+ extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
++extern char __qcom_hyp_sanitize_link_stack_start[];
++extern char __qcom_hyp_sanitize_link_stack_end[];
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+@@ -110,8 +112,10 @@ static void __install_bp_hardening_cb(bp
+       spin_unlock(&bp_lock);
+ }
+ #else
+-#define __psci_hyp_bp_inval_start     NULL
+-#define __psci_hyp_bp_inval_end               NULL
++#define __psci_hyp_bp_inval_start             NULL
++#define __psci_hyp_bp_inval_end                       NULL
++#define __qcom_hyp_sanitize_link_stack_start  NULL
++#define __qcom_hyp_sanitize_link_stack_end    NULL
+ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+@@ -152,6 +156,29 @@ static int enable_psci_bp_hardening(void
+       return 0;
+ }
++
++static void qcom_link_stack_sanitization(void)
++{
++      u64 tmp;
++
++      asm volatile("mov       %0, x30         \n"
++                   ".rept     16              \n"
++                   "bl        . + 4           \n"
++                   ".endr                     \n"
++                   "mov       x30, %0         \n"
++                   : "=&r" (tmp));
++}
++
++static int qcom_enable_link_stack_sanitization(void *data)
++{
++      const struct arm64_cpu_capabilities *entry = data;
++
++      install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
++                              __qcom_hyp_sanitize_link_stack_start,
++                              __qcom_hyp_sanitize_link_stack_end);
++
++      return 0;
++}
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ #define MIDR_RANGE(model, min, max) \
+@@ -323,6 +350,15 @@ const struct arm64_cpu_capabilities arm6
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+               .enable = enable_psci_bp_hardening,
+       },
++      {
++              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
++              .enable = qcom_enable_link_stack_sanitization,
++      },
++      {
++              .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
++              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
++      },
+ #endif
+       {
+       }
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -196,3 +196,15 @@ alternative_endif
+       eret
+ ENDPROC(__fpsimd_guest_restore)
++
++ENTRY(__qcom_hyp_sanitize_btac_predictors)
++      /**
++       * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
++       * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
++       * b15-b0: contains SiP functionID
++       */
++      movz    x0, #0x1700
++      movk    x0, #0xc200, lsl #16
++      smc     #0
++      ret
++ENDPROC(__qcom_hyp_sanitize_btac_predictors)
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -393,6 +393,14 @@ again:
+               /* 0 falls through to be handled out of EL2 */
+       }
++      if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
++              u32 midr = read_cpuid_id();
++
++              /* Apply BTAC predictors mitigation to all Falkor chips */
++              if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
++                      __qcom_hyp_sanitize_btac_predictors();
++      }
++
+       fp_enabled = __fpsimd_enabled();
+       __sysreg_save_guest_state(guest_ctxt);
diff --git a/queue-4.15/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch b/queue-4.15/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
new file mode 100644 (file)
index 0000000..0741532
--- /dev/null
@@ -0,0 +1,106 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 6 Dec 2017 11:24:02 +0000
+Subject: [Variant 3/Meltdown] arm64: kaslr: Put kernel vectors address in separate data page
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 6c27c4082f4f upstream.
+
+The literal pool entry for identifying the vectors base is the only piece
+of information in the trampoline page that identifies the true location
+of the kernel.
+
+This patch moves it into a page-aligned region of the .rodata section
+and maps this adjacent to the trampoline text via an additional fixmap
+entry, which protects against any accidental leakage of the trampoline
+contents.
+
+Suggested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fixmap.h |    1 +
+ arch/arm64/kernel/entry.S       |   14 ++++++++++++++
+ arch/arm64/kernel/vmlinux.lds.S |    5 ++++-
+ arch/arm64/mm/mmu.c             |   10 +++++++++-
+ 4 files changed, 28 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -59,6 +59,7 @@ enum fixed_addresses {
+ #endif /* CONFIG_ACPI_APEI_GHES */
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      FIX_ENTRY_TRAMP_DATA,
+       FIX_ENTRY_TRAMP_TEXT,
+ #define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1030,7 +1030,13 @@ alternative_else_nop_endif
+       msr     tpidrro_el0, x30        // Restored in kernel_ventry
+       .endif
+       tramp_map_kernel        x30
++#ifdef CONFIG_RANDOMIZE_BASE
++      adr     x30, tramp_vectors + PAGE_SIZE
++alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
++      ldr     x30, [x30]
++#else
+       ldr     x30, =vectors
++#endif
+       prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+       msr     vbar_el1, x30
+       add     x30, x30, #(1b - tramp_vectors)
+@@ -1073,6 +1079,14 @@ END(tramp_exit_compat)
+       .ltorg
+       .popsection                             // .entry.tramp.text
++#ifdef CONFIG_RANDOMIZE_BASE
++      .pushsection ".rodata", "a"
++      .align PAGE_SHIFT
++      .globl  __entry_tramp_data_start
++__entry_tramp_data_start:
++      .quad   vectors
++      .popsection                             // .rodata
++#endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ /*
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -251,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_
+ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+       <= SZ_4K, "Hibernate exit text too big or misaligned")
+ #endif
+-
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
++      "Entry trampoline text too big")
++#endif
+ /*
+  * If padding is applied before .head.text, virt<->phys conversions will fail.
+  */
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -541,8 +541,16 @@ static int __init map_entry_trampoline(v
+       __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+                            prot, pgd_pgtable_alloc, 0);
+-      /* ...as well as the kernel page table */
++      /* Map both the text and data into the kernel page table */
+       __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
++      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
++              extern char __entry_tramp_data_start[];
++
++              __set_fixmap(FIX_ENTRY_TRAMP_DATA,
++                           __pa_symbol(__entry_tramp_data_start),
++                           PAGE_KERNEL_RO);
++      }
++
+       return 0;
+ }
+ core_initcall(map_entry_trampoline);
diff --git a/queue-4.15/arm64-kconfig-add-config_unmap_kernel_at_el0.patch b/queue-4.15/arm64-kconfig-add-config_unmap_kernel_at_el0.patch
new file mode 100644 (file)
index 0000000..0472636
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:41:01 +0000
+Subject: [Variant 3/Meltdown] arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 084eb77cd3a8 upstream.
+
+Add a Kconfig entry to control use of the entry trampoline, which allows
+us to unmap the kernel whilst running in userspace and improve the
+robustness of KASLR.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -843,6 +843,19 @@ config FORCE_MAX_ZONEORDER
+         However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
+         4M allocations matching the default size used by generic code.
++config UNMAP_KERNEL_AT_EL0
++      bool "Unmap kernel when running in userspace (aka \"KAISER\")"
++      default y
++      help
++        Some attacks against KASLR make use of the timing difference between
++        a permission fault which could arise from a page table entry that is
++        present in the TLB, and a translation fault which always requires a
++        page table walk. This option defends against these attacks by unmapping
++        the kernel whilst running in userspace, therefore forcing translation
++        faults for all of kernel space.
++
++        If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+       bool "Emulate deprecated/obsolete ARMv8 instructions"
+       depends on COMPAT
diff --git a/queue-4.15/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch b/queue-4.15/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
new file mode 100644 (file)
index 0000000..31fd3ae
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 16:19:39 +0000
+Subject: [Variant 3/Meltdown] arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 0617052ddde3 upstream.
+
+Although CONFIG_UNMAP_KERNEL_AT_EL0 does make KASLR more robust, it's
+actually more useful as a mitigation against speculation attacks that
+can leak arbitrary kernel data to userspace through speculation.
+
+Reword the Kconfig help message to reflect this, and make the option
+depend on EXPERT so that it is on by default for the majority of users.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig |   13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -844,15 +844,14 @@ config FORCE_MAX_ZONEORDER
+         4M allocations matching the default size used by generic code.
+ config UNMAP_KERNEL_AT_EL0
+-      bool "Unmap kernel when running in userspace (aka \"KAISER\")"
++      bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+       default y
+       help
+-        Some attacks against KASLR make use of the timing difference between
+-        a permission fault which could arise from a page table entry that is
+-        present in the TLB, and a translation fault which always requires a
+-        page table walk. This option defends against these attacks by unmapping
+-        the kernel whilst running in userspace, therefore forcing translation
+-        faults for all of kernel space.
++        Speculation attacks against some high-performance processors can
++        be used to bypass MMU permission checks and leak kernel data to
++        userspace. This can be defended against by unmapping the kernel
++        when running in userspace, mapping it back in on exception entry
++        via a trampoline page in the vector table.
+         If unsure, say Y.
diff --git a/queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch b/queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
new file mode 100644 (file)
index 0000000..d03cc90
--- /dev/null
@@ -0,0 +1,211 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:21 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 3a0a397ff5ff upstream.
+
+Now that we've standardised on SMCCC v1.1 to perform the branch
+prediction invalidation, let's drop the previous band-aid.
+If vendors haven't updated their firmware to do SMCCC 1.1, they
+haven't updated PSCI either, so we don't lose anything.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/bpi.S        |   24 ---------------------
+ arch/arm64/kernel/cpu_errata.c |   45 +++++++++++------------------------------
+ arch/arm64/kvm/hyp/switch.c    |   14 ------------
+ 3 files changed, 13 insertions(+), 70 deletions(-)
+
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -54,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
+       vectors __kvm_hyp_vector
+       .endr
+ ENTRY(__bp_harden_hyp_vecs_end)
+-ENTRY(__psci_hyp_bp_inval_start)
+-      sub     sp, sp, #(8 * 18)
+-      stp     x16, x17, [sp, #(16 * 0)]
+-      stp     x14, x15, [sp, #(16 * 1)]
+-      stp     x12, x13, [sp, #(16 * 2)]
+-      stp     x10, x11, [sp, #(16 * 3)]
+-      stp     x8, x9, [sp, #(16 * 4)]
+-      stp     x6, x7, [sp, #(16 * 5)]
+-      stp     x4, x5, [sp, #(16 * 6)]
+-      stp     x2, x3, [sp, #(16 * 7)]
+-      stp     x0, x1, [sp, #(16 * 8)]
+-      mov     x0, #0x84000000
+-      smc     #0
+-      ldp     x16, x17, [sp, #(16 * 0)]
+-      ldp     x14, x15, [sp, #(16 * 1)]
+-      ldp     x12, x13, [sp, #(16 * 2)]
+-      ldp     x10, x11, [sp, #(16 * 3)]
+-      ldp     x8, x9, [sp, #(16 * 4)]
+-      ldp     x6, x7, [sp, #(16 * 5)]
+-      ldp     x4, x5, [sp, #(16 * 6)]
+-      ldp     x2, x3, [sp, #(16 * 7)]
+-      ldp     x0, x1, [sp, #(16 * 8)]
+-      add     sp, sp, #(8 * 18)
+-ENTRY(__psci_hyp_bp_inval_end)
+ ENTRY(__qcom_hyp_sanitize_link_stack_start)
+       stp     x29, x30, [sp, #-16]!
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -67,7 +67,6 @@ static int cpu_enable_trap_ctr_access(vo
+ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+ #ifdef CONFIG_KVM
+-extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+ extern char __qcom_hyp_sanitize_link_stack_start[];
+ extern char __qcom_hyp_sanitize_link_stack_end[];
+ extern char __smccc_workaround_1_smc_start[];
+@@ -116,8 +115,6 @@ static void __install_bp_hardening_cb(bp
+       spin_unlock(&bp_lock);
+ }
+ #else
+-#define __psci_hyp_bp_inval_start             NULL
+-#define __psci_hyp_bp_inval_end                       NULL
+ #define __qcom_hyp_sanitize_link_stack_start  NULL
+ #define __qcom_hyp_sanitize_link_stack_end    NULL
+ #define __smccc_workaround_1_smc_start                NULL
+@@ -164,24 +161,25 @@ static void call_hvc_arch_workaround_1(v
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+-static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++static int enable_smccc_arch_workaround_1(void *data)
+ {
++      const struct arm64_cpu_capabilities *entry = data;
+       bp_hardening_cb_t cb;
+       void *smccc_start, *smccc_end;
+       struct arm_smccc_res res;
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return false;
++              return 0;
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+-              return false;
++              return 0;
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if (res.a0)
+-                      return false;
++                      return 0;
+               cb = call_hvc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_hvc_start;
+               smccc_end = __smccc_workaround_1_hvc_end;
+@@ -191,35 +189,18 @@ static bool check_smccc_arch_workaround_
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if (res.a0)
+-                      return false;
++                      return 0;
+               cb = call_smc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_smc_start;
+               smccc_end = __smccc_workaround_1_smc_end;
+               break;
+       default:
+-              return false;
++              return 0;
+       }
+       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+-      return true;
+-}
+-
+-static int enable_psci_bp_hardening(void *data)
+-{
+-      const struct arm64_cpu_capabilities *entry = data;
+-
+-      if (psci_ops.get_version) {
+-              if (check_smccc_arch_workaround_1(entry))
+-                      return 0;
+-
+-              install_bp_hardening_cb(entry,
+-                                     (bp_hardening_cb_t)psci_ops.get_version,
+-                                     __psci_hyp_bp_inval_start,
+-                                     __psci_hyp_bp_inval_end);
+-      }
+-
+       return 0;
+ }
+@@ -399,22 +380,22 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+@@ -428,12 +409,12 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-              .enable = enable_psci_bp_hardening,
++              .enable = enable_smccc_arch_workaround_1,
+       },
+ #endif
+       {
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -344,20 +344,6 @@ again:
+       if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+               goto again;
+-      if (exit_code == ARM_EXCEPTION_TRAP &&
+-          (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+-           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) {
+-              u32 val = vcpu_get_reg(vcpu, 0);
+-
+-              if (val == PSCI_0_2_FN_PSCI_VERSION) {
+-                      val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm));
+-                      if (unlikely(val == KVM_ARM_PSCI_0_1))
+-                              val = PSCI_RET_NOT_SUPPORTED;
+-                      vcpu_set_reg(vcpu, 0, val);
+-                      goto again;
+-              }
+-      }
+-
+       if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+           exit_code == ARM_EXCEPTION_TRAP) {
+               bool valid;
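
The probe order this patch settles on is: only if the firmware implements SMCCC v1.1 can ARCH_FEATURES be queried safely, and only if that query reports ARM_SMCCC_ARCH_WORKAROUND_1 (function ID 0x80008000) does the kernel install the HVC or SMC callback. A hedged C outline of that sequence, mirroring enable_smccc_arch_workaround_1() above with the conduit plumbing elided:

    enum conduit { CONDUIT_NONE, CONDUIT_HVC, CONDUIT_SMC };

    #define ARCH_WORKAROUND_1 0x80008000u /* SMCCC function ID */

    /* Returns 1 if the caller should install the matching callback. */
    static int probe_workaround_1(enum conduit conduit, int have_smccc_1_1,
                                  long (*arch_features)(unsigned int fn))
    {
        if (!have_smccc_1_1)
            return 0;       /* SMCCC 1.0: ARCH_FEATURES cannot be probed */

        if (conduit != CONDUIT_HVC && conduit != CONDUIT_SMC)
            return 0;

        /* a0 == 0 from ARCH_FEATURES means the workaround is implemented */
        if (arch_features(ARCH_WORKAROUND_1) != 0)
            return 0;

        return 1;
    }
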
diff --git a/queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch b/queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
new file mode 100644 (file)
index 0000000..b138e1a
--- /dev/null
@@ -0,0 +1,314 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 6 Feb 2018 22:22:50 +0000
+Subject: [Variant 3/Meltdown] arm64: kpti: Add ->enable callback to remap swapper using nG mappings
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit f992b4dfd58b upstream.
+
+Defaulting to global mappings for kernel space is generally good for
+performance and appears to be necessary for Cavium ThunderX. If we
+subsequently decide that we need to enable kpti, then we need to rewrite
+our existing page table entries to be non-global. This is fiddly, and
+made worse by the possible use of contiguous mappings, which require
+a strict break-before-make sequence.
+
+Since the enable callback runs on each online CPU from stop_machine
+context, we can have all CPUs enter the idmap, where secondaries can
+wait for the primary CPU to rewrite swapper with its MMU off. It's all
+fairly horrible, but at least it only runs once.
+
+Tested-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Conflicts:
+       arch/arm64/mm/proc.S
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h |    4 
+ arch/arm64/kernel/cpufeature.c     |   25 ++++
+ arch/arm64/mm/proc.S               |  202 +++++++++++++++++++++++++++++++++++--
+ 3 files changed, 224 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -489,6 +489,10 @@ alternative_else_nop_endif
+ #endif
+       .endm
++      .macro  pte_to_phys, phys, pte
++      and     \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
++      .endm
++
+ /**
+  * Errata workaround prior to disable MMU. Insert an ISB immediately prior
+  * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -878,6 +878,30 @@ static bool unmap_kernel_at_el0(const st
+                                                    ID_AA64PFR0_CSV3_SHIFT);
+ }
++static int kpti_install_ng_mappings(void *__unused)
++{
++      typedef void (kpti_remap_fn)(int, int, phys_addr_t);
++      extern kpti_remap_fn idmap_kpti_install_ng_mappings;
++      kpti_remap_fn *remap_fn;
++
++      static bool kpti_applied = false;
++      int cpu = smp_processor_id();
++
++      if (kpti_applied)
++              return 0;
++
++      remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
++
++      cpu_install_idmap();
++      remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
++      cpu_uninstall_idmap();
++
++      if (!cpu)
++              kpti_applied = true;
++
++      return 0;
++}
++
+ static int __init parse_kpti(char *str)
+ {
+       bool enabled;
+@@ -984,6 +1008,7 @@ static const struct arm64_cpu_capabiliti
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = unmap_kernel_at_el0,
++              .enable = kpti_install_ng_mappings,
+       },
+ #endif
+       {
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -153,6 +153,16 @@ ENTRY(cpu_do_switch_mm)
+ ENDPROC(cpu_do_switch_mm)
+       .pushsection ".idmap.text", "ax"
++
++.macro        __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
++      adrp    \tmp1, empty_zero_page
++      msr     ttbr1_el1, \tmp2
++      isb
++      tlbi    vmalle1
++      dsb     nsh
++      isb
++.endm
++
+ /*
+  * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+  *
+@@ -162,13 +172,7 @@ ENDPROC(cpu_do_switch_mm)
+ ENTRY(idmap_cpu_replace_ttbr1)
+       save_and_disable_daif flags=x2
+-      adrp    x1, empty_zero_page
+-      msr     ttbr1_el1, x1
+-      isb
+-
+-      tlbi    vmalle1
+-      dsb     nsh
+-      isb
++      __idmap_cpu_set_reserved_ttbr1 x1, x3
+       msr     ttbr1_el1, x0
+       isb
+@@ -179,6 +183,190 @@ ENTRY(idmap_cpu_replace_ttbr1)
+ ENDPROC(idmap_cpu_replace_ttbr1)
+       .popsection
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      .pushsection ".idmap.text", "ax"
++
++      .macro  __idmap_kpti_get_pgtable_ent, type
++      dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
++      dmb     sy                              // lines are written back before
++      ldr     \type, [cur_\()\type\()p]       // loading the entry
++      tbz     \type, #0, next_\()\type        // Skip invalid entries
++      .endm
++
++      .macro __idmap_kpti_put_pgtable_ent_ng, type
++      orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
++      str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
++      dc      civac, cur_\()\type\()p         // is visible to all CPUs.
++      .endm
++
++/*
++ * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
++ *
++ * Called exactly once from stop_machine context by each CPU found during boot.
++ */
++__idmap_kpti_flag:
++      .long   1
++ENTRY(idmap_kpti_install_ng_mappings)
++      cpu             .req    w0
++      num_cpus        .req    w1
++      swapper_pa      .req    x2
++      swapper_ttb     .req    x3
++      flag_ptr        .req    x4
++      cur_pgdp        .req    x5
++      end_pgdp        .req    x6
++      pgd             .req    x7
++      cur_pudp        .req    x8
++      end_pudp        .req    x9
++      pud             .req    x10
++      cur_pmdp        .req    x11
++      end_pmdp        .req    x12
++      pmd             .req    x13
++      cur_ptep        .req    x14
++      end_ptep        .req    x15
++      pte             .req    x16
++
++      mrs     swapper_ttb, ttbr1_el1
++      adr     flag_ptr, __idmap_kpti_flag
++
++      cbnz    cpu, __idmap_kpti_secondary
++
++      /* We're the boot CPU. Wait for the others to catch up */
++      sevl
++1:    wfe
++      ldaxr   w18, [flag_ptr]
++      eor     w18, w18, num_cpus
++      cbnz    w18, 1b
++
++      /* We need to walk swapper, so turn off the MMU. */
++      pre_disable_mmu_workaround
++      mrs     x18, sctlr_el1
++      bic     x18, x18, #SCTLR_ELx_M
++      msr     sctlr_el1, x18
++      isb
++
++      /* Everybody is enjoying the idmap, so we can rewrite swapper. */
++      /* PGD */
++      mov     cur_pgdp, swapper_pa
++      add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
++do_pgd:       __idmap_kpti_get_pgtable_ent    pgd
++      tbnz    pgd, #1, walk_puds
++      __idmap_kpti_put_pgtable_ent_ng pgd
++next_pgd:
++      add     cur_pgdp, cur_pgdp, #8
++      cmp     cur_pgdp, end_pgdp
++      b.ne    do_pgd
++
++      /* Publish the updated tables and nuke all the TLBs */
++      dsb     sy
++      tlbi    vmalle1is
++      dsb     ish
++      isb
++
++      /* We're done: fire up the MMU again */
++      mrs     x18, sctlr_el1
++      orr     x18, x18, #SCTLR_ELx_M
++      msr     sctlr_el1, x18
++      isb
++
++      /* Set the flag to zero to indicate that we're all done */
++      str     wzr, [flag_ptr]
++      ret
++
++      /* PUD */
++walk_puds:
++      .if CONFIG_PGTABLE_LEVELS > 3
++      pte_to_phys     cur_pudp, pgd
++      add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
++do_pud:       __idmap_kpti_get_pgtable_ent    pud
++      tbnz    pud, #1, walk_pmds
++      __idmap_kpti_put_pgtable_ent_ng pud
++next_pud:
++      add     cur_pudp, cur_pudp, 8
++      cmp     cur_pudp, end_pudp
++      b.ne    do_pud
++      b       next_pgd
++      .else /* CONFIG_PGTABLE_LEVELS <= 3 */
++      mov     pud, pgd
++      b       walk_pmds
++next_pud:
++      b       next_pgd
++      .endif
++
++      /* PMD */
++walk_pmds:
++      .if CONFIG_PGTABLE_LEVELS > 2
++      pte_to_phys     cur_pmdp, pud
++      add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
++do_pmd:       __idmap_kpti_get_pgtable_ent    pmd
++      tbnz    pmd, #1, walk_ptes
++      __idmap_kpti_put_pgtable_ent_ng pmd
++next_pmd:
++      add     cur_pmdp, cur_pmdp, #8
++      cmp     cur_pmdp, end_pmdp
++      b.ne    do_pmd
++      b       next_pud
++      .else /* CONFIG_PGTABLE_LEVELS <= 2 */
++      mov     pmd, pud
++      b       walk_ptes
++next_pmd:
++      b       next_pud
++      .endif
++
++      /* PTE */
++walk_ptes:
++      pte_to_phys     cur_ptep, pmd
++      add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
++do_pte:       __idmap_kpti_get_pgtable_ent    pte
++      __idmap_kpti_put_pgtable_ent_ng pte
++next_pte:
++      add     cur_ptep, cur_ptep, #8
++      cmp     cur_ptep, end_ptep
++      b.ne    do_pte
++      b       next_pmd
++
++      /* Secondary CPUs end up here */
++__idmap_kpti_secondary:
++      /* Uninstall swapper before surgery begins */
++      __idmap_cpu_set_reserved_ttbr1 x18, x17
++
++      /* Increment the flag to let the boot CPU know we're ready */
++1:    ldxr    w18, [flag_ptr]
++      add     w18, w18, #1
++      stxr    w17, w18, [flag_ptr]
++      cbnz    w17, 1b
++
++      /* Wait for the boot CPU to finish messing around with swapper */
++      sevl
++1:    wfe
++      ldxr    w18, [flag_ptr]
++      cbnz    w18, 1b
++
++      /* All done, act like nothing happened */
++      msr     ttbr1_el1, swapper_ttb
++      isb
++      ret
++
++      .unreq  cpu
++      .unreq  num_cpus
++      .unreq  swapper_pa
++      .unreq  swapper_ttb
++      .unreq  flag_ptr
++      .unreq  cur_pgdp
++      .unreq  end_pgdp
++      .unreq  pgd
++      .unreq  cur_pudp
++      .unreq  end_pudp
++      .unreq  pud
++      .unreq  cur_pmdp
++      .unreq  end_pmdp
++      .unreq  pmd
++      .unreq  cur_ptep
++      .unreq  end_ptep
++      .unreq  pte
++ENDPROC(idmap_kpti_install_ng_mappings)
++      .popsection
++#endif
++
+ /*
+  *    __cpu_setup
+  *
diff --git a/queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch b/queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
new file mode 100644 (file)
index 0000000..4df4b76
--- /dev/null
@@ -0,0 +1,287 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Wed, 10 Jan 2018 13:18:30 +0000
+Subject: [Variant 3/Meltdown] arm64: kpti: Fix the interaction between ASID switching and software PAN
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+
+Commit 6b88a32c7af6 upstream.
+
+With ARM64_SW_TTBR0_PAN enabled, the exception entry code checks the
+active ASID to decide whether user access was enabled (non-zero ASID)
+when the exception was taken. On return from exception, if user access
+was previously disabled, it re-instates TTBR0_EL1 from the per-thread
+saved value (updated in switch_mm() or efi_set_pgd()).
+
+Commit 7655abb95386 ("arm64: mm: Move ASID from TTBR0 to TTBR1") makes the
+TTBR0_EL1 + ASID switching non-atomic. Subsequently, commit 27a921e75711
+("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") changes the
+__uaccess_ttbr0_disable() function and asm macro to first write the
+reserved TTBR0_EL1 followed by the ASID=0 update in TTBR1_EL1. If an
+exception occurs between these two, the exception return code will
+re-instate a valid TTBR0_EL1. A similar scenario can happen in
+cpu_switch_mm() between setting the reserved TTBR0_EL1 and the ASID
+update in cpu_do_switch_mm().
+
+This patch reverts the entry.S check for ASID == 0 to TTBR0_EL1 and
+disables the interrupts around the TTBR0_EL1 and ASID switching code in
+__uaccess_ttbr0_disable(). It also ensures that, when returning from the
+EFI runtime services, efi_set_pgd() doesn't leave a non-zero ASID in
+TTBR1_EL1 by using uaccess_ttbr0_{enable,disable}.
+
+The accesses to current_thread_info()->ttbr0 are updated to use
+READ_ONCE/WRITE_ONCE.
+
+As a safety measure, __uaccess_ttbr0_enable() always masks out any
+existing non-zero ASID in TTBR1_EL1 before writing the new ASID.
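+
+In outline (a C sketch of the fix, condensed from the uaccess.h hunk
+below rather than a drop-in implementation), the disable path now keeps
+both system-register updates inside a single IRQ-off region:
+
+    static inline void __uaccess_ttbr0_disable(void)
+    {
+        unsigned long flags, ttbr;
+
+        local_irq_save(flags);          /* no exception between the writes */
+        ttbr = read_sysreg(ttbr1_el1);
+        ttbr &= ~TTBR_ASID_MASK;
+        /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+        write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+        isb();
+        write_sysreg(ttbr, ttbr1_el1);  /* set reserved ASID */
+        isb();
+        local_irq_restore(flags);
+    }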
+
+Fixes: 27a921e75711 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: James Morse <james.morse@arm.com>
+Tested-by: James Morse <james.morse@arm.com>
+Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Conflicts:
+       arch/arm64/include/asm/asm-uaccess.h
+       arch/arm64/include/asm/uaccess.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/asm-uaccess.h |   12 +++++++-----
+ arch/arm64/include/asm/efi.h         |   12 +++++++-----
+ arch/arm64/include/asm/mmu_context.h |    3 ++-
+ arch/arm64/include/asm/uaccess.h     |    9 ++++++---
+ arch/arm64/kernel/entry.S            |    2 +-
+ arch/arm64/lib/clear_user.S          |    2 +-
+ arch/arm64/lib/copy_from_user.S      |    2 +-
+ arch/arm64/lib/copy_in_user.S        |    2 +-
+ arch/arm64/lib/copy_to_user.S        |    2 +-
+ arch/arm64/mm/cache.S                |    2 +-
+ arch/arm64/mm/proc.S                 |    3 +++
+ arch/arm64/xen/hypercall.S           |    2 +-
+ 12 files changed, 32 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/include/asm/asm-uaccess.h
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -14,11 +14,11 @@
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       .macro  __uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
++      bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
+-      bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+       msr     ttbr1_el1, \tmp1                // set reserved ASID
+       isb
+       .endm
+@@ -35,9 +35,11 @@
+       isb
+       .endm
+-      .macro  uaccess_ttbr0_disable, tmp1
++      .macro  uaccess_ttbr0_disable, tmp1, tmp2
+ alternative_if_not ARM64_HAS_PAN
++      save_and_disable_irq \tmp2              // avoid preemption
+       __uaccess_ttbr0_disable \tmp1
++      restore_irq \tmp2
+ alternative_else_nop_endif
+       .endm
+@@ -49,7 +51,7 @@ alternative_if_not ARM64_HAS_PAN
+ alternative_else_nop_endif
+       .endm
+ #else
+-      .macro  uaccess_ttbr0_disable, tmp1
++      .macro  uaccess_ttbr0_disable, tmp1, tmp2
+       .endm
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
+@@ -59,8 +61,8 @@ alternative_else_nop_endif
+ /*
+  * These macros are no-ops when UAO is present.
+  */
+-      .macro  uaccess_disable_not_uao, tmp1
+-      uaccess_ttbr0_disable \tmp1
++      .macro  uaccess_disable_not_uao, tmp1, tmp2
++      uaccess_ttbr0_disable \tmp1, \tmp2
+ alternative_if ARM64_ALT_PAN_NOT_UAO
+       SET_PSTATE_PAN(1)
+ alternative_else_nop_endif
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm
+               if (mm != current->active_mm) {
+                       /*
+                        * Update the current thread's saved ttbr0 since it is
+-                       * restored as part of a return from exception. Set
+-                       * the hardware TTBR0_EL1 using cpu_switch_mm()
+-                       * directly to enable potential errata workarounds.
++                       * restored as part of a return from exception. Enable
++                       * access to the valid TTBR0_EL1 and invoke the errata
++                       * workaround directly since there is no return from
++                       * exception when invoking the EFI run-time services.
+                        */
+                       update_saved_ttbr0(current, mm);
+-                      cpu_switch_mm(mm->pgd, mm);
++                      uaccess_ttbr0_enable();
++                      post_ttbr_update_workaround();
+               } else {
+                       /*
+                        * Defer the switch to the current thread's TTBR0_EL1
+                        * until uaccess_enable(). Restore the current
+                        * thread's saved ttbr0 corresponding to its active_mm
+                        */
+-                      cpu_set_reserved_ttbr0();
++                      uaccess_ttbr0_disable();
+                       update_saved_ttbr0(current, current->active_mm);
+               }
+       }
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -175,7 +175,7 @@ static inline void update_saved_ttbr0(st
+       else
+               ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+-      task_thread_info(tsk)->ttbr0 = ttbr;
++      WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
+ }
+ #else
+ static inline void update_saved_ttbr0(struct task_struct *tsk,
+@@ -230,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct
+ #define activate_mm(prev,next)        switch_mm(prev, next, current)
+ void verify_cpu_asid_bits(void);
++void post_ttbr_update_workaround(void);
+ #endif /* !__ASSEMBLY__ */
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -105,16 +105,18 @@ static inline void set_fs(mm_segment_t f
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ static inline void __uaccess_ttbr0_disable(void)
+ {
+-      unsigned long ttbr;
++      unsigned long flags, ttbr;
++      local_irq_save(flags);
+       ttbr = read_sysreg(ttbr1_el1);
++      ttbr &= ~TTBR_ASID_MASK;
+       /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+       write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+       isb();
+       /* Set reserved ASID */
+-      ttbr &= ~TTBR_ASID_MASK;
+       write_sysreg(ttbr, ttbr1_el1);
+       isb();
++      local_irq_restore(flags);
+ }
+ static inline void __uaccess_ttbr0_enable(void)
+@@ -127,10 +129,11 @@ static inline void __uaccess_ttbr0_enabl
+        * roll-over and an update of 'ttbr0'.
+        */
+       local_irq_save(flags);
+-      ttbr0 = current_thread_info()->ttbr0;
++      ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
+       /* Restore active ASID */
+       ttbr1 = read_sysreg(ttbr1_el1);
++      ttbr1 &= ~TTBR_ASID_MASK;               /* safety measure */
+       ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+       write_sysreg(ttbr1, ttbr1_el1);
+       isb();
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -204,7 +204,7 @@ alternative_if ARM64_HAS_PAN
+ alternative_else_nop_endif
+       .if     \el != 0
+-      mrs     x21, ttbr1_el1
++      mrs     x21, ttbr0_el1
+       tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
+       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
+       b.eq    1f                              // TTBR0 access already disabled
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wz
+       b.mi    5f
+ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
+ 5:    mov     x0, #0
+-      uaccess_disable_not_uao x2
++      uaccess_disable_not_uao x2, x3
+       ret
+ ENDPROC(__clear_user)
+--- a/arch/arm64/lib/copy_from_user.S
++++ b/arch/arm64/lib/copy_from_user.S
+@@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user)
+       uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+-      uaccess_disable_not_uao x3
++      uaccess_disable_not_uao x3, x4
+       mov     x0, #0                          // Nothing to copy
+       ret
+ ENDPROC(__arch_copy_from_user)
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -68,7 +68,7 @@ ENTRY(raw_copy_in_user)
+       uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+-      uaccess_disable_not_uao x3
++      uaccess_disable_not_uao x3, x4
+       mov     x0, #0
+       ret
+ ENDPROC(raw_copy_in_user)
+--- a/arch/arm64/lib/copy_to_user.S
++++ b/arch/arm64/lib/copy_to_user.S
+@@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user)
+       uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+-      uaccess_disable_not_uao x3
++      uaccess_disable_not_uao x3, x4
+       mov     x0, #0
+       ret
+ ENDPROC(__arch_copy_to_user)
+--- a/arch/arm64/mm/cache.S
++++ b/arch/arm64/mm/cache.S
+@@ -72,7 +72,7 @@ USER(9f, ic  ivau, x4        )               // invalidate I
+       isb
+       mov     x0, #0
+ 1:
+-      uaccess_ttbr0_disable x1
++      uaccess_ttbr0_disable x1, x2
+       ret
+ 9:
+       mov     x0, #-EFAULT
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -140,6 +140,9 @@ ENDPROC(cpu_do_resume)
+ ENTRY(cpu_do_switch_mm)
+       mrs     x2, ttbr1_el1
+       mmid    x1, x1                          // get mm->context.id
++#ifdef CONFIG_ARM64_SW_TTBR0_PAN
++      bfi     x0, x1, #48, #16                // set the ASID field in TTBR0
++#endif
+       bfi     x2, x1, #48, #16                // set the ASID
+       msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
+       isb
+--- a/arch/arm64/xen/hypercall.S
++++ b/arch/arm64/xen/hypercall.S
+@@ -107,6 +107,6 @@ ENTRY(privcmd_call)
+       /*
+        * Disable userspace access from kernel once the hyp call completed.
+        */
+-      uaccess_ttbr0_disable x6
++      uaccess_ttbr0_disable x6, x7
+       ret
+ ENDPROC(privcmd_call);
diff --git a/queue-4.15/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch b/queue-4.15/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
new file mode 100644 (file)
index 0000000..4eb276a
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 29 Jan 2018 11:59:53 +0000
+Subject: [Variant 3/Meltdown] arm64: kpti: Make use of nG dependent on arm64_kernel_unmapped_at_el0()
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 41acec624087 upstream.
+
+To allow systems which do not require kpti to continue running with
+global kernel mappings (which appears to be a requirement for Cavium
+ThunderX due to a CPU erratum), make the use of nG in the kernel page
+tables dependent on arm64_kernel_unmapped_at_el0(), which is resolved
+at runtime.
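+
+Concretely (condensed from the pgtable-prot.h hunk below), the nG bit
+becomes a runtime-evaluated component of the protection macros:
+
+    #define PTE_MAYBE_NG        (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+    #define PMD_MAYBE_NG        (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+
+    #define PROT_DEFAULT        (_PROT_DEFAULT | PTE_MAYBE_NG)
+    #define PROT_SECT_DEFAULT   (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)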
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kernel-pgtable.h |   12 ++----------
+ arch/arm64/include/asm/pgtable-prot.h   |   30 ++++++++++++++----------------
+ 2 files changed, 16 insertions(+), 26 deletions(-)
+
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -78,16 +78,8 @@
+ /*
+  * Initial memory map attributes.
+  */
+-#define _SWAPPER_PTE_FLAGS    (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+-#define _SWAPPER_PMD_FLAGS    (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+-
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-#define SWAPPER_PTE_FLAGS     (_SWAPPER_PTE_FLAGS | PTE_NG)
+-#define SWAPPER_PMD_FLAGS     (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+-#else
+-#define SWAPPER_PTE_FLAGS     _SWAPPER_PTE_FLAGS
+-#define SWAPPER_PMD_FLAGS     _SWAPPER_PMD_FLAGS
+-#endif
++#define SWAPPER_PTE_FLAGS     (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
++#define SWAPPER_PMD_FLAGS     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+ #if ARM64_SWAPPER_USES_SECTION_MAPS
+ #define SWAPPER_MM_MMUFLAGS   (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -37,13 +37,11 @@
+ #define _PROT_DEFAULT         (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+ #define _PROT_SECT_DEFAULT    (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-#define PROT_DEFAULT          (_PROT_DEFAULT | PTE_NG)
+-#define PROT_SECT_DEFAULT     (_PROT_SECT_DEFAULT | PMD_SECT_NG)
+-#else
+-#define PROT_DEFAULT          _PROT_DEFAULT
+-#define PROT_SECT_DEFAULT     _PROT_SECT_DEFAULT
+-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
++#define PTE_MAYBE_NG          (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
++#define PMD_MAYBE_NG          (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
++
++#define PROT_DEFAULT          (_PROT_DEFAULT | PTE_MAYBE_NG)
++#define PROT_SECT_DEFAULT     (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+ #define PROT_DEVICE_nGnRnE    (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ #define PROT_DEVICE_nGnRE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+@@ -55,22 +53,22 @@
+ #define PROT_SECT_NORMAL      (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+ #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+-#define _PAGE_DEFAULT         (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+-#define _HYP_PAGE_DEFAULT     (_PAGE_DEFAULT & ~PTE_NG)
++#define _PAGE_DEFAULT         (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
++#define _HYP_PAGE_DEFAULT     _PAGE_DEFAULT
+-#define PAGE_KERNEL           __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+-#define PAGE_KERNEL_RO                __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+-#define PAGE_KERNEL_ROX               __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+-#define PAGE_KERNEL_EXEC      __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+-#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
++#define PAGE_KERNEL           __pgprot(PROT_NORMAL)
++#define PAGE_KERNEL_RO                __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
++#define PAGE_KERNEL_ROX               __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
++#define PAGE_KERNEL_EXEC      __pgprot(PROT_NORMAL & ~PTE_PXN)
++#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
+ #define PAGE_HYP              __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+ #define PAGE_HYP_EXEC         __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+ #define PAGE_HYP_RO           __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+ #define PAGE_HYP_DEVICE               __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+-#define PAGE_S2                       __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+-#define PAGE_S2_DEVICE                __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
++#define PAGE_S2                       __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
++#define PAGE_S2_DEVICE                __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+ #define PAGE_NONE             __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED           __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
diff --git a/queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch b/queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
new file mode 100644 (file)
index 0000000..aa611fc
--- /dev/null
@@ -0,0 +1,70 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:15 +0000
+Subject: [Variant 2/Spectre-v2] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit f72af90c3783 upstream.
+
+We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
+So let's intercept it as early as we can by testing for the
+function call number as soon as we've identified an HVC call
+coming from the guest.
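+
+In C-like pseudocode (a sketch only; set_guest_x0() and
+return_to_guest() are hypothetical names standing in for the
+hyp-entry.S sequence added below):
+
+    /* on a guest HVC trap, before the generic el1_trap path */
+    if ((u32)guest_x0 == ARM_SMCCC_ARCH_WORKAROUND_1) {
+        /* the workaround already ran on host entry; return 0 at once */
+        set_guest_x0(0);
+        return_to_guest();      /* eret without a full exit */
+    }
+    /* otherwise fall through to el1_trap */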
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/hyp-entry.S |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -15,6 +15,7 @@
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/arm-smccc.h>
+ #include <linux/linkage.h>
+ #include <asm/alternative.h>
+@@ -64,10 +65,11 @@ alternative_endif
+       lsr     x0, x1, #ESR_ELx_EC_SHIFT
+       cmp     x0, #ESR_ELx_EC_HVC64
++      ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
+       b.ne    el1_trap
+-      mrs     x1, vttbr_el2           // If vttbr is valid, the 64bit guest
+-      cbnz    x1, el1_trap            // called HVC
++      mrs     x1, vttbr_el2           // If vttbr is valid, the guest
++      cbnz    x1, el1_hvc_guest       // called HVC
+       /* Here, we're pretty sure the host called HVC. */
+       ldp     x0, x1, [sp], #16
+@@ -100,6 +102,20 @@ alternative_endif
+       eret
++el1_hvc_guest:
++      /*
++       * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
++       * The workaround has already been applied on the host,
++       * so let's quickly get back to the guest. We don't bother
++       * restoring x1, as it can be clobbered anyway.
++       */
++      ldr     x1, [sp]                                // Guest's x0
++      eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++      cbnz    w1, el1_trap
++      mov     x0, x1
++      add     sp, sp, #16
++      eret
++
+ el1_trap:
+       /*
+        * x0: ESR_EC
diff --git a/queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch b/queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
new file mode 100644 (file)
index 0000000..1416801
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:07 +0000
+Subject: [Variant 2/Spectre-v2] arm64: KVM: Increment PC after handling an SMC trap
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit f5115e8869e1 upstream.
+
+When handling an SMC trap, the "preferred return address" is set
+to that of the SMC, and not the next PC (which is a departure from
+the behaviour of an SMC that isn't trapped).
+
+Increment PC in the handler, as the guest is otherwise forever
+stuck...
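+
+The fix (shown in full in the handle_exit.c hunk below) boils down to
+advancing the PC after setting the return value:
+
+    static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+    {
+        vcpu_set_reg(vcpu, 0, ~0UL);    /* SMC not supported */
+        /* skip the trapped SMC so the guest doesn't re-execute it */
+        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+        return 1;
+    }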
+
+Cc: stable@vger.kernel.org
+Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls")
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/handle_exit.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *v
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
++      /*
++       * "If an SMC instruction executed at Non-secure EL1 is
++       * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
++       * Trap exception, not a Secure Monitor Call exception [...]"
++       *
++       * We need to advance the PC after the trap, as it would
++       * otherwise return to the same address...
++       */
+       vcpu_set_reg(vcpu, 0, ~0UL);
++      kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+       return 1;
+ }
diff --git a/queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch b/queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
new file mode 100644 (file)
index 0000000..80e97d7
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 3 Jan 2018 16:38:37 +0000
+Subject: [Variant 2/Spectre-v2] arm64: KVM: Make PSCI_VERSION a fast path
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 90348689d500 upstream.
+
+For those CPUs that require PSCI to perform a BP invalidation,
+going all the way to the PSCI code for not much is a waste of
+precious cycles. Let's terminate that call as early as possible.
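+
+Condensed from the switch.c hunk below (trap_is_hvc() is shorthand here
+for the two ESR_ELx_EC_HVC{32,64} class checks in the real code), the
+exit path answers PSCI_VERSION without ever leaving hyp:
+
+    if (exit_code == ARM_EXCEPTION_TRAP && trap_is_hvc(vcpu) &&
+        vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+        u64 val = PSCI_RET_NOT_SUPPORTED;
+
+        if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+            val = 2;                    /* PSCI 0.2 */
+        vcpu_set_reg(vcpu, 0, val);
+        goto again;                     /* straight back into the guest */
+    }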
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/switch.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
++#include <uapi/linux/psci.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_emulate.h>
+@@ -341,6 +342,18 @@ again:
+       if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+               goto again;
++      if (exit_code == ARM_EXCEPTION_TRAP &&
++          (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
++           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
++          vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
++              u64 val = PSCI_RET_NOT_SUPPORTED;
++              if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
++                      val = 2;
++
++              vcpu_set_reg(vcpu, 0, val);
++              goto again;
++      }
++
+       if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+           exit_code == ARM_EXCEPTION_TRAP) {
+               bool valid;
diff --git a/queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch b/queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
new file mode 100644 (file)
index 0000000..4c33c4f
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:14 +0000
+Subject: [Variant 2/Spectre-v2] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 6167ec5c9145 upstream.
+
+A new feature of SMCCC 1.1 is that it offers firmware-based CPU
+workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides
+BP hardening for CVE-2017-5715.
+
+If the host has some mitigation for this issue, report that
+we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the
+host workaround on every guest exit.
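+
+The reporting side (condensed from the psci.c hunk below) is a small
+addition to the SMCCC dispatch:
+
+    case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+        feature = smccc_get_arg1(vcpu);
+        switch (feature) {
+        case ARM_SMCCC_ARCH_WORKAROUND_1:
+            if (kvm_arm_harden_branch_predictor())
+                val = 0;        /* workaround implemented */
+            break;
+        }
+        break;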
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Conflicts:
+       arch/arm/include/asm/kvm_host.h
+       arch/arm64/include/asm/kvm_host.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_host.h   |    6 ++++++
+ arch/arm64/include/asm/kvm_host.h |    5 +++++
+ include/linux/arm-smccc.h         |    5 +++++
+ virt/kvm/arm/psci.c               |    9 ++++++++-
+ 4 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -301,4 +301,10 @@ int kvm_arm_vcpu_arch_has_attr(struct kv
+ /* All host FP/SIMD state is restored on guest exit, so nothing to save: */
+ static inline void kvm_fpsimd_flush_cpu_state(void) {}
++static inline bool kvm_arm_harden_branch_predictor(void)
++{
++      /* No way to detect it yet, pretend it is not there. */
++      return false;
++}
++
+ #endif /* __ARM_KVM_HOST_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -396,4 +396,9 @@ static inline void kvm_fpsimd_flush_cpu_
+               sve_flush_cpu_state();
+ }
++static inline bool kvm_arm_harden_branch_predictor(void)
++{
++      return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
++}
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -73,6 +73,11 @@
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 1)
++#define ARM_SMCCC_ARCH_WORKAROUND_1                                   \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0x8000)
++
+ #ifndef __ASSEMBLY__
+ #include <linux/linkage.h>
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -405,13 +405,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+ {
+       u32 func_id = smccc_get_function(vcpu);
+       u32 val = PSCI_RET_NOT_SUPPORTED;
++      u32 feature;
+       switch (func_id) {
+       case ARM_SMCCC_VERSION_FUNC_ID:
+               val = ARM_SMCCC_VERSION_1_1;
+               break;
+       case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+-              /* Nothing supported yet */
++              feature = smccc_get_arg1(vcpu);
++              switch(feature) {
++              case ARM_SMCCC_ARCH_WORKAROUND_1:
++                      if (kvm_arm_harden_branch_predictor())
++                              val = 0;
++                      break;
++              }
+               break;
+       default:
+               return kvm_psci_call(vcpu);
diff --git a/queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch b/queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
new file mode 100644 (file)
index 0000000..162192f
--- /dev/null
@@ -0,0 +1,127 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 3 Jan 2018 16:38:35 +0000
+Subject: [Variant 2/Spectre-v2] arm64: KVM: Use per-CPU vector when BP hardening is enabled
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 6840bdd73d07 upstream.
+
+Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+Conflicts:
+       arch/arm/include/asm/kvm_mmu.h
+       arch/arm64/include/asm/kvm_mmu.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h   |   10 ++++++++++
+ arch/arm64/include/asm/kvm_mmu.h |   38 ++++++++++++++++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/switch.c      |    2 +-
+ virt/kvm/arm/arm.c               |    8 +++++++-
+ 4 files changed, 56 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_
+       return 8;
+ }
++static inline void *kvm_get_hyp_vector(void)
++{
++      return kvm_ksym_ref(__kvm_hyp_vector);
++}
++
++static inline int kvm_map_vectors(void)
++{
++      return 0;
++}
++
+ #endif        /* !__ASSEMBLY__ */
+ #endif /* __ARM_KVM_MMU_H__ */
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_
+       return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+ }
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++#include <asm/mmu.h>
++
++static inline void *kvm_get_hyp_vector(void)
++{
++      struct bp_hardening_data *data = arm64_get_bp_hardening_data();
++      void *vect = kvm_ksym_ref(__kvm_hyp_vector);
++
++      if (data->fn) {
++              vect = __bp_harden_hyp_vecs_start +
++                     data->hyp_vectors_slot * SZ_2K;
++
++              if (!has_vhe())
++                      vect = lm_alias(vect);
++      }
++
++      return vect;
++}
++
++static inline int kvm_map_vectors(void)
++{
++      return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
++                                 kvm_ksym_ref(__bp_harden_hyp_vecs_end),
++                                 PAGE_HYP_EXEC);
++}
++
++#else
++static inline void *kvm_get_hyp_vector(void)
++{
++      return kvm_ksym_ref(__kvm_hyp_vector);
++}
++
++static inline int kvm_map_vectors(void)
++{
++      return 0;
++}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -52,7 +52,7 @@ static void __hyp_text __activate_traps_
+       val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
+       write_sysreg(val, cpacr_el1);
+-      write_sysreg(__kvm_hyp_vector, vbar_el1);
++      write_sysreg(kvm_get_hyp_vector(), vbar_el1);
+ }
+ static void __hyp_text __activate_traps_nvhe(void)
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -1158,7 +1158,7 @@ static void cpu_init_hyp_mode(void *dumm
+       pgd_ptr = kvm_mmu_get_httbr();
+       stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
+       hyp_stack_ptr = stack_page + PAGE_SIZE;
+-      vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
++      vector_ptr = (unsigned long)kvm_get_hyp_vector();
+       __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
+       __cpu_init_stage2();
+@@ -1403,6 +1403,12 @@ static int init_hyp_mode(void)
+               goto out_err;
+       }
++      err = kvm_map_vectors();
++      if (err) {
++              kvm_err("Cannot map vectors\n");
++              goto out_err;
++      }
++
+       /*
+        * Map the Hyp stack pages
+        */
diff --git a/queue-4.15/arm64-make-user_ds-an-inclusive-limit.patch b/queue-4.15/arm64-make-user_ds-an-inclusive-limit.patch
new file mode 100644 (file)
index 0000000..cc5191f
--- /dev/null
@@ -0,0 +1,150 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Mon, 5 Feb 2018 15:34:18 +0000
+Subject: [Variant 1/Spectre-v1] arm64: Make USER_DS an inclusive limit
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+
+Commit 51369e398d0d upstream.
+
+Currently, USER_DS represents an exclusive limit while KERNEL_DS is
+inclusive. In order to do some clever trickery for speculation-safe
+masking, we need them both to behave equivalently - there aren't enough
+bits to make KERNEL_DS exclusive, so we have precisely one option. This
+also happens to correct a longstanding false negative for a range
+ending on the very top byte of kernel memory.
+
+Mark Rutland points out that we've actually got the semantics of
+addresses vs. segments muddled up in most of the places we need to
+amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
+can correct those properly instead of just pasting "-1"s everywhere.
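+
+As a stand-alone illustration of the arithmetic (a hypothetical test
+program, not part of the patch; it relies on the GCC/Clang __int128
+extension as a 65-bit-safe reference model for the asm sequence below):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* "(u65)addr + (u65)size <= (u65)limit + 1", limit inclusive */
+    static int range_ok(uint64_t addr, uint64_t size, uint64_t limit)
+    {
+        return (unsigned __int128)addr + size <=
+               (unsigned __int128)limit + 1;
+    }
+
+    int main(void)
+    {
+        uint64_t user_ds = (1UL << 48) - 1;     /* e.g. VA_BITS == 48 */
+
+        printf("%d\n", range_ok(user_ds, 1, user_ds)); /* 1: ends on the limit */
+        printf("%d\n", range_ok(user_ds, 2, user_ds)); /* 0: one byte past it */
+        return 0;
+    }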
+
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/processor.h |    3 ++
+ arch/arm64/include/asm/uaccess.h   |   45 +++++++++++++++++++++----------------
+ arch/arm64/kernel/entry.S          |    4 +--
+ arch/arm64/mm/fault.c              |    4 +--
+ 4 files changed, 33 insertions(+), 23 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -21,6 +21,9 @@
+ #define TASK_SIZE_64          (UL(1) << VA_BITS)
++#define KERNEL_DS     UL(-1)
++#define USER_DS               (TASK_SIZE_64 - 1)
++
+ #ifndef __ASSEMBLY__
+ /*
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -35,10 +35,7 @@
+ #include <asm/compiler.h>
+ #include <asm/extable.h>
+-#define KERNEL_DS     (-1UL)
+ #define get_ds()      (KERNEL_DS)
+-
+-#define USER_DS               TASK_SIZE_64
+ #define get_fs()      (current_thread_info()->addr_limit)
+ static inline void set_fs(mm_segment_t fs)
+@@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t f
+  * Returns 1 if the range is valid, 0 otherwise.
+  *
+  * This is equivalent to the following test:
+- * (u65)addr + (u65)size <= current->addr_limit
+- *
+- * This needs 65-bit arithmetic.
++ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
+  */
+-#define __range_ok(addr, size)                                                \
+-({                                                                    \
+-      unsigned long __addr = (unsigned long)(addr);                   \
+-      unsigned long flag, roksum;                                     \
+-      __chk_user_ptr(addr);                                           \
+-      asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"         \
+-              : "=&r" (flag), "=&r" (roksum)                          \
+-              : "1" (__addr), "Ir" (size),                            \
+-                "r" (current_thread_info()->addr_limit)               \
+-              : "cc");                                                \
+-      flag;                                                           \
+-})
++static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
++{
++      unsigned long limit = current_thread_info()->addr_limit;
++
++      __chk_user_ptr(addr);
++      asm volatile(
++      // A + B <= C + 1 for all A,B,C, in four easy steps:
++      // 1: X = A + B; X' = X % 2^64
++      "       adds    %0, %0, %2\n"
++      // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
++      "       csel    %1, xzr, %1, hi\n"
++      // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
++      //    to compensate for the carry flag being set in step 4. For
++      //    X > 2^64, X' merely has to remain nonzero, which it does.
++      "       csinv   %0, %0, xzr, cc\n"
++      // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
++      //    comes from the carry in being clear. Otherwise, we are
++      //    testing X' - C == 0, subject to the previous adjustments.
++      "       sbcs    xzr, %0, %1\n"
++      "       cset    %0, ls\n"
++      : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
++
++      return addr;
++}
+ /*
+  * When dealing with data aborts, watchpoints, or instruction traps we may end
+@@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t f
+  */
+ #define untagged_addr(addr)           sign_extend64(addr, 55)
+-#define access_ok(type, addr, size)   __range_ok(addr, size)
++#define access_ok(type, addr, size)   __range_ok((unsigned long)(addr), size)
+ #define user_addr_max                 get_fs
+ #define _ASM_EXTABLE(from, to)                                                \
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -167,10 +167,10 @@ alternative_else_nop_endif
+       .else
+       add     x21, sp, #S_FRAME_SIZE
+       get_thread_info tsk
+-      /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
++      /* Save the task's original addr_limit and set USER_DS */
+       ldr     x20, [tsk, #TSK_TI_ADDR_LIMIT]
+       str     x20, [sp, #S_ORIG_ADDR_LIMIT]
+-      mov     x20, #TASK_SIZE_64
++      mov     x20, #USER_DS
+       str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
+       /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
+       .endif /* \el == 0 */
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -240,7 +240,7 @@ static inline bool is_permission_fault(u
+       if (fsc_type == ESR_ELx_FSC_PERM)
+               return true;
+-      if (addr < USER_DS && system_uses_ttbr0_pan())
++      if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+               return fsc_type == ESR_ELx_FSC_FAULT &&
+                       (regs->pstate & PSR_PAN_BIT);
+@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsig
+               mm_flags |= FAULT_FLAG_WRITE;
+       }
+-      if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
++      if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
+               /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+               if (regs->orig_addr_limit == KERNEL_DS)
+                       die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
diff --git a/queue-4.15/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch b/queue-4.15/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
new file mode 100644 (file)
index 0000000..b2fd7a3
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 13:58:08 +0000
+Subject: [Variant 3/Meltdown] arm64: mm: Add arm64_kernel_unmapped_at_el0 helper
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit fc0e1299da54 upstream.
+
+In order for code such as TLB invalidation to operate efficiently when
+the decision to map the kernel at EL0 is determined at runtime, this
+patch introduces a helper function, arm64_kernel_unmapped_at_el0, to
+determine whether or not the kernel is mapped whilst running in userspace.
+
+Currently, this just reports the value of CONFIG_UNMAP_KERNEL_AT_EL0,
+but will later be hooked up to a fake CPU capability using a static key.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/mmu.h |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -19,6 +19,8 @@
+ #define MMCF_AARCH32  0x1     /* mm context flag for AArch32 executables */
+ #define USER_ASID_FLAG        (UL(1) << 48)
++#ifndef __ASSEMBLY__
++
+ typedef struct {
+       atomic64_t      id;
+       void            *vdso;
+@@ -32,6 +34,11 @@ typedef struct {
+  */
+ #define ASID(mm)      ((mm)->context.id.counter & 0xffff)
++static inline bool arm64_kernel_unmapped_at_el0(void)
++{
++      return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
++}
++
+ extern void paging_init(void);
+ extern void bootmem_init(void);
+ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
+@@ -42,4 +49,5 @@ extern void create_pgd_mapping(struct mm
+ extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+ extern void mark_linear_text_alias_ro(void);
++#endif        /* !__ASSEMBLY__ */
+ #endif
diff --git a/queue-4.15/arm64-mm-allocate-asids-in-pairs.patch b/queue-4.15/arm64-mm-allocate-asids-in-pairs.patch
new file mode 100644 (file)
index 0000000..1fa7ca8
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 14:10:28 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Allocate ASIDs in pairs
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 0c8ea531b774 upstream.
+
+In preparation for separate kernel/user ASIDs, allocate them in pairs
+for each mm_struct. The bottom bit distinguishes the two: if it is set,
+then the ASID will map only userspace.
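+
+A stand-alone sketch of the even/odd encoding (a hypothetical demo, not
+patch code; the generation masking done by the real asid2idx()/
+idx2asid() macros in the hunk below is omitted):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* allocator index <-> kernel ASID; ASID | 1 is the user ASID */
+    static uint64_t idx2asid(uint64_t idx)  { return idx << 1; }
+    static uint64_t asid2idx(uint64_t asid) { return asid >> 1; }
+
+    int main(void)
+    {
+        uint64_t kernel_asid = idx2asid(5);      /* 10 */
+        uint64_t user_asid   = kernel_asid | 1;  /* 11: bottom bit set */
+
+        printf("idx 5 -> kernel %llu, user %llu, back to idx %llu\n",
+               (unsigned long long)kernel_asid,
+               (unsigned long long)user_asid,
+               (unsigned long long)asid2idx(user_asid));
+        return 0;
+    }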
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/mmu.h |    1 +
+ arch/arm64/mm/context.c      |   25 +++++++++++++++++--------
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -17,6 +17,7 @@
+ #define __ASM_MMU_H
+ #define MMCF_AARCH32  0x1     /* mm context flag for AArch32 executables */
++#define USER_ASID_FLAG        (UL(1) << 48)
+ typedef struct {
+       atomic64_t      id;
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
+ #define ASID_MASK             (~GENMASK(asid_bits - 1, 0))
+ #define ASID_FIRST_VERSION    (1UL << asid_bits)
+-#define NUM_USER_ASIDS                ASID_FIRST_VERSION
++
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++#define NUM_USER_ASIDS                (ASID_FIRST_VERSION >> 1)
++#define asid2idx(asid)                (((asid) & ~ASID_MASK) >> 1)
++#define idx2asid(idx)         (((idx) << 1) & ~ASID_MASK)
++#else
++#define NUM_USER_ASIDS                (ASID_FIRST_VERSION)
++#define asid2idx(asid)                ((asid) & ~ASID_MASK)
++#define idx2asid(idx)         asid2idx(idx)
++#endif
+ /* Get the ASIDBits supported by the current CPU */
+ static u32 get_cpu_asid_bits(void)
+@@ -98,7 +107,7 @@ static void flush_context(unsigned int c
+                */
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, i);
+-              __set_bit(asid & ~ASID_MASK, asid_map);
++              __set_bit(asid2idx(asid), asid_map);
+               per_cpu(reserved_asids, i) = asid;
+       }
+@@ -153,16 +162,16 @@ static u64 new_context(struct mm_struct
+                * We had a valid ASID in a previous life, so try to re-use
+                * it if possible.
+                */
+-              asid &= ~ASID_MASK;
+-              if (!__test_and_set_bit(asid, asid_map))
++              if (!__test_and_set_bit(asid2idx(asid), asid_map))
+                       return newasid;
+       }
+       /*
+        * Allocate a free ASID. If we can't find one, take a note of the
+-       * currently active ASIDs and mark the TLBs as requiring flushes.
+-       * We always count from ASID #1, as we use ASID #0 when setting a
+-       * reserved TTBR0 for the init_mm.
++       * currently active ASIDs and mark the TLBs as requiring flushes.  We
++       * always count from ASID #2 (index 1), as we use ASID #0 when setting
++       * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
++       * pairs.
+        */
+       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+       if (asid != NUM_USER_ASIDS)
+@@ -179,7 +188,7 @@ static u64 new_context(struct mm_struct
+ set_asid:
+       __set_bit(asid, asid_map);
+       cur_idx = asid;
+-      return asid | generation;
++      return idx2asid(asid) | generation;
+ }
+ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
diff --git a/queue-4.15/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch b/queue-4.15/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
new file mode 100644 (file)
index 0000000..82927a2
--- /dev/null
@@ -0,0 +1,230 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 13:58:16 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 27a921e75711 upstream.
+
+With the ASID now installed in TTBR1, we can re-enable ARM64_SW_TTBR0_PAN
+by ensuring that we switch to a reserved ASID of zero when disabling
+user access and restore the active user ASID on the uaccess enable path.
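+
+In C (condensed from the uaccess.h hunk below), the enable path now
+reinstates the active user ASID together with TTBR0:
+
+    local_irq_save(flags);
+    ttbr0 = current_thread_info()->ttbr0;   /* pgd | ASID << 48 */
+
+    /* Restore active ASID */
+    ttbr1 = read_sysreg(ttbr1_el1);
+    ttbr1 |= ttbr0 & (0xffffUL << 48);
+    write_sysreg(ttbr1, ttbr1_el1);
+    isb();
+
+    /* Restore user page table */
+    write_sysreg(ttbr0, ttbr0_el1);
+    isb();
+    local_irq_restore(flags);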
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig                   |    1 -
+ arch/arm64/include/asm/asm-uaccess.h |   25 +++++++++++++++++--------
+ arch/arm64/include/asm/uaccess.h     |   21 +++++++++++++++++----
+ arch/arm64/kernel/entry.S            |    4 ++--
+ arch/arm64/lib/clear_user.S          |    2 +-
+ arch/arm64/lib/copy_from_user.S      |    2 +-
+ arch/arm64/lib/copy_in_user.S        |    2 +-
+ arch/arm64/lib/copy_to_user.S        |    2 +-
+ arch/arm64/mm/cache.S                |    2 +-
+ arch/arm64/xen/hypercall.S           |    2 +-
+ 10 files changed, 42 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -920,7 +920,6 @@ endif
+ config ARM64_SW_TTBR0_PAN
+       bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+-      depends on BROKEN       # Temporary while switch_mm is reworked
+       help
+         Enabling this option prevents the kernel from accessing
+         user-space memory directly by pointing TTBR0_EL1 to a reserved
+--- a/arch/arm64/include/asm/asm-uaccess.h
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -16,11 +16,20 @@
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
++      sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
++      bic     \tmp1, \tmp1, #(0xffff << 48)
++      msr     ttbr1_el1, \tmp1                // set reserved ASID
++      isb
+       .endm
+-      .macro  __uaccess_ttbr0_enable, tmp1
++      .macro  __uaccess_ttbr0_enable, tmp1, tmp2
+       get_thread_info \tmp1
+       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
++      mrs     \tmp2, ttbr1_el1
++      extr    \tmp2, \tmp2, \tmp1, #48
++      ror     \tmp2, \tmp2, #16
++      msr     ttbr1_el1, \tmp2                // set the active ASID
++      isb
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+@@ -31,18 +40,18 @@ alternative_if_not ARM64_HAS_PAN
+ alternative_else_nop_endif
+       .endm
+-      .macro  uaccess_ttbr0_enable, tmp1, tmp2
++      .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
+ alternative_if_not ARM64_HAS_PAN
+-      save_and_disable_irq \tmp2              // avoid preemption
+-      __uaccess_ttbr0_enable \tmp1
+-      restore_irq \tmp2
++      save_and_disable_irq \tmp3              // avoid preemption
++      __uaccess_ttbr0_enable \tmp1, \tmp2
++      restore_irq \tmp3
+ alternative_else_nop_endif
+       .endm
+ #else
+       .macro  uaccess_ttbr0_disable, tmp1
+       .endm
+-      .macro  uaccess_ttbr0_enable, tmp1, tmp2
++      .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
+       .endm
+ #endif
+@@ -56,8 +65,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
+ alternative_else_nop_endif
+       .endm
+-      .macro  uaccess_enable_not_uao, tmp1, tmp2
+-      uaccess_ttbr0_enable \tmp1, \tmp2
++      .macro  uaccess_enable_not_uao, tmp1, tmp2, tmp3
++      uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
+ alternative_if ARM64_ALT_PAN_NOT_UAO
+       SET_PSTATE_PAN(0)
+ alternative_else_nop_endif
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -107,15 +107,19 @@ static inline void __uaccess_ttbr0_disab
+ {
+       unsigned long ttbr;
++      ttbr = read_sysreg(ttbr1_el1);
+       /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+-      ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+-      write_sysreg(ttbr, ttbr0_el1);
++      write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
++      isb();
++      /* Set reserved ASID */
++      ttbr &= ~(0xffffUL << 48);
++      write_sysreg(ttbr, ttbr1_el1);
+       isb();
+ }
+ static inline void __uaccess_ttbr0_enable(void)
+ {
+-      unsigned long flags;
++      unsigned long flags, ttbr0, ttbr1;
+       /*
+        * Disable interrupts to avoid preemption between reading the 'ttbr0'
+@@ -123,7 +127,16 @@ static inline void __uaccess_ttbr0_enabl
+        * roll-over and an update of 'ttbr0'.
+        */
+       local_irq_save(flags);
+-      write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
++      ttbr0 = current_thread_info()->ttbr0;
++
++      /* Restore active ASID */
++      ttbr1 = read_sysreg(ttbr1_el1);
++      ttbr1 |= ttbr0 & (0xffffUL << 48);
++      write_sysreg(ttbr1, ttbr1_el1);
++      isb();
++
++      /* Restore user page table */
++      write_sysreg(ttbr0, ttbr0_el1);
+       isb();
+       local_irq_restore(flags);
+ }
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -184,7 +184,7 @@ alternative_if ARM64_HAS_PAN
+ alternative_else_nop_endif
+       .if     \el != 0
+-      mrs     x21, ttbr0_el1
++      mrs     x21, ttbr1_el1
+       tst     x21, #0xffff << 48              // Check for the reserved ASID
+       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
+       b.eq    1f                              // TTBR0 access already disabled
+@@ -248,7 +248,7 @@ alternative_else_nop_endif
+       tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+       .endif
+-      __uaccess_ttbr0_enable x0
++      __uaccess_ttbr0_enable x0, x1
+       .if     \el == 0
+       /*
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -30,7 +30,7 @@
+  * Alignment fixed up by hardware.
+  */
+ ENTRY(__clear_user)
+-      uaccess_enable_not_uao x2, x3
++      uaccess_enable_not_uao x2, x3, x4
+       mov     x2, x1                  // save the size for fixup return
+       subs    x1, x1, #8
+       b.mi    2f
+--- a/arch/arm64/lib/copy_from_user.S
++++ b/arch/arm64/lib/copy_from_user.S
+@@ -64,7 +64,7 @@
+ end   .req    x5
+ ENTRY(__arch_copy_from_user)
+-      uaccess_enable_not_uao x3, x4
++      uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+       uaccess_disable_not_uao x3
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -65,7 +65,7 @@
+ end   .req    x5
+ ENTRY(raw_copy_in_user)
+-      uaccess_enable_not_uao x3, x4
++      uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+       uaccess_disable_not_uao x3
+--- a/arch/arm64/lib/copy_to_user.S
++++ b/arch/arm64/lib/copy_to_user.S
+@@ -63,7 +63,7 @@
+ end   .req    x5
+ ENTRY(__arch_copy_to_user)
+-      uaccess_enable_not_uao x3, x4
++      uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+       uaccess_disable_not_uao x3
+--- a/arch/arm64/mm/cache.S
++++ b/arch/arm64/mm/cache.S
+@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
+  *    - end     - virtual end address of region
+  */
+ ENTRY(__flush_cache_user_range)
+-      uaccess_ttbr0_enable x2, x3
++      uaccess_ttbr0_enable x2, x3, x4
+       dcache_line_size x2, x3
+       sub     x3, x2, #1
+       bic     x4, x0, x3
+--- a/arch/arm64/xen/hypercall.S
++++ b/arch/arm64/xen/hypercall.S
+@@ -101,7 +101,7 @@ ENTRY(privcmd_call)
+        * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+        * is enabled (it implies that hardware UAO and PAN disabled).
+        */
+-      uaccess_ttbr0_enable x6, x7
++      uaccess_ttbr0_enable x6, x7, x8
+       hvc XEN_IMM
+       /*
diff --git a/queue-4.15/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch b/queue-4.15/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
new file mode 100644 (file)
index 0000000..294b9cc
--- /dev/null
@@ -0,0 +1,86 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 1 Dec 2017 17:33:48 +0000
+Subject: [Variant 3/Meltdown] arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit b519538dfefc upstream.
+
+There are now a handful of open-coded masks to extract the ASID from a
+TTBR value, so introduce a TTBR_ASID_MASK and use that instead.
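+
+Before and after, in one line (taken from the hunks below):
+
+    ttbr &= ~(0xffffUL << 48);      /* open-coded */
+    ttbr &= ~TTBR_ASID_MASK;        /* with TTBR_ASID_MASK = UL(0xffff) << 48 */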
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/asm-uaccess.h |    3 ++-
+ arch/arm64/include/asm/mmu.h         |    1 +
+ arch/arm64/include/asm/uaccess.h     |    4 ++--
+ arch/arm64/kernel/entry.S            |    2 +-
+ 4 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/asm-uaccess.h
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -4,6 +4,7 @@
+ #include <asm/alternative.h>
+ #include <asm/kernel-pgtable.h>
++#include <asm/mmu.h>
+ #include <asm/sysreg.h>
+ #include <asm/assembler.h>
+@@ -17,7 +18,7 @@
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
+-      bic     \tmp1, \tmp1, #(0xffff << 48)
++      bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+       msr     ttbr1_el1, \tmp1                // set reserved ASID
+       isb
+       .endm
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -18,6 +18,7 @@
+ #define MMCF_AARCH32  0x1     /* mm context flag for AArch32 executables */
+ #define USER_ASID_FLAG        (UL(1) << 48)
++#define TTBR_ASID_MASK        (UL(0xffff) << 48)
+ #ifndef __ASSEMBLY__
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -112,7 +112,7 @@ static inline void __uaccess_ttbr0_disab
+       write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+       isb();
+       /* Set reserved ASID */
+-      ttbr &= ~(0xffffUL << 48);
++      ttbr &= ~TTBR_ASID_MASK;
+       write_sysreg(ttbr, ttbr1_el1);
+       isb();
+ }
+@@ -131,7 +131,7 @@ static inline void __uaccess_ttbr0_enabl
+       /* Restore active ASID */
+       ttbr1 = read_sysreg(ttbr1_el1);
+-      ttbr1 |= ttbr0 & (0xffffUL << 48);
++      ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+       write_sysreg(ttbr1, ttbr1_el1);
+       isb();
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -205,7 +205,7 @@ alternative_else_nop_endif
+       .if     \el != 0
+       mrs     x21, ttbr1_el1
+-      tst     x21, #0xffff << 48              // Check for the reserved ASID
++      tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
+       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
+       b.eq    1f                              // TTBR0 access already disabled
+       and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
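
For reference, the effect of TTBR_ASID_MASK can be exercised in a standalone C sketch; the TTBR values below are invented, and plain variables stand in for the TTBR0_EL1/TTBR1_EL1 system registers:

#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_MASK (0xffffULL << 48)  /* ASID lives in TTBR bits [63:48] */

int main(void)
{
    uint64_t ttbr0 = (0x002aULL << 48) | 0x40001000ULL; /* ASID 0x2a + baddr */
    uint64_t ttbr1 = 0x40200000ULL;

    /* disable path: clear the ASID field, selecting the reserved ASID 0 */
    uint64_t reserved = ttbr1 & ~TTBR_ASID_MASK;

    /* enable path: copy the active ASID from TTBR0 back into TTBR1 */
    uint64_t restored = (ttbr1 & ~TTBR_ASID_MASK) | (ttbr0 & TTBR_ASID_MASK);

    printf("reserved=%#llx restored=%#llx\n",
           (unsigned long long)reserved, (unsigned long long)restored);
    return 0;
}
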
diff --git a/queue-4.15/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch b/queue-4.15/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
new file mode 100644 (file)
index 0000000..61eb738
--- /dev/null
@@ -0,0 +1,85 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 14:13:33 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 9b0de864b5bc upstream.
+
+Since an mm has both a kernel and a user ASID, we need to ensure that
+broadcast TLB maintenance targets both address spaces so that things
+like CoW continue to work with the uaccess primitives in the kernel.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/tlbflush.h |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -23,6 +23,7 @@
+ #include <linux/sched.h>
+ #include <asm/cputype.h>
++#include <asm/mmu.h>
+ /*
+  * Raw TLBI operations.
+@@ -54,6 +55,11 @@
+ #define __tlbi(op, ...)               __TLBI_N(op, ##__VA_ARGS__, 1, 0)
++#define __tlbi_user(op, arg) do {                                             \
++      if (arm64_kernel_unmapped_at_el0())                                     \
++              __tlbi(op, (arg) | USER_ASID_FLAG);                             \
++} while (0)
++
+ /*
+  *    TLB Management
+  *    ==============
+@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct m
+       dsb(ishst);
+       __tlbi(aside1is, asid);
++      __tlbi_user(aside1is, asid);
+       dsb(ish);
+ }
+@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct
+       dsb(ishst);
+       __tlbi(vale1is, addr);
++      __tlbi_user(vale1is, addr);
+       dsb(ish);
+ }
+@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(str
+       dsb(ishst);
+       for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+-              if (last_level)
++              if (last_level) {
+                       __tlbi(vale1is, addr);
+-              else
++                      __tlbi_user(vale1is, addr);
++              } else {
+                       __tlbi(vae1is, addr);
++                      __tlbi_user(vae1is, addr);
++              }
+       }
+       dsb(ish);
+ }
+@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(s
+       unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+       __tlbi(vae1is, addr);
++      __tlbi_user(vae1is, addr);
+       dsb(ish);
+ }
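
The dual invalidate reduces to: issue the TLBI for the kernel ASID, then repeat it with bit 48 set to hit the paired user ASID, but only when KPTI is live. A toy C rendering (USER_ASID_FLAG mirrors mmu.h; tlbi() and kpti_enabled are stand-ins for the real instruction and for arm64_kernel_unmapped_at_el0()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USER_ASID_FLAG (1ULL << 48)

static bool kpti_enabled = true;  /* stand-in for arm64_kernel_unmapped_at_el0() */

static void tlbi(const char *op, uint64_t arg)  /* stand-in for __tlbi() */
{
    printf("tlbi %s, %#llx\n", op, (unsigned long long)arg);
}

/* mirror of __tlbi_user(): repeat the op against the odd-numbered user ASID */
static void tlbi_user(const char *op, uint64_t arg)
{
    if (kpti_enabled)
        tlbi(op, arg | USER_ASID_FLAG);
}

int main(void)
{
    uint64_t asid_arg = 0x2aULL << 48;  /* illustrative encoded ASID */

    tlbi("aside1is", asid_arg);         /* kernel ASID */
    tlbi_user("aside1is", asid_arg);    /* paired user ASID */
    return 0;
}
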
diff --git a/queue-4.15/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch b/queue-4.15/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
new file mode 100644 (file)
index 0000000..367fcb0
--- /dev/null
@@ -0,0 +1,112 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:14:17 +0000
+Subject: [Variant 3/Meltdown] arm64: mm: Map entry trampoline into trampoline and kernel page tables
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 51a0048beb44 upstream.
+
+The exception entry trampoline needs to be mapped at the same virtual
+address in both the trampoline page table (which maps nothing else)
+and also the kernel page table, so that we can swizzle TTBR1_EL1 on
+exceptions from and return to EL0.
+
+This patch maps the trampoline at a fixed virtual address in the fixmap
+area of the kernel virtual address space, which allows the kernel proper
+to be randomized with respect to the trampoline when KASLR is enabled.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fixmap.h  |    4 ++++
+ arch/arm64/include/asm/pgtable.h |    1 +
+ arch/arm64/kernel/asm-offsets.c  |    6 +++++-
+ arch/arm64/mm/mmu.c              |   23 +++++++++++++++++++++++
+ 4 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -58,6 +58,10 @@ enum fixed_addresses {
+       FIX_APEI_GHES_NMI,
+ #endif /* CONFIG_ACPI_APEI_GHES */
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      FIX_ENTRY_TRAMP_TEXT,
++#define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+       __end_of_permanent_fixed_addresses,
+       /*
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -683,6 +683,7 @@ static inline void pmdp_set_wrprotect(st
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
++extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+ /*
+  * Encode and decode a swap entry:
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -24,6 +24,7 @@
+ #include <linux/kvm_host.h>
+ #include <linux/suspend.h>
+ #include <asm/cpufeature.h>
++#include <asm/fixmap.h>
+ #include <asm/thread_info.h>
+ #include <asm/memory.h>
+ #include <asm/smp_plat.h>
+@@ -148,11 +149,14 @@ int main(void)
+   DEFINE(ARM_SMCCC_RES_X2_OFFS,               offsetof(struct arm_smccc_res, a2));
+   DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,     offsetof(struct arm_smccc_quirk, id));
+   DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,  offsetof(struct arm_smccc_quirk, state));
+-
+   BLANK();
+   DEFINE(HIBERN_PBE_ORIG,     offsetof(struct pbe, orig_address));
+   DEFINE(HIBERN_PBE_ADDR,     offsetof(struct pbe, address));
+   DEFINE(HIBERN_PBE_NEXT,     offsetof(struct pbe, next));
+   DEFINE(ARM64_FTR_SYSVAL,    offsetof(struct arm64_ftr_reg, sys_val));
++  BLANK();
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++  DEFINE(TRAMP_VALIAS,                TRAMP_VALIAS);
++#endif
+   return 0;
+ }
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -525,6 +525,29 @@ static int __init parse_rodata(char *arg
+ }
+ early_param("rodata", parse_rodata);
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++static int __init map_entry_trampoline(void)
++{
++      extern char __entry_tramp_text_start[];
++
++      pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
++      phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
++
++      /* The trampoline is always mapped and can therefore be global */
++      pgprot_val(prot) &= ~PTE_NG;
++
++      /* Map only the text into the trampoline page table */
++      memset(tramp_pg_dir, 0, PGD_SIZE);
++      __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
++                           prot, pgd_pgtable_alloc, 0);
++
++      /* ...as well as the kernel page table */
++      __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
++      return 0;
++}
++core_initcall(map_entry_trampoline);
++#endif
++
+ /*
+  * Create fine-grained mappings for the kernel.
+  */
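
TRAMP_VALIAS comes out of the fixmap, where each slot sits at a fixed offset below the top of the kernel VA range, so the address is identical no matter which page table it is resolved through. A sketch of the usual __fix_to_virt() arithmetic (FIXADDR_TOP and the slot index here are illustrative, not the real layout):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define FIXADDR_TOP 0xfffffffffffe0000ULL  /* invented top-of-fixmap address */

/* typical fixmap arithmetic: slot index -> fixed virtual address */
static uint64_t fix_to_virt(unsigned int idx)
{
    return FIXADDR_TOP - ((uint64_t)idx << PAGE_SHIFT);
}

int main(void)
{
    unsigned int fix_entry_tramp_text = 5;  /* illustrative slot number */

    /* the VA is a compile-time constant, identical in both page tables */
    printf("TRAMP_VALIAS = %#llx\n",
           (unsigned long long)fix_to_virt(fix_entry_tramp_text));
    return 0;
}
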
diff --git a/queue-4.15/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch b/queue-4.15/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
new file mode 100644 (file)
index 0000000..9108ae2
--- /dev/null
@@ -0,0 +1,93 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 13:19:09 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Move ASID from TTBR0 to TTBR1
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 7655abb95386 upstream.
+
+In preparation for mapping kernelspace and userspace with different
+ASIDs, move the ASID to TTBR1 and update switch_mm to context-switch
+TTBR0 via an invalid mapping (the zero page).
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/mmu_context.h   |    7 +++++++
+ arch/arm64/include/asm/pgtable-hwdef.h |    1 +
+ arch/arm64/include/asm/proc-fns.h      |    6 ------
+ arch/arm64/mm/proc.S                   |    9 ++++++---
+ 4 files changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -57,6 +57,13 @@ static inline void cpu_set_reserved_ttbr
+       isb();
+ }
++static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
++{
++      BUG_ON(pgd == swapper_pg_dir);
++      cpu_set_reserved_ttbr0();
++      cpu_do_switch_mm(virt_to_phys(pgd),mm);
++}
++
+ /*
+  * TCR.T0SZ value to use when the ID map is active. Usually equals
+  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -272,6 +272,7 @@
+ #define TCR_TG1_4K            (UL(2) << TCR_TG1_SHIFT)
+ #define TCR_TG1_64K           (UL(3) << TCR_TG1_SHIFT)
++#define TCR_A1                        (UL(1) << 22)
+ #define TCR_ASID16            (UL(1) << 36)
+ #define TCR_TBI0              (UL(1) << 37)
+ #define TCR_HA                        (UL(1) << 39)
+--- a/arch/arm64/include/asm/proc-fns.h
++++ b/arch/arm64/include/asm/proc-fns.h
+@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr
+ #include <asm/memory.h>
+-#define cpu_switch_mm(pgd,mm)                         \
+-do {                                                  \
+-      BUG_ON(pgd == swapper_pg_dir);                  \
+-      cpu_do_switch_mm(virt_to_phys(pgd),mm);         \
+-} while (0)
+-
+ #endif /* __ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_PROCFNS_H */
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -139,9 +139,12 @@ ENDPROC(cpu_do_resume)
+  */
+ ENTRY(cpu_do_switch_mm)
+       pre_ttbr0_update_workaround x0, x2, x3
++      mrs     x2, ttbr1_el1
+       mmid    x1, x1                          // get mm->context.id
+-      bfi     x0, x1, #48, #16                // set the ASID
+-      msr     ttbr0_el1, x0                   // set TTBR0
++      bfi     x2, x1, #48, #16                // set the ASID
++      msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
++      isb
++      msr     ttbr0_el1, x0                   // now update TTBR0
+       isb
+       post_ttbr0_update_workaround
+       ret
+@@ -224,7 +227,7 @@ ENTRY(__cpu_setup)
+        * both user and kernel.
+        */
+       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+-                      TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
++                      TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
+       tcr_set_idmap_t0sz      x10, x9
+       /*
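
The "bfi x2, x1, #48, #16" above inserts the 16-bit ASID into bits [63:48] of the TTBR1 value; with TCR.A1 set, the hardware then sources the active ASID from TTBR1 rather than TTBR0. The same bitfield insert written out in C, as a sketch with invented register values:

#include <stdint.h>
#include <stdio.h>

/* C equivalent of "bfi dst, src, #48, #16" */
static uint64_t bfi48_16(uint64_t dst, uint64_t src)
{
    const uint64_t mask = 0xffffULL << 48;

    return (dst & ~mask) | ((src << 48) & mask);
}

int main(void)
{
    uint64_t ttbr1 = 0x40200000ULL;  /* invented table base */
    uint64_t asid  = 0x2a;

    /* with TCR.A1 set, the hardware takes the active ASID from TTBR1 */
    printf("ttbr1 = %#llx\n", (unsigned long long)bfi48_16(ttbr1, asid));
    return 0;
}
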
diff --git a/queue-4.15/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch b/queue-4.15/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
new file mode 100644 (file)
index 0000000..89df011
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 29 Jan 2018 11:59:54 +0000
+Subject: [Variant 3/Meltdown] arm64: mm: Permit transitioning from Global to Non-Global without BBM
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 4e6020565596 upstream.
+
+Break-before-make is not needed when transitioning from Global to
+Non-Global mappings, provided that the contiguous hint is not being used.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 ol
+       if ((old | new) & PTE_CONT)
+               return false;
++      /* Transitioning from Global to Non-Global is safe */
++      if (((old ^ new) == PTE_NG) && (new & PTE_NG))
++              return true;
++
+       return ((old ^ new) & ~mask) == 0;
+ }
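
The new test admits exactly one transition: the sole differing bit is nG, and it is being set. In isolation (PTE_NG modelled as descriptor bit 11; the sample PTE value is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_NG (1ULL << 11)  /* nG is bit 11 of a stage-1 descriptor */

/* safe without break-before-make: only nG changed, and it was set */
static bool global_to_nonglobal(uint64_t old, uint64_t new)
{
    return ((old ^ new) == PTE_NG) && (new & PTE_NG);
}

int main(void)
{
    uint64_t global    = 0x0040000000000703ULL;  /* invented PTE value */
    uint64_t nonglobal = global | PTE_NG;

    printf("%d %d\n",
           global_to_nonglobal(global, nonglobal),   /* 1: safe */
           global_to_nonglobal(nonglobal, global));  /* 0: clearing nG */
    return 0;
}
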
diff --git a/queue-4.15/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch b/queue-4.15/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
new file mode 100644 (file)
index 0000000..6e871d7
--- /dev/null
@@ -0,0 +1,126 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 13:29:06 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Remove pre_ttbr0_update_workaround for Falkor erratum #E1003
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 85d13c001497 upstream.
+
+The pre_ttbr0_update_workaround hook is called prior to context-switching
+TTBR0 because Falkor erratum E1003 can cause TLB allocation with the wrong
+ASID if both the ASID and the base address of the TTBR are updated at
+the same time.
+
+With the ASID sitting safely in TTBR1, we no longer update things
+atomically, so we can remove the pre_ttbr0_update_workaround macro as
+it's no longer required. The erratum infrastructure and documentation
+are left around for #E1003, as they will be required by the entry
+trampoline code in a future patch.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h   |   22 ----------------------
+ arch/arm64/include/asm/mmu_context.h |    2 --
+ arch/arm64/mm/context.c              |   11 -----------
+ arch/arm64/mm/proc.S                 |    1 -
+ 4 files changed, 36 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -26,7 +26,6 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+-#include <asm/mmu_context.h>
+ #include <asm/page.h>
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/ptrace.h>
+@@ -478,27 +477,6 @@ alternative_endif
+       .endm
+ /*
+- * Errata workaround prior to TTBR0_EL1 update
+- *
+- *    val:    TTBR value with new BADDR, preserved
+- *    tmp0:   temporary register, clobbered
+- *    tmp1:   other temporary register, clobbered
+- */
+-      .macro  pre_ttbr0_update_workaround, val, tmp0, tmp1
+-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+-alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+-      mrs     \tmp0, ttbr0_el1
+-      mov     \tmp1, #FALKOR_RESERVED_ASID
+-      bfi     \tmp0, \tmp1, #48, #16          // reserved ASID + old BADDR
+-      msr     ttbr0_el1, \tmp0
+-      isb
+-      bfi     \tmp0, \val, #0, #48            // reserved ASID + new BADDR
+-      msr     ttbr0_el1, \tmp0
+-      isb
+-alternative_else_nop_endif
+-#endif
+-      .endm
+-
+ /*
+  * Errata workaround post TTBR0_EL1 update.
+  */
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -19,8 +19,6 @@
+ #ifndef __ASM_MMU_CONTEXT_H
+ #define __ASM_MMU_CONTEXT_H
+-#define FALKOR_RESERVED_ASID  1
+-
+ #ifndef __ASSEMBLY__
+ #include <linux/compiler.h>
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -79,13 +79,6 @@ void verify_cpu_asid_bits(void)
+       }
+ }
+-static void set_reserved_asid_bits(void)
+-{
+-      if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
+-          cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
+-              __set_bit(FALKOR_RESERVED_ASID, asid_map);
+-}
+-
+ static void flush_context(unsigned int cpu)
+ {
+       int i;
+@@ -94,8 +87,6 @@ static void flush_context(unsigned int c
+       /* Update the list of reserved ASIDs and the ASID bitmap. */
+       bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+-      set_reserved_asid_bits();
+-
+       for_each_possible_cpu(i) {
+               asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
+               /*
+@@ -254,8 +245,6 @@ static int asids_init(void)
+               panic("Failed to allocate bitmap for %lu ASIDs\n",
+                     NUM_USER_ASIDS);
+-      set_reserved_asid_bits();
+-
+       pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+       return 0;
+ }
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -138,7 +138,6 @@ ENDPROC(cpu_do_resume)
+  *    - pgd_phys - physical address of new TTB
+  */
+ ENTRY(cpu_do_switch_mm)
+-      pre_ttbr0_update_workaround x0, x2, x3
+       mrs     x2, ttbr1_el1
+       mmid    x1, x1                          // get mm->context.id
+       bfi     x2, x1, #48, #16                // set the ASID
diff --git a/queue-4.15/arm64-mm-rename-post_ttbr0_update_workaround.patch b/queue-4.15/arm64-mm-rename-post_ttbr0_update_workaround.patch
new file mode 100644 (file)
index 0000000..03bb0a9
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 13:34:30 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Rename post_ttbr0_update_workaround
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 158d495899ce upstream.
+
+The post_ttbr0_update_workaround hook applies to any change to TTBRx_EL1.
+Since we're using TTBR1 for the ASID, rename the hook to make it clearer
+what it's doing.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h |    5 ++---
+ arch/arm64/kernel/entry.S          |    2 +-
+ arch/arm64/mm/proc.S               |    2 +-
+ 3 files changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -477,10 +477,9 @@ alternative_endif
+       .endm
+ /*
+-/*
+- * Errata workaround post TTBR0_EL1 update.
++ * Errata workaround post TTBRx_EL1 update.
+  */
+-      .macro  post_ttbr0_update_workaround
++      .macro  post_ttbr_update_workaround
+ #ifdef CONFIG_CAVIUM_ERRATUM_27456
+ alternative_if ARM64_WORKAROUND_CAVIUM_27456
+       ic      iallu
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -257,7 +257,7 @@ alternative_else_nop_endif
+        * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+        * corruption).
+        */
+-      post_ttbr0_update_workaround
++      post_ttbr_update_workaround
+       .endif
+ 1:
+       .if     \el != 0
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -145,7 +145,7 @@ ENTRY(cpu_do_switch_mm)
+       isb
+       msr     ttbr0_el1, x0                   // now update TTBR0
+       isb
+-      post_ttbr0_update_workaround
++      post_ttbr_update_workaround
+       ret
+ ENDPROC(cpu_do_switch_mm)
diff --git a/queue-4.15/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch b/queue-4.15/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
new file mode 100644 (file)
index 0000000..9d33481
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 13:04:48 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 376133b7edc2 upstream.
+
+We're about to rework the way ASIDs are allocated, switch_mm is
+implemented and low-level kernel entry/exit is handled, so keep the
+ARM64_SW_TTBR0_PAN code out of the way whilst we do the heavy lifting.
+
+It will be re-enabled in a subsequent patch.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -920,6 +920,7 @@ endif
+ config ARM64_SW_TTBR0_PAN
+       bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
++      depends on BROKEN       # Temporary while switch_mm is reworked
+       help
+         Enabling this option prevents the kernel from accessing
+         user-space memory directly by pointing TTBR0_EL1 to a reserved
diff --git a/queue-4.15/arm64-mm-use-non-global-mappings-for-kernel-space.patch b/queue-4.15/arm64-mm-use-non-global-mappings-for-kernel-space.patch
new file mode 100644 (file)
index 0000000..6fcaa95
--- /dev/null
@@ -0,0 +1,101 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Aug 2017 12:56:18 +0100
+Subject: [Variant 3/Meltdown] arm64: mm: Use non-global mappings for kernel space
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit e046eb0c9bf2 upstream.
+
+In preparation for unmapping the kernel whilst running in userspace,
+make the kernel mappings non-global so we can avoid expensive TLB
+invalidation on kernel exit to userspace.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kernel-pgtable.h | 12 ++++++++++--
+ arch/arm64/include/asm/pgtable-prot.h   | 21 +++++++++++++++------
+ 2 files changed, 25 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
+index 7803343e5881..77a27af01371 100644
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -78,8 +78,16 @@
+ /*
+  * Initial memory map attributes.
+  */
+-#define SWAPPER_PTE_FLAGS     (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+-#define SWAPPER_PMD_FLAGS     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
++#define _SWAPPER_PTE_FLAGS    (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
++#define _SWAPPER_PMD_FLAGS    (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
++
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++#define SWAPPER_PTE_FLAGS     (_SWAPPER_PTE_FLAGS | PTE_NG)
++#define SWAPPER_PMD_FLAGS     (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
++#else
++#define SWAPPER_PTE_FLAGS     _SWAPPER_PTE_FLAGS
++#define SWAPPER_PMD_FLAGS     _SWAPPER_PMD_FLAGS
++#endif
+ #if ARM64_SWAPPER_USES_SECTION_MAPS
+ #define SWAPPER_MM_MMUFLAGS   (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 0a5635fb0ef9..22a926825e3f 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -34,8 +34,16 @@
+ #include <asm/pgtable-types.h>
+-#define PROT_DEFAULT          (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+-#define PROT_SECT_DEFAULT     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
++#define _PROT_DEFAULT         (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
++#define _PROT_SECT_DEFAULT    (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
++
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++#define PROT_DEFAULT          (_PROT_DEFAULT | PTE_NG)
++#define PROT_SECT_DEFAULT     (_PROT_SECT_DEFAULT | PMD_SECT_NG)
++#else
++#define PROT_DEFAULT          _PROT_DEFAULT
++#define PROT_SECT_DEFAULT     _PROT_SECT_DEFAULT
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ #define PROT_DEVICE_nGnRnE    (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ #define PROT_DEVICE_nGnRE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+@@ -48,6 +56,7 @@
+ #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+ #define _PAGE_DEFAULT         (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
++#define _HYP_PAGE_DEFAULT     (_PAGE_DEFAULT & ~PTE_NG)
+ #define PAGE_KERNEL           __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_RO                __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+@@ -55,15 +64,15 @@
+ #define PAGE_KERNEL_EXEC      __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+-#define PAGE_HYP              __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+-#define PAGE_HYP_EXEC         __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+-#define PAGE_HYP_RO           __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
++#define PAGE_HYP              __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
++#define PAGE_HYP_EXEC         __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
++#define PAGE_HYP_RO           __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+ #define PAGE_HYP_DEVICE               __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+ #define PAGE_S2                       __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+ #define PAGE_S2_DEVICE                __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+-#define PAGE_NONE             __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
++#define PAGE_NONE             __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED           __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+ #define PAGE_SHARED_EXEC      __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_READONLY         __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+-- 
+2.16.1
+
diff --git a/queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch b/queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
new file mode 100644 (file)
index 0000000..e4da87e
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 19 Jan 2018 15:42:09 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Move BP hardening to check_and_switch_context
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit a8e4c0a919ae upstream.
+
+We call arm64_apply_bp_hardening() from post_ttbr_update_workaround,
+which has the unexpected consequence of being triggered on every
+exception return to userspace when ARM64_SW_TTBR0_PAN is selected,
+even if no context switch actually occurred.
+
+This is a bit suboptimal, and it would be more logical to only
+invalidate the branch predictor when we actually switch to
+a different mm.
+
+In order to solve this, move the call to arm64_apply_bp_hardening()
+into check_and_switch_context(), where we're guaranteed to pick
+a different mm context.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/context.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -231,6 +231,9 @@ void check_and_switch_context(struct mm_
+       raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ switch_mm_fastpath:
++
++      arm64_apply_bp_hardening();
++
+       /*
+        * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+        * emulating PAN.
+@@ -246,8 +249,6 @@ asmlinkage void post_ttbr_update_workaro
+                       "ic iallu; dsb nsh; isb",
+                       ARM64_WORKAROUND_CAVIUM_27456,
+                       CONFIG_CAVIUM_ERRATUM_27456));
+-
+-      arm64_apply_bp_hardening();
+ }
+ static int asids_init(void)
diff --git a/queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch b/queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
new file mode 100644 (file)
index 0000000..847a0db
--- /dev/null
@@ -0,0 +1,91 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 2 Jan 2018 18:19:39 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Move post_ttbr_update_workaround to C code
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 95e3de3590e3 upstream.
+
+We will soon need to invoke a CPU-specific function pointer after changing
+page tables, so move post_ttbr_update_workaround out into C code to make
+this possible.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+Conflicts:
+       arch/arm64/include/asm/assembler.h
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h |   13 -------------
+ arch/arm64/kernel/entry.S          |    2 +-
+ arch/arm64/mm/context.c            |    9 +++++++++
+ arch/arm64/mm/proc.S               |    3 +--
+ 4 files changed, 11 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -494,19 +494,6 @@ alternative_endif
+       mrs     \rd, sp_el0
+       .endm
+-/*
+- * Errata workaround post TTBRx_EL1 update.
+- */
+-      .macro  post_ttbr_update_workaround
+-#ifdef CONFIG_CAVIUM_ERRATUM_27456
+-alternative_if ARM64_WORKAROUND_CAVIUM_27456
+-      ic      iallu
+-      dsb     nsh
+-      isb
+-alternative_else_nop_endif
+-#endif
+-      .endm
+-
+       .macro  pte_to_phys, phys, pte
+       and     \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+       .endm
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -277,7 +277,7 @@ alternative_else_nop_endif
+        * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+        * corruption).
+        */
+-      post_ttbr_update_workaround
++      bl      post_ttbr_update_workaround
+       .endif
+ 1:
+       .if     \el != 0
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -239,6 +239,15 @@ switch_mm_fastpath:
+               cpu_switch_mm(mm->pgd, mm);
+ }
++/* Errata workaround post TTBRx_EL1 update. */
++asmlinkage void post_ttbr_update_workaround(void)
++{
++      asm(ALTERNATIVE("nop; nop; nop",
++                      "ic iallu; dsb nsh; isb",
++                      ARM64_WORKAROUND_CAVIUM_27456,
++                      CONFIG_CAVIUM_ERRATUM_27456));
++}
++
+ static int asids_init(void)
+ {
+       asid_bits = get_cpu_asid_bits();
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -148,8 +148,7 @@ ENTRY(cpu_do_switch_mm)
+       isb
+       msr     ttbr0_el1, x0                   // now update TTBR0
+       isb
+-      post_ttbr_update_workaround
+-      ret
++      b       post_ttbr_update_workaround     // Back to C code...
+ ENDPROC(cpu_do_switch_mm)
+       .pushsection ".idmap.text", "awx"
diff --git a/queue-4.15/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch b/queue-4.15/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
new file mode 100644 (file)
index 0000000..0d69309
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 17 Jan 2018 17:42:20 +0000
+Subject: [Variant 2/Spectre-v2] arm64: Run enable method for errata work arounds on late CPUs
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+
+Commit 55b35d070c25 upstream.
+
+When a CPU is brought up after we have finalised the system
+wide capabilities (i.e, features and errata), we make sure the
+new CPU doesn't need a new errata work around which has not been
+detected already. However we don't run enable() method on the new
+CPU for the errata work arounds already detected. This could
+cause the new CPU running without potential work arounds.
+It is upto the "enable()" method to decide if this CPU should
+do something about the errata.
+
+Fixes: commit 6a6efbb45b7d95c84 ("arm64: Verify CPU errata work arounds on hotplugged CPU")
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Andre Przywara <andre.przywara@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -221,15 +221,18 @@ void verify_local_cpu_errata_workarounds
+ {
+       const struct arm64_cpu_capabilities *caps = arm64_errata;
+-      for (; caps->matches; caps++)
+-              if (!cpus_have_cap(caps->capability) &&
+-                      caps->matches(caps, SCOPE_LOCAL_CPU)) {
++      for (; caps->matches; caps++) {
++              if (cpus_have_cap(caps->capability)) {
++                      if (caps->enable)
++                              caps->enable((void *)caps);
++              } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
+                       pr_crit("CPU%d: Requires work around for %s, not detected"
+                                       " at boot time\n",
+                               smp_processor_id(),
+                               caps->desc ? : "an erratum");
+                       cpu_die_early();
+               }
++      }
+ }
+ void update_cpu_errata_workarounds(void)
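
The reworked loop has two arms: re-run enable() on the late CPU for every erratum already detected system-wide, or kill the CPU if it matches one that was not. A toy table-driven version with simplified types (the struct and helpers are stand-ins for arm64_cpu_capabilities, cpus_have_cap() and cpu_die_early()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct capability {
    const char *desc;
    bool (*matches)(void);  /* NULL matches terminates the table */
    bool detected;          /* stand-in for cpus_have_cap() */
    void (*enable)(const struct capability *);
};

static bool match_yes(void) { return true; }

static void apply(const struct capability *c)
{
    printf("enable() run for: %s\n", c->desc);
}

static void verify_late_cpu(const struct capability *caps)
{
    for (; caps->matches; caps++) {
        if (caps->detected) {
            if (caps->enable)
                caps->enable(caps);  /* re-run the work-around on this CPU */
        } else if (caps->matches()) {
            fprintf(stderr, "undetected erratum: %s\n", caps->desc);
            exit(1);                 /* stand-in for cpu_die_early() */
        }
    }
}

int main(void)
{
    const struct capability table[] = {
        { "erratum A", match_yes, true, apply },
        { 0 },                       /* sentinel */
    };

    verify_late_cpu(table);
    return 0;
}
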
diff --git a/queue-4.15/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch b/queue-4.15/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
new file mode 100644 (file)
index 0000000..2b5318b
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 27 Nov 2017 18:29:30 +0000
+Subject: [Variant 3/Meltdown] arm64: Take into account ID_AA64PFR0_EL1.CSV3
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 179a56f6f9fb upstream.
+
+For non-KASLR kernels where the KPTI behaviour has not been overridden
+on the command line, we can use ID_AA64PFR0_EL1.CSV3 to determine whether
+or not we should unmap the kernel whilst running at EL0.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+Conflicts:
+       arch/arm64/kernel/cpufeature.c
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h |    1 +
+ arch/arm64/kernel/cpufeature.c  |    8 +++++++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -437,6 +437,7 @@
+ #define ID_AA64ISAR1_DPB_SHIFT                0
+ /* id_aa64pfr0 */
++#define ID_AA64PFR0_CSV3_SHIFT                60
+ #define ID_AA64PFR0_SVE_SHIFT         32
+ #define ID_AA64PFR0_GIC_SHIFT         24
+ #define ID_AA64PFR0_ASIMD_SHIFT               20
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                                  FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+@@ -852,6 +853,8 @@ static int __kpti_forced; /* 0: not forc
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+                               int __unused)
+ {
++      u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
+       /* Forced on command line? */
+       if (__kpti_forced) {
+               pr_info_once("kernel page table isolation forced %s by command line option\n",
+@@ -863,7 +866,9 @@ static bool unmap_kernel_at_el0(const st
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+               return true;
+-      return false;
++      /* Defer to CPU feature registers */
++      return !cpuid_feature_extract_unsigned_field(pfr0,
++                                                   ID_AA64PFR0_CSV3_SHIFT);
+ }
+ static int __init parse_kpti(char *str)
+@@ -968,6 +973,7 @@ static const struct arm64_cpu_capabiliti
+       },
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
++              .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = unmap_kernel_at_el0,
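
cpuid_feature_extract_unsigned_field() boils down to shifting and masking a 4-bit ID-register field; for CSV3 at bit 60 that is simply (a sketch, with an invented pfr0 value):

#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR0_CSV3_SHIFT 60

/* 4-bit unsigned ID-register field extraction */
static unsigned int extract_field(uint64_t reg, unsigned int shift)
{
    return (reg >> shift) & 0xf;
}

int main(void)
{
    uint64_t pfr0 = 1ULL << ID_AA64PFR0_CSV3_SHIFT;  /* invented: CSV3 == 1 */

    /* CSV3 != 0 means the CPU declares itself not vulnerable: skip KPTI */
    printf("kpti needed: %d\n", extract_field(pfr0, ID_AA64PFR0_CSV3_SHIFT) == 0);
    return 0;
}
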
diff --git a/queue-4.15/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch b/queue-4.15/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
new file mode 100644 (file)
index 0000000..6a5e837
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 14:33:28 +0000
+Subject: [Variant 3/Meltdown] arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 18011eac28c7 upstream.
+
+When unmapping the kernel at EL0, we use tpidrro_el0 as a scratch register
+during exception entry from native tasks and subsequently zero it in
+the kernel_ventry macro. We can therefore avoid zeroing tpidrro_el0
+in the context-switch path for native tasks using the entry trampoline.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/process.c |   12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -370,16 +370,14 @@ void tls_preserve_current_state(void)
+ static void tls_thread_switch(struct task_struct *next)
+ {
+-      unsigned long tpidr, tpidrro;
+-
+       tls_preserve_current_state();
+-      tpidr = *task_user_tls(next);
+-      tpidrro = is_compat_thread(task_thread_info(next)) ?
+-                next->thread.tp_value : 0;
++      if (is_compat_thread(task_thread_info(next)))
++              write_sysreg(next->thread.tp_value, tpidrro_el0);
++      else if (!arm64_kernel_unmapped_at_el0())
++              write_sysreg(0, tpidrro_el0);
+-      write_sysreg(tpidr, tpidr_el0);
+-      write_sysreg(tpidrro, tpidrro_el0);
++      write_sysreg(*task_user_tls(next), tpidr_el0);
+ }
+ /* Restore the UAO state depending on next's addr_limit */
diff --git a/queue-4.15/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch b/queue-4.15/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
new file mode 100644 (file)
index 0000000..311d85a
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Jayachandran C <jnair@caviumnetworks.com>
+Date: Fri, 19 Jan 2018 04:22:48 -0800
+Subject: [Variant 3/Meltdown] arm64: Turn on KPTI only on CPUs that need it
+
+From: Jayachandran C <jnair@caviumnetworks.com>
+
+
+Commit 0ba2e29c7fc1 upstream.
+
+Whitelist Broadcom Vulcan/Cavium ThunderX2 processors in
+unmap_kernel_at_el0(). These CPUs are not vulnerable to
+CVE-2017-5754 and do not need KPTI when KASLR is off.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Jayachandran C <jnair@caviumnetworks.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -866,6 +866,13 @@ static bool unmap_kernel_at_el0(const st
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+               return true;
++      /* Don't force KPTI for CPUs that are not vulnerable */
++      switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
++      case MIDR_CAVIUM_THUNDERX2:
++      case MIDR_BRCM_VULCAN:
++              return false;
++      }
++
+       /* Defer to CPU feature registers */
+       return !cpuid_feature_extract_unsigned_field(pfr0,
+                                                    ID_AA64PFR0_CSV3_SHIFT);
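
Masking the MIDR with MIDR_CPU_MODEL_MASK strips the variant and revision fields so whole CPU models can be compared. Schematically (the model constants follow the usual MIDR encoding but should be treated as illustrative; the real values live in asm/cputype.h):

#include <stdint.h>
#include <stdio.h>

/* Implementer[31:24] | Architecture[19:16] | PartNum[15:4] */
#define MIDR_CPU_MODEL_MASK   0xff0ffff0u

/* illustrative encodings: Cavium 0x43 part 0x0af, Broadcom 0x42 part 0x516 */
#define MIDR_CAVIUM_THUNDERX2 0x430f0af0u
#define MIDR_BRCM_VULCAN      0x420f5160u

static int needs_kpti(uint32_t midr)
{
    switch (midr & MIDR_CPU_MODEL_MASK) {
    case MIDR_CAVIUM_THUNDERX2:
    case MIDR_BRCM_VULCAN:
        return 0;  /* whitelisted: not vulnerable to CVE-2017-5754 */
    }
    return 1;
}

int main(void)
{
    /* any variant/revision of ThunderX2 compares equal after masking */
    printf("needs kpti: %d\n", needs_kpti(0x432f0af1u));
    return 0;
}
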
diff --git a/queue-4.15/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch b/queue-4.15/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
new file mode 100644 (file)
index 0000000..7305896
--- /dev/null
@@ -0,0 +1,119 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:22 +0000
+Subject: [Variant 1/Spectre-v1] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 84624087dd7e upstream.
+
+access_ok isn't an expensive operation once the addr_limit for the current
+thread has been loaded into the cache. Given that the initial access_ok
+check preceding a sequence of __{get,put}_user operations will take
+the brunt of the miss, we can make the __* variants identical to the
+full-fat versions, which brings with it the benefits of address masking.
+
+The likely cost in these sequences will be from toggling PAN/UAO, which
+we can address later by implementing the *_unsafe versions.
+
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h |   54 +++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -294,28 +294,33 @@ do {                                                                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+ } while (0)
+-#define __get_user(x, ptr)                                            \
++#define __get_user_check(x, ptr, err)                                 \
+ ({                                                                    \
+-      int __gu_err = 0;                                               \
+-      __get_user_err((x), (ptr), __gu_err);                           \
+-      __gu_err;                                                       \
++      __typeof__(*(ptr)) __user *__p = (ptr);                         \
++      might_fault();                                                  \
++      if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
++              __p = uaccess_mask_ptr(__p);                            \
++              __get_user_err((x), __p, (err));                        \
++      } else {                                                        \
++              (x) = 0; (err) = -EFAULT;                               \
++      }                                                               \
+ })
+ #define __get_user_error(x, ptr, err)                                 \
+ ({                                                                    \
+-      __get_user_err((x), (ptr), (err));                              \
++      __get_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
+ })
+-#define get_user(x, ptr)                                              \
++#define __get_user(x, ptr)                                            \
+ ({                                                                    \
+-      __typeof__(*(ptr)) __user *__p = (ptr);                         \
+-      might_fault();                                                  \
+-      access_ok(VERIFY_READ, __p, sizeof(*__p)) ?                     \
+-              __p = uaccess_mask_ptr(__p), __get_user((x), __p) :     \
+-              ((x) = 0, -EFAULT);                                     \
++      int __gu_err = 0;                                               \
++      __get_user_check((x), (ptr), __gu_err);                         \
++      __gu_err;                                                       \
+ })
++#define get_user      __get_user
++
+ #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)  \
+       asm volatile(                                                   \
+       "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
+@@ -358,28 +363,33 @@ do {                                                                     \
+       uaccess_disable_not_uao();                                      \
+ } while (0)
+-#define __put_user(x, ptr)                                            \
++#define __put_user_check(x, ptr, err)                                 \
+ ({                                                                    \
+-      int __pu_err = 0;                                               \
+-      __put_user_err((x), (ptr), __pu_err);                           \
+-      __pu_err;                                                       \
++      __typeof__(*(ptr)) __user *__p = (ptr);                         \
++      might_fault();                                                  \
++      if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
++              __p = uaccess_mask_ptr(__p);                            \
++              __put_user_err((x), __p, (err));                        \
++      } else  {                                                       \
++              (err) = -EFAULT;                                        \
++      }                                                               \
+ })
+ #define __put_user_error(x, ptr, err)                                 \
+ ({                                                                    \
+-      __put_user_err((x), (ptr), (err));                              \
++      __put_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
+ })
+-#define put_user(x, ptr)                                              \
++#define __put_user(x, ptr)                                            \
+ ({                                                                    \
+-      __typeof__(*(ptr)) __user *__p = (ptr);                         \
+-      might_fault();                                                  \
+-      access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?                    \
+-              __p = uaccess_mask_ptr(__p), __put_user((x), __p) :     \
+-              -EFAULT;                                                \
++      int __pu_err = 0;                                               \
++      __put_user_check((x), (ptr), __pu_err);                         \
++      __pu_err;                                                       \
+ })
++#define put_user      __put_user
++
+ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+ #define raw_copy_from_user __arch_copy_from_user
+ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
diff --git a/queue-4.15/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch b/queue-4.15/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
new file mode 100644 (file)
index 0000000..dab951b
--- /dev/null
@@ -0,0 +1,139 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:23 +0000
+Subject: [Variant 1/Spectre-v1] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit f71c2ffcb20d upstream.
+
+Like we've done for get_user and put_user, ensure that user pointers
+are masked before invoking the underlying __arch_{clear,copy_*}_user
+operations.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h |   29 ++++++++++++++++++++++-------
+ arch/arm64/kernel/arm64ksyms.c   |    4 ++--
+ arch/arm64/lib/clear_user.S      |    6 +++---
+ arch/arm64/lib/copy_in_user.S    |    5 +++--
+ 4 files changed, 30 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -391,20 +391,35 @@ do {                                                                     \
+ #define put_user      __put_user
+ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+-#define raw_copy_from_user __arch_copy_from_user
++#define raw_copy_from_user(to, from, n)                                       \
++({                                                                    \
++      __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));     \
++})
++
+ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+-#define raw_copy_to_user __arch_copy_to_user
+-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
++#define raw_copy_to_user(to, from, n)                                 \
++({                                                                    \
++      __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));       \
++})
++
++extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
++#define raw_copy_in_user(to, from, n)                                 \
++({                                                                    \
++      __arch_copy_in_user(__uaccess_mask_ptr(to),                     \
++                          __uaccess_mask_ptr(from), (n));             \
++})
++
+ #define INLINE_COPY_TO_USER
+ #define INLINE_COPY_FROM_USER
+-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
++extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
++static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
+ {
+       if (access_ok(VERIFY_WRITE, to, n))
+-              n = __clear_user(__uaccess_mask_ptr(to), n);
++              n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+       return n;
+ }
++#define clear_user    __clear_user
+ extern long strncpy_from_user(char *dest, const char __user *src, long count);
+@@ -418,7 +433,7 @@ extern unsigned long __must_check __copy
+ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+ {
+       kasan_check_write(dst, size);
+-      return __copy_user_flushcache(dst, src, size);
++      return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
+ }
+ #endif
+--- a/arch/arm64/kernel/arm64ksyms.c
++++ b/arch/arm64/kernel/arm64ksyms.c
+@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
+       /* user mem (segment) */
+ EXPORT_SYMBOL(__arch_copy_from_user);
+ EXPORT_SYMBOL(__arch_copy_to_user);
+-EXPORT_SYMBOL(__clear_user);
+-EXPORT_SYMBOL(raw_copy_in_user);
++EXPORT_SYMBOL(__arch_clear_user);
++EXPORT_SYMBOL(__arch_copy_in_user);
+       /* physical memory */
+ EXPORT_SYMBOL(memstart_addr);
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -21,7 +21,7 @@
+       .text
+-/* Prototype: int __clear_user(void *addr, size_t sz)
++/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+  * Purpose  : clear some user memory
+  * Params   : addr - user memory address to clear
+  *          : sz   - number of bytes to clear
+@@ -29,7 +29,7 @@
+  *
+  * Alignment fixed up by hardware.
+  */
+-ENTRY(__clear_user)
++ENTRY(__arch_clear_user)
+       uaccess_enable_not_uao x2, x3, x4
+       mov     x2, x1                  // save the size for fixup return
+       subs    x1, x1, #8
+@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wz
+ 5:    mov     x0, #0
+       uaccess_disable_not_uao x2, x3
+       ret
+-ENDPROC(__clear_user)
++ENDPROC(__arch_clear_user)
+       .section .fixup,"ax"
+       .align  2
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -64,14 +64,15 @@
+       .endm
+ end   .req    x5
+-ENTRY(raw_copy_in_user)
++
++ENTRY(__arch_copy_in_user)
+       uaccess_enable_not_uao x3, x4, x5
+       add     end, x0, x2
+ #include "copy_template.S"
+       uaccess_disable_not_uao x3, x4
+       mov     x0, #0
+       ret
+-ENDPROC(raw_copy_in_user)
++ENDPROC(__arch_copy_in_user)
+       .section .fixup,"ax"
+       .align  2
diff --git a/queue-4.15/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch b/queue-4.15/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
new file mode 100644 (file)
index 0000000..fc6da83
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 5 Feb 2018 15:34:21 +0000
+Subject: [Variant 1/Spectre-v1] arm64: uaccess: Prevent speculative use of the current addr_limit
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit c2f0ad4fc089 upstream.
+
+A mispredicted conditional call to set_fs could result in the wrong
+addr_limit being forwarded under speculation to a subsequent access_ok
+check, potentially forming part of a spectre-v1 attack using uaccess
+routines.
+
+This patch prevents this forwarding from taking place by putting heavy
+barriers in set_fs after writing the addr_limit.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -42,6 +42,13 @@ static inline void set_fs(mm_segment_t f
+ {
+       current_thread_info()->addr_limit = fs;
++      /*
++       * Prevent a mispredicted conditional call to set_fs from forwarding
++       * the wrong address limit to access_ok under speculation.
++       */
++      dsb(nsh);
++      isb();
++
+       /* On user-mode return, check fs is correct */
+       set_thread_flag(TIF_FSCHECK);
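
The ordering requirement is just: store the new limit, then fence before anything can consume it. A userspace-flavoured sketch of the same pattern (the barrier helper and addr_limit variable are stand-ins for the kernel primitives):

#include <stdint.h>

static uint64_t addr_limit;  /* stand-in for current_thread_info()->addr_limit */

static inline void speculation_barrier(void)
{
#if defined(__aarch64__)
    __asm__ __volatile__("dsb nsh" ::: "memory");  /* complete the store... */
    __asm__ __volatile__("isb"     ::: "memory");  /* ...and flush the pipeline */
#else
    __asm__ __volatile__("" ::: "memory");         /* compile-time ordering only */
#endif
}

static void set_fs(uint64_t fs)
{
    addr_limit = fs;
    /*
     * as in the patch: don't let a mispredicted call to set_fs forward a
     * stale or wrong limit to a later access_ok under speculation
     */
    speculation_barrier();
}

int main(void)
{
    set_fs(0x0000ffffffffffffULL);  /* invented user address limit */
    return 0;
}
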
diff --git a/queue-4.15/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch b/queue-4.15/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
new file mode 100644 (file)
index 0000000..bdf781c
--- /dev/null
@@ -0,0 +1,82 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Mon, 5 Feb 2018 15:34:19 +0000
+Subject: [Variant 1/Spectre-v1] arm64: Use pointer masking to limit uaccess speculation
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+
+Commit 4d8efc2d5ee4 upstream.
+
+Similarly to x86, mitigate speculation past an access_ok() check by
+masking the pointer against the address limit before use.
+
+Even if we don't expect speculative writes per se, it is plausible that
+a CPU may still speculate at least as far as fetching a cache line for
+writing, hence we also harden put_user() and clear_user() for peace of
+mind.
+
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h |   26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -216,6 +216,26 @@ static inline void uaccess_enable_not_ua
+ }
+ /*
++ * Sanitise a uaccess pointer such that it becomes NULL if above the
++ * current addr_limit.
++ */
++#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
++static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
++{
++      void __user *safe_ptr;
++
++      asm volatile(
++      "       bics    xzr, %1, %2\n"
++      "       csel    %0, %1, xzr, eq\n"
++      : "=&r" (safe_ptr)
++      : "r" (ptr), "r" (current_thread_info()->addr_limit)
++      : "cc");
++
++      csdb();
++      return safe_ptr;
++}
++
++/*
+  * The "__xxx" versions of the user access functions do not verify the address
+  * space - it must have been done previously with a separate "access_ok()"
+  * call.
+@@ -285,7 +305,7 @@ do {                                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       access_ok(VERIFY_READ, __p, sizeof(*__p)) ?                     \
+-              __get_user((x), __p) :                                  \
++              __p = uaccess_mask_ptr(__p), __get_user((x), __p) :     \
+               ((x) = 0, -EFAULT);                                     \
+ })
+@@ -349,7 +369,7 @@ do {                                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?                    \
+-              __put_user((x), __p) :                                  \
++              __p = uaccess_mask_ptr(__p), __put_user((x), __p) :     \
+               -EFAULT;                                                \
+ })
+@@ -365,7 +385,7 @@ extern unsigned long __must_check __clea
+ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+ {
+       if (access_ok(VERIFY_WRITE, to, n))
+-              n = __clear_user(to, n);
++              n = __clear_user(__uaccess_mask_ptr(to), n);
+       return n;
+ }
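
The BICS/CSEL sequence computes the mask branchlessly; a C model of its
semantics follows (a sketch only, assuming addr_limit is an inclusive,
all-ones-up-to-the-boundary limit). The real helper stays in assembly so the
compiler cannot reintroduce a predictable branch, and csdb() then prevents the
result being consumed as a predicted data value.

/* C model of __uaccess_mask_ptr(): NULL if the pointer has any bit
 * set above the (inclusive) addr_limit, otherwise unchanged. */
static inline void __user *mask_ptr_model(const void __user *ptr)
{
	unsigned long limit = current_thread_info()->addr_limit;

	return ((unsigned long)ptr & ~limit) ? NULL : (void __user *)ptr;
}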
diff --git a/queue-4.15/arm64-use-ret-instruction-for-exiting-the-trampoline.patch b/queue-4.15/arm64-use-ret-instruction-for-exiting-the-trampoline.patch
new file mode 100644 (file)
index 0000000..aade44f
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Nov 2017 16:15:59 +0000
+Subject: [Variant 3/Meltdown] arm64: use RET instruction for exiting the trampoline
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit be04a6d1126b upstream.
+
+Speculation attacks against the entry trampoline can potentially resteer
+the speculative instruction stream through the indirect branch and into
+arbitrary gadgets within the kernel.
+
+This patch defends against these attacks by forcing a misprediction
+through the return stack: a dummy BL instruction loads an entry into
+the stack, so that the predicted program flow of the subsequent RET
+instruction is to a branch-to-self instruction which is finally resolved
+as a branch to the kernel vectors with speculation suppressed.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1029,6 +1029,14 @@ alternative_else_nop_endif
+       .if     \regsize == 64
+       msr     tpidrro_el0, x30        // Restored in kernel_ventry
+       .endif
++      /*
++       * Defend against branch aliasing attacks by pushing a dummy
++       * entry onto the return stack and using a RET instruction to
++       * enter the full-fat kernel vectors.
++       */
++      bl      2f
++      b       .
++2:
+       tramp_map_kernel        x30
+ #ifdef CONFIG_RANDOMIZE_BASE
+       adr     x30, tramp_vectors + PAGE_SIZE
+@@ -1041,7 +1049,7 @@ alternative_insn isb, nop, ARM64_WORKARO
+       msr     vbar_el1, x30
+       add     x30, x30, #(1b - tramp_vectors)
+       isb
+-      br      x30
++      ret
+       .endm
+       .macro tramp_exit, regsize = 64
diff --git a/queue-4.15/cifs-fix-autonegotiate-security-settings-mismatch.patch b/queue-4.15/cifs-fix-autonegotiate-security-settings-mismatch.patch
new file mode 100644 (file)
index 0000000..99706c4
--- /dev/null
@@ -0,0 +1,47 @@
+From 9aca7e454415f7878b28524e76bebe1170911a88 Mon Sep 17 00:00:00 2001
+From: Daniel N Pettersson <danielnp@axis.com>
+Date: Thu, 11 Jan 2018 16:00:12 +0100
+Subject: cifs: Fix autonegotiate security settings mismatch
+
+From: Daniel N Pettersson <danielnp@axis.com>
+
+commit 9aca7e454415f7878b28524e76bebe1170911a88 upstream.
+
+Autonegotiation gives a security settings mismatch error if the SMB
+server selects an SMBv3 dialect that isn't SMB3.02. The exact error is
+"protocol revalidation - security settings mismatch".
+This can be tested using Samba v4.2 or by setting the global Samba
+setting max protocol = SMB3_00.
+
+The check that fails in smb3_validate_negotiate is the dialect
+verification of the negotiate info response. This is because it tries
+to verify against the protocol_id in the global smbdefault_values. The
+protocol_id in smbdefault_values is SMB3.02.
+In SMB2_negotiate the protocol_id in smbdefault_values isn't updated
+(it is global, so it probably shouldn't be), but server->dialect is.
+
+This patch changes the check in smb3_validate_negotiate to use
+server->dialect instead of server->vals->protocol_id. The patch works
+with autonegotiate and when using a specific version in the vers mount
+option.
+
+Signed-off-by: Daniel N Pettersson <danielnp@axis.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -733,8 +733,7 @@ int smb3_validate_negotiate(const unsign
+       }
+       /* check validate negotiate info response matches what we got earlier */
+-      if (pneg_rsp->Dialect !=
+-                      cpu_to_le16(tcon->ses->server->vals->protocol_id))
++      if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
+               goto vneg_out;
+       if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
diff --git a/queue-4.15/cifs-fix-missing-put_xid-in-cifs_file_strict_mmap.patch b/queue-4.15/cifs-fix-missing-put_xid-in-cifs_file_strict_mmap.patch
new file mode 100644 (file)
index 0000000..294b13e
--- /dev/null
@@ -0,0 +1,74 @@
+From f04a703c3d613845ae3141bfaf223489de8ab3eb Mon Sep 17 00:00:00 2001
+From: Matthew Wilcox <mawilcox@microsoft.com>
+Date: Fri, 15 Dec 2017 12:48:32 -0800
+Subject: cifs: Fix missing put_xid in cifs_file_strict_mmap
+
+From: Matthew Wilcox <mawilcox@microsoft.com>
+
+commit f04a703c3d613845ae3141bfaf223489de8ab3eb upstream.
+
+If cifs_zap_mapping() returned an error, we would return without putting
+the xid that we got earlier.  Restructure cifs_file_strict_mmap() and
+cifs_file_mmap() to be more similar to each other and have a single
+point of return that always puts the xid.
+
+Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c |   26 ++++++++++++--------------
+ 1 file changed, 12 insertions(+), 14 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3471,20 +3471,18 @@ static const struct vm_operations_struct
+ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+-      int rc, xid;
++      int xid, rc = 0;
+       struct inode *inode = file_inode(file);
+       xid = get_xid();
+-      if (!CIFS_CACHE_READ(CIFS_I(inode))) {
++      if (!CIFS_CACHE_READ(CIFS_I(inode)))
+               rc = cifs_zap_mapping(inode);
+-              if (rc)
+-                      return rc;
+-      }
+-
+-      rc = generic_file_mmap(file, vma);
+-      if (rc == 0)
++      if (!rc)
++              rc = generic_file_mmap(file, vma);
++      if (!rc)
+               vma->vm_ops = &cifs_file_vm_ops;
++
+       free_xid(xid);
+       return rc;
+ }
+@@ -3494,16 +3492,16 @@ int cifs_file_mmap(struct file *file, st
+       int rc, xid;
+       xid = get_xid();
++
+       rc = cifs_revalidate_file(file);
+-      if (rc) {
++      if (rc)
+               cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
+                        rc);
+-              free_xid(xid);
+-              return rc;
+-      }
+-      rc = generic_file_mmap(file, vma);
+-      if (rc == 0)
++      if (!rc)
++              rc = generic_file_mmap(file, vma);
++      if (!rc)
+               vma->vm_ops = &cifs_file_vm_ops;
++
+       free_xid(xid);
+       return rc;
+ }
diff --git a/queue-4.15/cifs-zero-sensitive-data-when-freeing.patch b/queue-4.15/cifs-zero-sensitive-data-when-freeing.patch
new file mode 100644 (file)
index 0000000..06e264f
--- /dev/null
@@ -0,0 +1,96 @@
+From 97f4b7276b829a8927ac903a119bef2f963ccc58 Mon Sep 17 00:00:00 2001
+From: Aurelien Aptel <aaptel@suse.com>
+Date: Thu, 25 Jan 2018 15:59:39 +0100
+Subject: CIFS: zero sensitive data when freeing
+
+From: Aurelien Aptel <aaptel@suse.com>
+
+commit 97f4b7276b829a8927ac903a119bef2f963ccc58 upstream.
+
+This also replaces memset()+kfree() with kzfree().
+
+Signed-off-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsencrypt.c |    3 +--
+ fs/cifs/connect.c     |    6 +++---
+ fs/cifs/misc.c        |   14 ++++----------
+ 3 files changed, 8 insertions(+), 15 deletions(-)
+
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -325,9 +325,8 @@ int calc_lanman_hash(const char *passwor
+ {
+       int i;
+       int rc;
+-      char password_with_pad[CIFS_ENCPWD_SIZE];
++      char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
+-      memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+       if (password)
+               strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1707,7 +1707,7 @@ cifs_parse_mount_options(const char *mou
+                       tmp_end++;
+                       if (!(tmp_end < end && tmp_end[1] == delim)) {
+                               /* No it is not. Set the password to NULL */
+-                              kfree(vol->password);
++                              kzfree(vol->password);
+                               vol->password = NULL;
+                               break;
+                       }
+@@ -1745,7 +1745,7 @@ cifs_parse_mount_options(const char *mou
+                                       options = end;
+                       }
+-                      kfree(vol->password);
++                      kzfree(vol->password);
+                       /* Now build new password string */
+                       temp_len = strlen(value);
+                       vol->password = kzalloc(temp_len+1, GFP_KERNEL);
+@@ -4235,7 +4235,7 @@ cifs_construct_tcon(struct cifs_sb_info
+               reset_cifs_unix_caps(0, tcon, NULL, vol_info);
+ out:
+       kfree(vol_info->username);
+-      kfree(vol_info->password);
++      kzfree(vol_info->password);
+       kfree(vol_info);
+       return tcon;
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free
+       kfree(buf_to_free->serverOS);
+       kfree(buf_to_free->serverDomain);
+       kfree(buf_to_free->serverNOS);
+-      if (buf_to_free->password) {
+-              memset(buf_to_free->password, 0, strlen(buf_to_free->password));
+-              kfree(buf_to_free->password);
+-      }
++      kzfree(buf_to_free->password);
+       kfree(buf_to_free->user_name);
+       kfree(buf_to_free->domainName);
+-      kfree(buf_to_free->auth_key.response);
+-      kfree(buf_to_free);
++      kzfree(buf_to_free->auth_key.response);
++      kzfree(buf_to_free);
+ }
+ struct cifs_tcon *
+@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_fr
+       }
+       atomic_dec(&tconInfoAllocCount);
+       kfree(buf_to_free->nativeFileSystem);
+-      if (buf_to_free->password) {
+-              memset(buf_to_free->password, 0, strlen(buf_to_free->password));
+-              kfree(buf_to_free->password);
+-      }
++      kzfree(buf_to_free->password);
+       kfree(buf_to_free);
+ }
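
For reference, kzfree() zeroes the whole usable allocation before freeing it,
so it also clears bytes beyond the first NUL that the open-coded
memset(..., strlen(password)) left untouched. Its shape is roughly as below (a
sketch modeled on mm/slab_common.c; details such as the ZERO_OR_NULL_PTR check
are simplified here).

void kzfree_sketch(const void *p)
{
	void *mem = (void *)p;

	if (!mem)
		return;
	memset(mem, 0, ksize(mem));	/* whole allocation, not strlen() */
	kfree(mem);
}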
diff --git a/queue-4.15/cpufreq-mediatek-add-mediatek-related-projects-into-blacklist.patch b/queue-4.15/cpufreq-mediatek-add-mediatek-related-projects-into-blacklist.patch
new file mode 100644 (file)
index 0000000..54a894d
--- /dev/null
@@ -0,0 +1,40 @@
+From 6066998cbd2b1012a8d5bc9a2957cfd0ad53150e Mon Sep 17 00:00:00 2001
+From: Andrew-sh Cheng <andrew-sh.cheng@mediatek.com>
+Date: Fri, 8 Dec 2017 14:07:56 +0800
+Subject: cpufreq: mediatek: add mediatek related projects into blacklist
+
+From: Andrew-sh Cheng <andrew-sh.cheng@mediatek.com>
+
+commit 6066998cbd2b1012a8d5bc9a2957cfd0ad53150e upstream.
+
+MediaTek projects will use mediatek-cpufreq.c as their cpufreq driver
+instead of cpufreq-dt.c.
+Add the MediaTek-related projects to the cpufreq-dt blacklist.
+
+Signed-off-by: Andrew-sh Cheng <andrew-sh.cheng@mediatek.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq-dt-platdev.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -108,6 +108,14 @@ static const struct of_device_id blackli
+       { .compatible = "marvell,armadaxp", },
++      { .compatible = "mediatek,mt2701", },
++      { .compatible = "mediatek,mt2712", },
++      { .compatible = "mediatek,mt7622", },
++      { .compatible = "mediatek,mt7623", },
++      { .compatible = "mediatek,mt817x", },
++      { .compatible = "mediatek,mt8173", },
++      { .compatible = "mediatek,mt8176", },
++
+       { .compatible = "nvidia,tegra124", },
+       { .compatible = "st,stih407", },
diff --git a/queue-4.15/dmaengine-dmatest-fix-container_of-member-in-dmatest_callback.patch b/queue-4.15/dmaengine-dmatest-fix-container_of-member-in-dmatest_callback.patch
new file mode 100644 (file)
index 0000000..a43d191
--- /dev/null
@@ -0,0 +1,33 @@
+From 66b3bd2356e0a1531c71a3dcf96944621e25c17c Mon Sep 17 00:00:00 2001
+From: Yang Shunyong <shunyong.yang@hxt-semitech.com>
+Date: Mon, 29 Jan 2018 14:40:11 +0800
+Subject: dmaengine: dmatest: fix container_of member in dmatest_callback
+
+From: Yang Shunyong <shunyong.yang@hxt-semitech.com>
+
+commit 66b3bd2356e0a1531c71a3dcf96944621e25c17c upstream.
+
+The type of arg passed to dmatest_callback is struct dmatest_done.
+It refers to test_done in struct dmatest_thread, not done_wait.
+
+Fixes: 6f6a23a213be ("dmaengine: dmatest: move callback wait ...")
+Signed-off-by: Yang Shunyong <shunyong.yang@hxt-semitech.com>
+Acked-by: Adam Wallis <awallis@codeaurora.org>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dmatest.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
+ {
+       struct dmatest_done *done = arg;
+       struct dmatest_thread *thread =
+-              container_of(arg, struct dmatest_thread, done_wait);
++              container_of(done, struct dmatest_thread, test_done);
+       if (!thread->done) {
+               done->done = true;
+               wake_up_all(done->wait);
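
The mistake is easy to make because the callback argument arrives as void *,
so container_of() cannot type-check the member name; it only subtracts the
member's offset. A self-contained sketch of the mechanics, with hypothetical
struct members standing in for the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct thread_sketch {
	int pad[4];
	long done_wait;		/* the member named in the bug */
	long test_done;		/* the member 'arg' actually points at */
};

int main(void)
{
	struct thread_sketch t;
	void *arg = &t.test_done;	/* like dmatest_callback's arg */

	/* The wrong member compiles fine with a void * argument but
	 * yields a mis-offset pointer; the right member recovers &t. */
	printf("wrong: %p right: %p actual: %p\n",
	       (void *)container_of(arg, struct thread_sketch, done_wait),
	       (void *)container_of(arg, struct thread_sketch, test_done),
	       (void *)&t);
	return 0;
}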
diff --git a/queue-4.15/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch b/queue-4.15/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
new file mode 100644 (file)
index 0000000..204a3cb
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 2 Jan 2018 21:45:41 +0000
+Subject: [Variant 2/Spectre-v2] drivers/firmware: Expose psci_get_version through psci_ops structure
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit d68e3ba5303f upstream.
+
+Entry into recent versions of ARM Trusted Firmware will invalidate the CPU
+branch predictor state in order to protect against aliasing attacks.
+
+This patch exposes the PSCI "VERSION" function via psci_ops, so that it
+can be invoked outside of the PSCI driver where necessary.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci.c |    2 ++
+ include/linux/psci.h    |    1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -496,6 +496,8 @@ static void __init psci_init_migrate(voi
+ static void __init psci_0_2_set_functions(void)
+ {
+       pr_info("Using standard PSCI v0.2 function IDs\n");
++      psci_ops.get_version = psci_get_version;
++
+       psci_function_id[PSCI_FN_CPU_SUSPEND] =
+                                       PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
+       psci_ops.cpu_suspend = psci_cpu_suspend;
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu)
+ int psci_cpu_suspend_enter(unsigned long index);
+ struct psci_operations {
++      u32 (*get_version)(void);
+       int (*cpu_suspend)(u32 state, unsigned long entry_point);
+       int (*cpu_off)(u32 state);
+       int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
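
A sketch of the intended consumer (a hypothetical call site, not from the
patch): the hook is only populated once PSCI 0.2+ function IDs are selected,
so it must be NULL-checked; PSCI_VERSION_MAJOR/MINOR are the existing uapi
helpers.

if (psci_ops.get_version) {
	u32 ver = psci_ops.get_version();

	pr_info("PSCI v%d.%d\n",
		PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
}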
diff --git a/queue-4.15/firmware-psci-expose-psci-conduit.patch b/queue-4.15/firmware-psci-expose-psci-conduit.patch
new file mode 100644 (file)
index 0000000..b017afc
--- /dev/null
@@ -0,0 +1,109 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:16 +0000
+Subject: [Variant 2/Spectre-v2] firmware/psci: Expose PSCI conduit
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit 09a8d6d48499 upstream.
+
+In order to call into the firmware to apply workarounds, it is
+useful to find out whether we're using HVC or SMC. Let's expose
+this through the psci_ops.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci.c |   28 +++++++++++++++++++++++-----
+ include/linux/psci.h    |    7 +++++++
+ 2 files changed, 30 insertions(+), 5 deletions(-)
+
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -59,7 +59,9 @@ bool psci_tos_resident_on(int cpu)
+       return cpu == resident_cpu;
+ }
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops = {
++      .conduit = PSCI_CONDUIT_NONE,
++};
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+@@ -210,6 +212,22 @@ static unsigned long psci_migrate_info_u
+                             0, 0, 0);
+ }
++static void set_conduit(enum psci_conduit conduit)
++{
++      switch (conduit) {
++      case PSCI_CONDUIT_HVC:
++              invoke_psci_fn = __invoke_psci_fn_hvc;
++              break;
++      case PSCI_CONDUIT_SMC:
++              invoke_psci_fn = __invoke_psci_fn_smc;
++              break;
++      default:
++              WARN(1, "Unexpected PSCI conduit %d\n", conduit);
++      }
++
++      psci_ops.conduit = conduit;
++}
++
+ static int get_set_conduit_method(struct device_node *np)
+ {
+       const char *method;
+@@ -222,9 +240,9 @@ static int get_set_conduit_method(struct
+       }
+       if (!strcmp("hvc", method)) {
+-              invoke_psci_fn = __invoke_psci_fn_hvc;
++              set_conduit(PSCI_CONDUIT_HVC);
+       } else if (!strcmp("smc", method)) {
+-              invoke_psci_fn = __invoke_psci_fn_smc;
++              set_conduit(PSCI_CONDUIT_SMC);
+       } else {
+               pr_warn("invalid \"method\" property: %s\n", method);
+               return -EINVAL;
+@@ -654,9 +672,9 @@ int __init psci_acpi_init(void)
+       pr_info("probing for conduit method from ACPI.\n");
+       if (acpi_psci_use_hvc())
+-              invoke_psci_fn = __invoke_psci_fn_hvc;
++              set_conduit(PSCI_CONDUIT_HVC);
+       else
+-              invoke_psci_fn = __invoke_psci_fn_smc;
++              set_conduit(PSCI_CONDUIT_SMC);
+       return psci_probe();
+ }
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -25,6 +25,12 @@ bool psci_tos_resident_on(int cpu);
+ int psci_cpu_init_idle(unsigned int cpu);
+ int psci_cpu_suspend_enter(unsigned long index);
++enum psci_conduit {
++      PSCI_CONDUIT_NONE,
++      PSCI_CONDUIT_SMC,
++      PSCI_CONDUIT_HVC,
++};
++
+ struct psci_operations {
+       u32 (*get_version)(void);
+       int (*cpu_suspend)(u32 state, unsigned long entry_point);
+@@ -34,6 +40,7 @@ struct psci_operations {
+       int (*affinity_info)(unsigned long target_affinity,
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
++      enum psci_conduit conduit;
+ };
+ extern struct psci_operations psci_ops;
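
A sketch of the consumer pattern this enables, modeled on the arm64
branch-predictor hardening code elsewhere in this series (arm_smccc_1_1_hvc
and arm_smccc_1_1_smc are the SMCCC v1.1 inline primitives; a NULL result
pointer is accepted):

switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
	break;
case PSCI_CONDUIT_SMC:
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
	break;
default:
	break;	/* PSCI_CONDUIT_NONE: no firmware call available */
}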
diff --git a/queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch b/queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
new file mode 100644 (file)
index 0000000..8d8b952
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:17 +0000
+Subject: [Variant 2/Spectre-v2] firmware/psci: Expose SMCCC version through psci_ops
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+
+Commit e78eef554a91 upstream.
+
+Since PSCI 1.0 allows the SMCCC version to be (indirectly) probed,
+let's do that at boot time, and expose the version of the calling
+convention as part of the psci_ops structure.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci.c |   27 +++++++++++++++++++++++++++
+ include/linux/psci.h    |    6 ++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -61,6 +61,7 @@ bool psci_tos_resident_on(int cpu)
+ struct psci_operations psci_ops = {
+       .conduit = PSCI_CONDUIT_NONE,
++      .smccc_version = SMCCC_VERSION_1_0,
+ };
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+@@ -511,6 +512,31 @@ static void __init psci_init_migrate(voi
+       pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+ }
++static void __init psci_init_smccc(void)
++{
++      u32 ver = ARM_SMCCC_VERSION_1_0;
++      int feature;
++
++      feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
++
++      if (feature != PSCI_RET_NOT_SUPPORTED) {
++              u32 ret;
++              ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
++              if (ret == ARM_SMCCC_VERSION_1_1) {
++                      psci_ops.smccc_version = SMCCC_VERSION_1_1;
++                      ver = ret;
++              }
++      }
++
++      /*
++       * Conveniently, the SMCCC and PSCI versions are encoded the
++       * same way. No, this isn't accidental.
++       */
++      pr_info("SMC Calling Convention v%d.%d\n",
++              PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
++
++}
++
+ static void __init psci_0_2_set_functions(void)
+ {
+       pr_info("Using standard PSCI v0.2 function IDs\n");
+@@ -559,6 +585,7 @@ static int __init psci_probe(void)
+       psci_init_migrate();
+       if (PSCI_VERSION_MAJOR(ver) >= 1) {
++              psci_init_smccc();
+               psci_init_cpu_suspend();
+               psci_init_system_suspend();
+       }
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -31,6 +31,11 @@ enum psci_conduit {
+       PSCI_CONDUIT_HVC,
+ };
++enum smccc_version {
++      SMCCC_VERSION_1_0,
++      SMCCC_VERSION_1_1,
++};
++
+ struct psci_operations {
+       u32 (*get_version)(void);
+       int (*cpu_suspend)(u32 state, unsigned long entry_point);
+@@ -41,6 +46,7 @@ struct psci_operations {
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
+       enum psci_conduit conduit;
++      enum smccc_version smccc_version;
+ };
+ extern struct psci_operations psci_ops;
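
Consumers can then gate SMCCC v1.1-only fast calls on the probed value, for
example (a sketch of a fragment inside such a consumer; the enum ordering
makes the comparison valid):

if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
	return -EOPNOTSUPP;	/* no v1.1 primitives; take a fallback path */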
diff --git a/queue-4.15/media-dvb-usb-v2-lmedm04-improve-logic-checking-of-warm-start.patch b/queue-4.15/media-dvb-usb-v2-lmedm04-improve-logic-checking-of-warm-start.patch
new file mode 100644 (file)
index 0000000..d3610ce
--- /dev/null
@@ -0,0 +1,88 @@
+From 3d932ee27e852e4904647f15b64dedca51187ad7 Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Tue, 26 Sep 2017 17:10:20 -0400
+Subject: media: dvb-usb-v2: lmedm04: Improve logic checking of warm start
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 3d932ee27e852e4904647f15b64dedca51187ad7 upstream.
+
+Warm start does not check whether a genuine device is connected and
+proceeds to the next execution path.
+
+Check that the device returns 0x47 at offset 2 of the USB descriptor
+read, and that the transfer returns the full 6 bytes requested.
+
+Fix for
+kasan: CONFIG_KASAN_INLINE enabled
+kasan: GPF could be caused by NULL-ptr deref or user memory access as
+
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/dvb-usb-v2/lmedm04.c |   26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb
+ static int lme2510_return_status(struct dvb_usb_device *d)
+ {
+-      int ret = 0;
++      int ret;
+       u8 *data;
+-      data = kzalloc(10, GFP_KERNEL);
++      data = kzalloc(6, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+-      ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+-                      0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
+-      info("Firmware Status: %x (%x)", ret , data[2]);
++      ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
++                            0x06, 0x80, 0x0302, 0x00,
++                            data, 0x6, 200);
++      if (ret != 6)
++              ret = -EINVAL;
++      else
++              ret = data[2];
++
++      info("Firmware Status: %6ph", data);
+-      ret = (ret < 0) ? -ENODEV : data[2];
+       kfree(data);
+       return ret;
+ }
+@@ -1189,6 +1194,7 @@ static int lme2510_get_adapter_count(str
+ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
+ {
+       struct lme2510_state *st = d->priv;
++      int status;
+       usb_reset_configuration(d->udev);
+@@ -1197,12 +1203,16 @@ static int lme2510_identify_state(struct
+       st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
+-      if (lme2510_return_status(d) == 0x44) {
++      status = lme2510_return_status(d);
++      if (status == 0x44) {
+               *name = lme_firmware_switch(d, 0);
+               return COLD;
+       }
+-      return 0;
++      if (status != 0x47)
++              return -EINVAL;
++
++      return WARM;
+ }
+ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
diff --git a/queue-4.15/media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_lme2510_tuner.patch b/queue-4.15/media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_lme2510_tuner.patch
new file mode 100644 (file)
index 0000000..9329dc7
--- /dev/null
@@ -0,0 +1,72 @@
+From 7bf7a7116ed313c601307f7e585419369926ab05 Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Tue, 26 Sep 2017 17:10:21 -0400
+Subject: media: dvb-usb-v2: lmedm04: move ts2020 attach to dm04_lme2510_tuner
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 7bf7a7116ed313c601307f7e585419369926ab05 upstream.
+
+When the tuner was split from m88rs2000, the attach function was left
+in the wrong place.
+
+Move it to dm04_lme2510_tuner to trap errors on failure, removing the
+call to lme_coldreset.
+
+This prevents the driver from starting up without any tuner connected.
+
+It also traps the following ts2020 failure:
+LME2510(C): FE Found M88RS2000
+ts2020: probe of 0-0060 failed with error -11
+...
+LME2510(C): TUN Found RS2000 tuner
+kasan: CONFIG_KASAN_INLINE enabled
+kasan: GPF could be caused by NULL-ptr deref or user memory access
+general protection fault: 0000 [#1] PREEMPT SMP KASAN
+
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/dvb-usb-v2/lmedm04.c |   13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -1076,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(
+               if (adap->fe[0]) {
+                       info("FE Found M88RS2000");
+-                      dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
+-                                      &d->i2c_adap);
+                       st->i2c_tuner_gate_w = 5;
+                       st->i2c_tuner_gate_r = 5;
+                       st->i2c_tuner_addr = 0x60;
+@@ -1143,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb
+                       ret = st->tuner_config;
+               break;
+       case TUNER_RS2000:
+-              ret = st->tuner_config;
++              if (dvb_attach(ts2020_attach, adap->fe[0],
++                             &ts2020_config, &d->i2c_adap))
++                      ret = st->tuner_config;
+               break;
+       default:
+               break;
+       }
+-      if (ret)
++      if (ret) {
+               info("TUN Found %s tuner", tun_msg[ret]);
+-      else {
+-              info("TUN No tuner found --- resetting device");
+-              lme_coldreset(d);
++      } else {
++              info("TUN No tuner found");
+               return -ENODEV;
+       }
diff --git a/queue-4.15/media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch b/queue-4.15/media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch
new file mode 100644 (file)
index 0000000..8d20d09
--- /dev/null
@@ -0,0 +1,104 @@
+From c0f71bbb810237a38734607ca4599632f7f5d47f Mon Sep 17 00:00:00 2001
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Date: Fri, 22 Sep 2017 09:07:06 -0400
+Subject: media: hdpvr: Fix an error handling path in hdpvr_probe()
+
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+
+commit c0f71bbb810237a38734607ca4599632f7f5d47f upstream.
+
+Here, hdpvr_register_videodev() is responsible for setting up and
+registering a video device, and for defining and initializing a worker.
+It is the last thing hdpvr_probe() calls, so there is no work to flush
+here. Instead, if hdpvr_probe() fails, unregister the v4l2 device and
+free the buffers and memory on dedicated error paths.
+
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/hdpvr/hdpvr-core.c |   26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+--- a/drivers/media/usb/hdpvr/hdpvr-core.c
++++ b/drivers/media/usb/hdpvr/hdpvr-core.c
+@@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interf
+       /* register v4l2_device early so it can be used for printks */
+       if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
+               dev_err(&interface->dev, "v4l2_device_register failed\n");
+-              goto error;
++              goto error_free_dev;
+       }
+       mutex_init(&dev->io_mutex);
+@@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interf
+       dev->usbc_buf = kmalloc(64, GFP_KERNEL);
+       if (!dev->usbc_buf) {
+               v4l2_err(&dev->v4l2_dev, "Out of memory\n");
+-              goto error;
++              goto error_v4l2_unregister;
+       }
+       init_waitqueue_head(&dev->wait_buffer);
+@@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interf
+       }
+       if (!dev->bulk_in_endpointAddr) {
+               v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n");
+-              goto error;
++              goto error_put_usb;
+       }
+       /* init the device */
+       if (hdpvr_device_init(dev)) {
+               v4l2_err(&dev->v4l2_dev, "device init failed\n");
+-              goto error;
++              goto error_put_usb;
+       }
+       mutex_lock(&dev->io_mutex);
+@@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interf
+               mutex_unlock(&dev->io_mutex);
+               v4l2_err(&dev->v4l2_dev,
+                        "allocating transfer buffers failed\n");
+-              goto error;
++              goto error_put_usb;
+       }
+       mutex_unlock(&dev->io_mutex);
+@@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interf
+       retval = hdpvr_register_i2c_adapter(dev);
+       if (retval < 0) {
+               v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
+-              goto error;
++              goto error_free_buffers;
+       }
+       client = hdpvr_register_ir_rx_i2c(dev);
+@@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interf
+ reg_fail:
+ #if IS_ENABLED(CONFIG_I2C)
+       i2c_del_adapter(&dev->i2c_adapter);
++error_free_buffers:
+ #endif
++      hdpvr_free_buffers(dev);
++error_put_usb:
++      usb_put_dev(dev->udev);
++      kfree(dev->usbc_buf);
++error_v4l2_unregister:
++      v4l2_device_unregister(&dev->v4l2_dev);
++error_free_dev:
++      kfree(dev);
+ error:
+-      if (dev) {
+-              flush_work(&dev->worker);
+-              /* this frees allocated memory */
+-              hdpvr_delete(dev);
+-      }
+       return retval;
+ }
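
The restructured error handling follows the usual kernel unwind idiom: one
label per acquired resource, released in reverse order of acquisition, so each
failure site jumps to the label covering exactly what has been obtained so
far. A generic sketch with hypothetical helpers:

static int probe_sketch(struct device *dev)
{
	void *buf;
	int err;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = register_thing(dev);		/* hypothetical */
	if (err)
		goto err_free_buf;

	err = start_thing(dev);			/* hypothetical */
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	unregister_thing(dev);			/* hypothetical */
err_free_buf:
	kfree(buf);
	return err;
}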
diff --git a/queue-4.15/perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch b/queue-4.15/perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch
new file mode 100644 (file)
index 0000000..5638cdc
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Tue Feb 13 17:25:10 CET 2018
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 27 Nov 2017 15:49:53 +0000
+Subject: [Variant 3/Meltdown] perf: arm_spe: Fail device probe when arm64_kernel_unmapped_at_el0()
+
+From: Will Deacon <will.deacon@arm.com>
+
+
+Commit 7a4a0c1555b8 upstream.
+
+When running with the kernel unmapped whilst at EL0, the virtually-addressed
+SPE buffer is also unmapped, which can lead to buffer faults if userspace
+profiling is enabled and potentially also when writing back kernel samples
+unless an expensive drain operation is performed on exception return.
+
+For now, fail the SPE driver probe when arm64_kernel_unmapped_at_el0().
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/perf/arm_spe_pmu.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/perf/arm_spe_pmu.c
++++ b/drivers/perf/arm_spe_pmu.c
+@@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(s
+       struct arm_spe_pmu *spe_pmu;
+       struct device *dev = &pdev->dev;
++      /*
++       * If kernelspace is unmapped when running at EL0, then the SPE
++       * buffer will fault and prematurely terminate the AUX session.
++       */
++      if (arm64_kernel_unmapped_at_el0()) {
++              dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
++              return -EPERM;
++      }
++
+       spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
+       if (!spe_pmu) {
+               dev_err(dev, "failed to allocate spe_pmu\n");
diff --git a/queue-4.15/revert-drm-i915-mark-all-device-info-struct-with-__initconst.patch b/queue-4.15/revert-drm-i915-mark-all-device-info-struct-with-__initconst.patch
new file mode 100644 (file)
index 0000000..dd0a261
--- /dev/null
@@ -0,0 +1,394 @@
+From b5a756a722286af9702d565501e1f690d075d16b Mon Sep 17 00:00:00 2001
+From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Date: Mon, 29 Jan 2018 08:33:46 +0000
+Subject: Revert "drm/i915: mark all device info struct with __initconst"
+
+From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+
+commit b5a756a722286af9702d565501e1f690d075d16b upstream.
+
+This reverts commit 5b54eddd3920e9f6f1a6d972454baf350cbae77e.
+
+ Conflicts:
+       drivers/gpu/drm/i915/i915_pci.c
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104805
+Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Fixes: 5b54eddd3920 ("drm/i915: mark all device info struct with __initconst")
+Link: https://patchwork.freedesktop.org/patch/msgid/20180129083346.29173-1-lionel.g.landwerlin@intel.com
+(cherry picked from commit 5db47e37b38755c5e26e6b8fbc1a32ce73495940)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Ozkan Sezer <sezeroz@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_pci.c |   94 ++++++++++++++++++++--------------------
+ 1 file changed, 47 insertions(+), 47 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -74,19 +74,19 @@
+       GEN_DEFAULT_PAGE_SIZES, \
+       CURSOR_OFFSETS
+-static const struct intel_device_info intel_i830_info __initconst = {
++static const struct intel_device_info intel_i830_info = {
+       GEN2_FEATURES,
+       .platform = INTEL_I830,
+       .is_mobile = 1, .cursor_needs_physical = 1,
+       .num_pipes = 2, /* legal, last one wins */
+ };
+-static const struct intel_device_info intel_i845g_info __initconst = {
++static const struct intel_device_info intel_i845g_info = {
+       GEN2_FEATURES,
+       .platform = INTEL_I845G,
+ };
+-static const struct intel_device_info intel_i85x_info __initconst = {
++static const struct intel_device_info intel_i85x_info = {
+       GEN2_FEATURES,
+       .platform = INTEL_I85X, .is_mobile = 1,
+       .num_pipes = 2, /* legal, last one wins */
+@@ -94,7 +94,7 @@ static const struct intel_device_info in
+       .has_fbc = 1,
+ };
+-static const struct intel_device_info intel_i865g_info __initconst = {
++static const struct intel_device_info intel_i865g_info = {
+       GEN2_FEATURES,
+       .platform = INTEL_I865G,
+ };
+@@ -108,7 +108,7 @@ static const struct intel_device_info in
+       GEN_DEFAULT_PAGE_SIZES, \
+       CURSOR_OFFSETS
+-static const struct intel_device_info intel_i915g_info __initconst = {
++static const struct intel_device_info intel_i915g_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_I915G, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+@@ -116,7 +116,7 @@ static const struct intel_device_info in
+       .unfenced_needs_alignment = 1,
+ };
+-static const struct intel_device_info intel_i915gm_info __initconst = {
++static const struct intel_device_info intel_i915gm_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_I915GM,
+       .is_mobile = 1,
+@@ -128,7 +128,7 @@ static const struct intel_device_info in
+       .unfenced_needs_alignment = 1,
+ };
+-static const struct intel_device_info intel_i945g_info __initconst = {
++static const struct intel_device_info intel_i945g_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_I945G,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
+@@ -137,7 +137,7 @@ static const struct intel_device_info in
+       .unfenced_needs_alignment = 1,
+ };
+-static const struct intel_device_info intel_i945gm_info __initconst = {
++static const struct intel_device_info intel_i945gm_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_I945GM, .is_mobile = 1,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
+@@ -148,14 +148,14 @@ static const struct intel_device_info in
+       .unfenced_needs_alignment = 1,
+ };
+-static const struct intel_device_info intel_g33_info __initconst = {
++static const struct intel_device_info intel_g33_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_G33,
+       .has_hotplug = 1,
+       .has_overlay = 1,
+ };
+-static const struct intel_device_info intel_pineview_info __initconst = {
++static const struct intel_device_info intel_pineview_info = {
+       GEN3_FEATURES,
+       .platform = INTEL_PINEVIEW, .is_mobile = 1,
+       .has_hotplug = 1,
+@@ -172,7 +172,7 @@ static const struct intel_device_info in
+       GEN_DEFAULT_PAGE_SIZES, \
+       CURSOR_OFFSETS
+-static const struct intel_device_info intel_i965g_info __initconst = {
++static const struct intel_device_info intel_i965g_info = {
+       GEN4_FEATURES,
+       .platform = INTEL_I965G,
+       .has_overlay = 1,
+@@ -180,7 +180,7 @@ static const struct intel_device_info in
+       .has_snoop = false,
+ };
+-static const struct intel_device_info intel_i965gm_info __initconst = {
++static const struct intel_device_info intel_i965gm_info = {
+       GEN4_FEATURES,
+       .platform = INTEL_I965GM,
+       .is_mobile = 1, .has_fbc = 1,
+@@ -190,13 +190,13 @@ static const struct intel_device_info in
+       .has_snoop = false,
+ };
+-static const struct intel_device_info intel_g45_info __initconst = {
++static const struct intel_device_info intel_g45_info = {
+       GEN4_FEATURES,
+       .platform = INTEL_G45,
+       .ring_mask = RENDER_RING | BSD_RING,
+ };
+-static const struct intel_device_info intel_gm45_info __initconst = {
++static const struct intel_device_info intel_gm45_info = {
+       GEN4_FEATURES,
+       .platform = INTEL_GM45,
+       .is_mobile = 1, .has_fbc = 1,
+@@ -213,12 +213,12 @@ static const struct intel_device_info in
+       GEN_DEFAULT_PAGE_SIZES, \
+       CURSOR_OFFSETS
+-static const struct intel_device_info intel_ironlake_d_info __initconst = {
++static const struct intel_device_info intel_ironlake_d_info = {
+       GEN5_FEATURES,
+       .platform = INTEL_IRONLAKE,
+ };
+-static const struct intel_device_info intel_ironlake_m_info __initconst = {
++static const struct intel_device_info intel_ironlake_m_info = {
+       GEN5_FEATURES,
+       .platform = INTEL_IRONLAKE,
+       .is_mobile = 1, .has_fbc = 1,
+@@ -241,12 +241,12 @@ static const struct intel_device_info in
+       GEN6_FEATURES, \
+       .platform = INTEL_SANDYBRIDGE
+-static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
++static const struct intel_device_info intel_sandybridge_d_gt1_info = {
+       SNB_D_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
++static const struct intel_device_info intel_sandybridge_d_gt2_info = {
+       SNB_D_PLATFORM,
+       .gt = 2,
+ };
+@@ -257,12 +257,12 @@ static const struct intel_device_info in
+       .is_mobile = 1
+-static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
++static const struct intel_device_info intel_sandybridge_m_gt1_info = {
+       SNB_M_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
++static const struct intel_device_info intel_sandybridge_m_gt2_info = {
+       SNB_M_PLATFORM,
+       .gt = 2,
+ };
+@@ -286,12 +286,12 @@ static const struct intel_device_info in
+       .platform = INTEL_IVYBRIDGE, \
+       .has_l3_dpf = 1
+-static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
++static const struct intel_device_info intel_ivybridge_d_gt1_info = {
+       IVB_D_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
++static const struct intel_device_info intel_ivybridge_d_gt2_info = {
+       IVB_D_PLATFORM,
+       .gt = 2,
+ };
+@@ -302,17 +302,17 @@ static const struct intel_device_info in
+       .is_mobile = 1, \
+       .has_l3_dpf = 1
+-static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
++static const struct intel_device_info intel_ivybridge_m_gt1_info = {
+       IVB_M_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
++static const struct intel_device_info intel_ivybridge_m_gt2_info = {
+       IVB_M_PLATFORM,
+       .gt = 2,
+ };
+-static const struct intel_device_info intel_ivybridge_q_info __initconst = {
++static const struct intel_device_info intel_ivybridge_q_info = {
+       GEN7_FEATURES,
+       .platform = INTEL_IVYBRIDGE,
+       .gt = 2,
+@@ -320,7 +320,7 @@ static const struct intel_device_info in
+       .has_l3_dpf = 1,
+ };
+-static const struct intel_device_info intel_valleyview_info __initconst = {
++static const struct intel_device_info intel_valleyview_info = {
+       .platform = INTEL_VALLEYVIEW,
+       .gen = 7,
+       .is_lp = 1,
+@@ -356,17 +356,17 @@ static const struct intel_device_info in
+       .platform = INTEL_HASWELL, \
+       .has_l3_dpf = 1
+-static const struct intel_device_info intel_haswell_gt1_info __initconst = {
++static const struct intel_device_info intel_haswell_gt1_info = {
+       HSW_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_haswell_gt2_info __initconst = {
++static const struct intel_device_info intel_haswell_gt2_info = {
+       HSW_PLATFORM,
+       .gt = 2,
+ };
+-static const struct intel_device_info intel_haswell_gt3_info __initconst = {
++static const struct intel_device_info intel_haswell_gt3_info = {
+       HSW_PLATFORM,
+       .gt = 3,
+ };
+@@ -386,17 +386,17 @@ static const struct intel_device_info in
+       .gen = 8, \
+       .platform = INTEL_BROADWELL
+-static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
++static const struct intel_device_info intel_broadwell_gt1_info = {
+       BDW_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
++static const struct intel_device_info intel_broadwell_gt2_info = {
+       BDW_PLATFORM,
+       .gt = 2,
+ };
+-static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
++static const struct intel_device_info intel_broadwell_rsvd_info = {
+       BDW_PLATFORM,
+       .gt = 3,
+       /* According to the device ID those devices are GT3, they were
+@@ -404,13 +404,13 @@ static const struct intel_device_info in
+        */
+ };
+-static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
++static const struct intel_device_info intel_broadwell_gt3_info = {
+       BDW_PLATFORM,
+       .gt = 3,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ };
+-static const struct intel_device_info intel_cherryview_info __initconst = {
++static const struct intel_device_info intel_cherryview_info = {
+       .gen = 8, .num_pipes = 3,
+       .has_hotplug = 1,
+       .is_lp = 1,
+@@ -453,12 +453,12 @@ static const struct intel_device_info in
+       .gen = 9, \
+       .platform = INTEL_SKYLAKE
+-static const struct intel_device_info intel_skylake_gt1_info __initconst = {
++static const struct intel_device_info intel_skylake_gt1_info = {
+       SKL_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_skylake_gt2_info __initconst = {
++static const struct intel_device_info intel_skylake_gt2_info = {
+       SKL_PLATFORM,
+       .gt = 2,
+ };
+@@ -468,12 +468,12 @@ static const struct intel_device_info in
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+-static const struct intel_device_info intel_skylake_gt3_info __initconst = {
++static const struct intel_device_info intel_skylake_gt3_info = {
+       SKL_GT3_PLUS_PLATFORM,
+       .gt = 3,
+ };
+-static const struct intel_device_info intel_skylake_gt4_info __initconst = {
++static const struct intel_device_info intel_skylake_gt4_info = {
+       SKL_GT3_PLUS_PLATFORM,
+       .gt = 4,
+ };
+@@ -509,13 +509,13 @@ static const struct intel_device_info in
+       IVB_CURSOR_OFFSETS, \
+       BDW_COLORS
+-static const struct intel_device_info intel_broxton_info __initconst = {
++static const struct intel_device_info intel_broxton_info = {
+       GEN9_LP_FEATURES,
+       .platform = INTEL_BROXTON,
+       .ddb_size = 512,
+ };
+-static const struct intel_device_info intel_geminilake_info __initconst = {
++static const struct intel_device_info intel_geminilake_info = {
+       GEN9_LP_FEATURES,
+       .platform = INTEL_GEMINILAKE,
+       .ddb_size = 1024,
+@@ -527,17 +527,17 @@ static const struct intel_device_info in
+       .gen = 9, \
+       .platform = INTEL_KABYLAKE
+-static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
++static const struct intel_device_info intel_kabylake_gt1_info = {
+       KBL_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
++static const struct intel_device_info intel_kabylake_gt2_info = {
+       KBL_PLATFORM,
+       .gt = 2,
+ };
+-static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
++static const struct intel_device_info intel_kabylake_gt3_info = {
+       KBL_PLATFORM,
+       .gt = 3,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+@@ -548,17 +548,17 @@ static const struct intel_device_info in
+       .gen = 9, \
+       .platform = INTEL_COFFEELAKE
+-static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
++static const struct intel_device_info intel_coffeelake_gt1_info = {
+       CFL_PLATFORM,
+       .gt = 1,
+ };
+-static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
++static const struct intel_device_info intel_coffeelake_gt2_info = {
+       CFL_PLATFORM,
+       .gt = 2,
+ };
+-static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
++static const struct intel_device_info intel_coffeelake_gt3_info = {
+       CFL_PLATFORM,
+       .gt = 3,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+@@ -569,7 +569,7 @@ static const struct intel_device_info in
+       .ddb_size = 1024, \
+       GLK_COLORS
+-static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
++static const struct intel_device_info intel_cannonlake_gt2_info = {
+       GEN10_FEATURES,
+       .is_alpha_support = 1,
+       .platform = INTEL_CANNONLAKE,
diff --git a/queue-4.15/sched-rt-up-the-root-domain-ref-count-when-passing-it-around-via-ipis.patch b/queue-4.15/sched-rt-up-the-root-domain-ref-count-when-passing-it-around-via-ipis.patch
new file mode 100644 (file)
index 0000000..9f80c20
--- /dev/null
@@ -0,0 +1,101 @@
+From 364f56653708ba8bcdefd4f0da2a42904baa8eeb Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Tue, 23 Jan 2018 20:45:38 -0500
+Subject: sched/rt: Up the root domain ref count when passing it around via IPIs
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 364f56653708ba8bcdefd4f0da2a42904baa8eeb upstream.
+
+When issuing an IPI RT push, where an IPI is sent to each CPU that has more
+than one RT task scheduled on it, it references the root domain's rto_mask,
+that contains all the CPUs within the root domain that has more than one RT
+task in the runable state. The problem is, after the IPIs are initiated, the
+rq->lock is released. This means that the root domain that is associated to
+the run queue could be freed while the IPIs are going around.
+
+Add a sched_get_rd() and a sched_put_rd() that will increment and decrement
+the root domain's ref count respectively. This way when initiating the IPIs,
+the scheduler will up the root domain's ref count before releasing the
+rq->lock, ensuring that the root domain does not go away until the IPI round
+is complete.
+
+Reported-by: Pavan Kondeti <pkondeti@codeaurora.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic")
+Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtNA@mail.gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c       |    9 +++++++--
+ kernel/sched/sched.h    |    2 ++
+ kernel/sched/topology.c |   13 +++++++++++++
+ 3 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1990,8 +1990,11 @@ static void tell_cpu_to_push(struct rq *
+       rto_start_unlock(&rq->rd->rto_loop_start);
+-      if (cpu >= 0)
++      if (cpu >= 0) {
++              /* Make sure the rd does not get freed while pushing */
++              sched_get_rd(rq->rd);
+               irq_work_queue_on(&rq->rd->rto_push_work, cpu);
++      }
+ }
+ /* Called from hardirq context */
+@@ -2021,8 +2024,10 @@ void rto_push_irq_work_func(struct irq_w
+       raw_spin_unlock(&rd->rto_lock);
+-      if (cpu < 0)
++      if (cpu < 0) {
++              sched_put_rd(rd);
+               return;
++      }
+       /* Try the next RT overloaded CPU */
+       irq_work_queue_on(&rd->rto_push_work, cpu);
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -665,6 +665,8 @@ extern struct mutex sched_domains_mutex;
+ extern void init_defrootdomain(void);
+ extern int sched_init_domains(const struct cpumask *cpu_map);
+ extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
++extern void sched_get_rd(struct root_domain *rd);
++extern void sched_put_rd(struct root_domain *rd);
+ #ifdef HAVE_RT_PUSH_IPI
+ extern void rto_push_irq_work_func(struct irq_work *work);
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -259,6 +259,19 @@ void rq_attach_root(struct rq *rq, struc
+               call_rcu_sched(&old_rd->rcu, free_rootdomain);
+ }
++void sched_get_rd(struct root_domain *rd)
++{
++      atomic_inc(&rd->refcount);
++}
++
++void sched_put_rd(struct root_domain *rd)
++{
++      if (!atomic_dec_and_test(&rd->refcount))
++              return;
++
++      call_rcu_sched(&rd->rcu, free_rootdomain);
++}
++
+ static int init_rootdomain(struct root_domain *rd)
+ {
+       if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
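
The shape of the fix is the standard pin-before-hand-off rule: take a
reference before queuing asynchronous work that can outlive the lock you
currently hold, and drop it when that work completes. Condensed from the
hunks above:

/* Producer: pin the root domain before the IPI round starts. */
sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);

/* Consumer (rto_push_irq_work_func): drop the pin once no CPU is left. */
if (cpu < 0) {
	sched_put_rd(rd);
	return;
}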
diff --git a/queue-4.15/sched-rt-use-container_of-to-get-root-domain-in-rto_push_irq_work_func.patch b/queue-4.15/sched-rt-use-container_of-to-get-root-domain-in-rto_push_irq_work_func.patch
new file mode 100644 (file)
index 0000000..77ecdff
--- /dev/null
@@ -0,0 +1,94 @@
+From ad0f1d9d65938aec72a698116cd73a980916895e Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Tue, 23 Jan 2018 20:45:37 -0500
+Subject: sched/rt: Use container_of() to get root domain in rto_push_irq_work_func()
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit ad0f1d9d65938aec72a698116cd73a980916895e upstream.
+
+When the rto_push_irq_work_func() is called, it looks at the RT overloaded
+bitmask in the root domain via the runqueue (rq->rd). The problem is that
+during CPU up and down, nothing here stops rq->rd from changing between
+taking the rq->rd->rto_lock and releasing it. That means the lock that is
+released is not the same lock that was taken.
+
+Instead of using this_rq()->rd to get the root domain, as the irq work is
+part of the root domain, we can simply get the root domain from the irq work
+that is passed to the routine:
+
+ container_of(work, struct root_domain, rto_push_work)
+
+This keeps the root domain consistent.
+
+Reported-by: Pavan Kondeti <pkondeti@codeaurora.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic")
+Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtNA@mail.gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
+  * the rt_loop_next will cause the iterator to perform another scan.
+  *
+  */
+-static int rto_next_cpu(struct rq *rq)
++static int rto_next_cpu(struct root_domain *rd)
+ {
+-      struct root_domain *rd = rq->rd;
+       int next;
+       int cpu;
+@@ -1985,7 +1984,7 @@ static void tell_cpu_to_push(struct rq *
+        * Otherwise it is finishing up and an ipi needs to be sent.
+        */
+       if (rq->rd->rto_cpu < 0)
+-              cpu = rto_next_cpu(rq);
++              cpu = rto_next_cpu(rq->rd);
+       raw_spin_unlock(&rq->rd->rto_lock);
+@@ -1998,6 +1997,8 @@ static void tell_cpu_to_push(struct rq *
+ /* Called from hardirq context */
+ void rto_push_irq_work_func(struct irq_work *work)
+ {
++      struct root_domain *rd =
++              container_of(work, struct root_domain, rto_push_work);
+       struct rq *rq;
+       int cpu;
+@@ -2013,18 +2014,18 @@ void rto_push_irq_work_func(struct irq_w
+               raw_spin_unlock(&rq->lock);
+       }
+-      raw_spin_lock(&rq->rd->rto_lock);
++      raw_spin_lock(&rd->rto_lock);
+       /* Pass the IPI to the next rt overloaded queue */
+-      cpu = rto_next_cpu(rq);
++      cpu = rto_next_cpu(rd);
+-      raw_spin_unlock(&rq->rd->rto_lock);
++      raw_spin_unlock(&rd->rto_lock);
+       if (cpu < 0)
+               return;
+       /* Try the next RT overloaded CPU */
+-      irq_work_queue_on(&rq->rd->rto_push_work, cpu);
++      irq_work_queue_on(&rd->rto_push_work, cpu);
+ }
+ #endif /* HAVE_RT_PUSH_IPI */
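
The whole fix hinges on container_of(): because rto_push_work is
embedded in struct root_domain, a pointer to the work item identifies
its root domain unambiguously, with no detour through rq->rd. A
self-contained illustration of the pattern, with simplified stand-in
types (the kernel's container_of() additionally type-checks the
member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work {
	void (*func)(struct irq_work *work);
};

struct root_domain {
	int rto_cpu;
	struct irq_work rto_push_work;	/* embedded work item */
};

static void rto_push_func(struct irq_work *work)
{
	/* Recover the enclosing root domain from the member pointer. */
	struct root_domain *rd =
		container_of(work, struct root_domain, rto_push_work);

	printf("rto_cpu = %d\n", rd->rto_cpu);
}

int main(void)
{
	struct root_domain rd = { .rto_cpu = 3 };

	rd.rto_push_work.func = rto_push_func;
	rd.rto_push_work.func(&rd.rto_push_work);	/* prints rto_cpu = 3 */
	return 0;
}
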
diff --git a/queue-4.15/series b/queue-4.15/series
index 937b4c3ec297112e209b050eb851264b1c550a7b..02b35578fcf8a1d70c2a5338aea8fee41be3c58d 100644
@@ -1 +1,87 @@
 watchdog-indydog-add-dependency-on-sgi_has_indydog.patch
+cifs-fix-missing-put_xid-in-cifs_file_strict_mmap.patch
+cifs-fix-autonegotiate-security-settings-mismatch.patch
+cifs-zero-sensitive-data-when-freeing.patch
+cpufreq-mediatek-add-mediatek-related-projects-into-blacklist.patch
+dmaengine-dmatest-fix-container_of-member-in-dmatest_callback.patch
+ssb-do-not-disable-pci-host-on-non-mips.patch
+watchdog-gpio_wdt-set-wdog_hw_running-in-gpio_wdt_stop.patch
+revert-drm-i915-mark-all-device-info-struct-with-__initconst.patch
+sched-rt-use-container_of-to-get-root-domain-in-rto_push_irq_work_func.patch
+sched-rt-up-the-root-domain-ref-count-when-passing-it-around-via-ipis.patch
+media-dvb-usb-v2-lmedm04-improve-logic-checking-of-warm-start.patch
+media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_lme2510_tuner.patch
+media-hdpvr-fix-an-error-handling-path-in-hdpvr_probe.patch
+arm64-mm-use-non-global-mappings-for-kernel-space.patch
+arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
+arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
+arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
+arm64-mm-rename-post_ttbr0_update_workaround.patch
+arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
+arm64-mm-allocate-asids-in-pairs.patch
+arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
+arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
+arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
+arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
+arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
+arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
+arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
+arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
+arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
+arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
+arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
+arm64-use-ret-instruction-for-exiting-the-trampoline.patch
+arm64-kconfig-add-config_unmap_kernel_at_el0.patch
+arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
+arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
+arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
+arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
+arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
+arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
+arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
+arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
+arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
+arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
+arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
+arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
+arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
+perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch
+arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
+arm64-implement-array_index_mask_nospec.patch
+arm64-make-user_ds-an-inclusive-limit.patch
+arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
+arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
+arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
+arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
+arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
+arm64-futex-mask-__user-pointers-prior-to-dereference.patch
+arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
+arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
+arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
+drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
+arm64-move-post_ttbr_update_workaround-to-c-code.patch
+arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
+arm64-move-bp-hardening-to-check_and_switch_context.patch
+arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
+arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
+arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
+arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
+arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
+arm64-implement-branch-predictor-hardening-for-falkor.patch
+arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
+arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
+arm-arm64-kvm-consolidate-the-psci-include-files.patch
+arm-arm64-kvm-add-psci_version-helper.patch
+arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
+arm-arm64-kvm-implement-psci-1.0-support.patch
+arm-arm64-kvm-advertise-smccc-v1.1.patch
+arm64-kvm-make-psci_version-a-fast-path.patch
+arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
+arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
+arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
+firmware-psci-expose-psci-conduit.patch
+firmware-psci-expose-smccc-version-through-psci_ops.patch
+arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
+arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
+arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
+arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
diff --git a/queue-4.15/ssb-do-not-disable-pci-host-on-non-mips.patch b/queue-4.15/ssb-do-not-disable-pci-host-on-non-mips.patch
new file mode 100644
index 0000000..ab0212a
--- /dev/null
@@ -0,0 +1,35 @@
+From a9e6d44ddeccd3522670e641f1ed9b068e746ff7 Mon Sep 17 00:00:00 2001
+From: Sven Joachim <svenjoac@gmx.de>
+Date: Fri, 26 Jan 2018 10:38:01 +0100
+Subject: ssb: Do not disable PCI host on non-Mips
+
+From: Sven Joachim <svenjoac@gmx.de>
+
+commit a9e6d44ddeccd3522670e641f1ed9b068e746ff7 upstream.
+
+After upgrading an old laptop to 4.15-rc9, I found that the eth0 and
+wlan0 interfaces had disappeared.  It turns out that the b43 and b44
+drivers require SSB_PCIHOST_POSSIBLE which depends on
+PCI_DRIVERS_LEGACY, a config option that only exists on Mips.
+
+Fixes: 58eae1416b80 ("ssb: Disable PCI host for PCI_DRIVERS_GENERIC")
+Signed-off-by: Sven Joachim <svenjoac@gmx.de>
+Reviewed-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ssb/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ssb/Kconfig
++++ b/drivers/ssb/Kconfig
+@@ -32,7 +32,7 @@ config SSB_BLOCKIO
+ config SSB_PCIHOST_POSSIBLE
+       bool
+-      depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
++      depends on SSB && (PCI = y || PCI = SSB) && (PCI_DRIVERS_LEGACY || !MIPS)
+       default y
+ config SSB_PCIHOST
diff --git a/queue-4.15/watchdog-gpio_wdt-set-wdog_hw_running-in-gpio_wdt_stop.patch b/queue-4.15/watchdog-gpio_wdt-set-wdog_hw_running-in-gpio_wdt_stop.patch
new file mode 100644
index 0000000..a4e9f47
--- /dev/null
@@ -0,0 +1,57 @@
+From bc137dfdbec27c0ec5731a89002daded4a4aa1ea Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+Date: Thu, 9 Nov 2017 14:39:55 +0100
+Subject: watchdog: gpio_wdt: set WDOG_HW_RUNNING in gpio_wdt_stop
+
+From: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+
+commit bc137dfdbec27c0ec5731a89002daded4a4aa1ea upstream.
+
+An earlier proposed patch (https://patchwork.kernel.org/patch/9970181/)
+makes the oops go away, but it just papers over the problem. The real
+problem is that the watchdog core clears WDOG_HW_RUNNING in
+watchdog_stop, and the gpio driver fails to set it in its stop
+function when it doesn't actually stop it. This means that the core
+doesn't know that it now has responsibility for petting the device, in
+turn causing the device to reset the system (I hadn't noticed this
+because the board I'm working on has that reset logic disabled).
+
+This addresses the underlying problem instead (other drivers may of
+course have the same issue; I haven't checked). One might say that
+->stop should return an error when the device can't be stopped, but on
+the other hand this brings parity between a device without a ->stop
+method and a GPIO watchdog that has always-running set. In other
+words, I think ->stop should only return an error when an actual
+attempt to stop the hardware failed.
+
+From: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+
+The watchdog framework clears WDOG_HW_RUNNING before calling
+->stop. If the driver is unable to stop the device, it is supposed to
+set that bit again so that the watchdog core takes care of sending
+heart-beats while the device is not open from user-space. Update the
+gpio_wdt driver to honour that contract (and get rid of the redundant
+clearing of WDOG_HW_RUNNING).
+
+Fixes: 3c10bbde10 ("watchdog: core: Clear WDOG_HW_RUNNING before calling the stop function")
+Signed-off-by: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@iguana.be>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
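
The contract is easy to model: the core clears WDOG_HW_RUNNING before
calling ->stop, and a driver that cannot actually stop the hardware
must set the bit again so the core keeps pinging on its behalf. A
userspace sketch of that hand-off, with hypothetical names rather than
the kernel watchdog API:

#include <stdbool.h>
#include <stdio.h>

#define WDOG_HW_RUNNING (1u << 0)

struct wdog {
	unsigned int status;
	bool always_running;	/* hardware cannot be stopped */
};

static int wdog_stop(struct wdog *wdd)
{
	if (!wdd->always_running) {
		/* ... actually disable the hardware here ... */
	} else {
		/* Hardware still ticks: tell the core to keep petting it. */
		wdd->status |= WDOG_HW_RUNNING;
	}
	return 0;
}

int main(void)
{
	struct wdog wdd = { .status = WDOG_HW_RUNNING, .always_running = true };

	wdd.status &= ~WDOG_HW_RUNNING;	/* core clears the bit first ... */
	wdog_stop(&wdd);		/* ... then calls the driver's ->stop */
	printf("core must ping: %s\n",
	       (wdd.status & WDOG_HW_RUNNING) ? "yes" : "no");
	return 0;
}
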
+---
+ drivers/watchdog/gpio_wdt.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/watchdog/gpio_wdt.c
++++ b/drivers/watchdog/gpio_wdt.c
+@@ -80,7 +80,8 @@ static int gpio_wdt_stop(struct watchdog
+       if (!priv->always_running) {
+               gpio_wdt_disable(priv);
+-              clear_bit(WDOG_HW_RUNNING, &wdd->status);
++      } else {
++              set_bit(WDOG_HW_RUNNING, &wdd->status);
+       }
+       return 0;