git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Jul 2018 10:04:51 +0000 (12:04 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Jul 2018 10:04:51 +0000 (12:04 +0200)
added patches:
arm-arm64-smccc-add-smccc-specific-return-codes.patch
arm64-add-arch_workaround_2-probing.patch
arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch
arm64-add-ssbd-command-line-option.patch
arm64-alternatives-add-dynamic-patching-feature.patch
arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch
arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch
arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch
arm64-kvm-add-arch_workaround_2-support-for-guests.patch
arm64-kvm-add-hyp-per-cpu-accessors.patch
arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch
arm64-ssbd-add-global-mitigation-state-accessor.patch
arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch
arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch
arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch
arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch
autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch
bdi-fix-another-oops-in-wb_workfn.patch
keys-dns-fix-parsing-multiple-options.patch
kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch
kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch
kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch
kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch
kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch
kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch
net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
netfilter-ebtables-reject-non-bridge-targets.patch
netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch
nsh-set-mac-len-based-on-inner-packet.patch
rds-avoid-unenecessary-cong_update-in-loop-transport.patch
reiserfs-fix-buffer-overflow-with-long-warning-messages.patch
tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch

33 files changed:
queue-4.14/arm-arm64-smccc-add-smccc-specific-return-codes.patch [new file with mode: 0644]
queue-4.14/arm64-add-arch_workaround_2-probing.patch [new file with mode: 0644]
queue-4.14/arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch [new file with mode: 0644]
queue-4.14/arm64-add-ssbd-command-line-option.patch [new file with mode: 0644]
queue-4.14/arm64-alternatives-add-dynamic-patching-feature.patch [new file with mode: 0644]
queue-4.14/arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch [new file with mode: 0644]
queue-4.14/arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch [new file with mode: 0644]
queue-4.14/arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch [new file with mode: 0644]
queue-4.14/arm64-kvm-add-arch_workaround_2-support-for-guests.patch [new file with mode: 0644]
queue-4.14/arm64-kvm-add-hyp-per-cpu-accessors.patch [new file with mode: 0644]
queue-4.14/arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-add-global-mitigation-state-accessor.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch [new file with mode: 0644]
queue-4.14/autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch [new file with mode: 0644]
queue-4.14/bdi-fix-another-oops-in-wb_workfn.patch [new file with mode: 0644]
queue-4.14/keys-dns-fix-parsing-multiple-options.patch [new file with mode: 0644]
queue-4.14/kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch [new file with mode: 0644]
queue-4.14/kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch [new file with mode: 0644]
queue-4.14/kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch [new file with mode: 0644]
queue-4.14/kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch [new file with mode: 0644]
queue-4.14/kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch [new file with mode: 0644]
queue-4.14/kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch [new file with mode: 0644]
queue-4.14/net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch [new file with mode: 0644]
queue-4.14/netfilter-ebtables-reject-non-bridge-targets.patch [new file with mode: 0644]
queue-4.14/netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch [new file with mode: 0644]
queue-4.14/nsh-set-mac-len-based-on-inner-packet.patch [new file with mode: 0644]
queue-4.14/rds-avoid-unenecessary-cong_update-in-loop-transport.patch [new file with mode: 0644]
queue-4.14/reiserfs-fix-buffer-overflow-with-long-warning-messages.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch [new file with mode: 0644]

diff --git a/queue-4.14/arm-arm64-smccc-add-smccc-specific-return-codes.patch b/queue-4.14/arm-arm64-smccc-add-smccc-specific-return-codes.patch
new file mode 100644 (file)
index 0000000..40cbf96
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:59 +0100
+Subject: arm/arm64: smccc: Add SMCCC-specific return codes
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-10-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit eff0e9e1078ea7dc1d794dc50e31baef984c46d7 upstream.
+
+We've so far used the PSCI return codes for SMCCC because they
+were extremely similar. But with the new ARM DEN 0070A specification,
+"NOT_REQUIRED" (-2) is clashing with PSCI's "PSCI_RET_INVALID_PARAMS".
+
+Let's bite the bullet and add SMCCC specific return codes. Users
+can be repainted as and when required.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -291,5 +291,10 @@ asmlinkage void __arm_smccc_hvc(unsigned
+  */
+ #define arm_smccc_1_1_hvc(...)        __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
++/* Return codes defined in ARM DEN 0070A */
++#define SMCCC_RET_SUCCESS                     0
++#define SMCCC_RET_NOT_SUPPORTED                       -1
++#define SMCCC_RET_NOT_REQUIRED                        -2
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
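For orientation, a minimal sketch (not part of the queued patch; the helper name is illustrative and the conduit is assumed to be SMC) of how these return codes are meant to be interpreted when a caller probes for ARCH_WORKAROUND_2 through ARCH_FEATURES_FUNC_ID, mirroring the has_ssbd_mitigation() logic further down in this queue:

        /* Illustrative helper only: map the new SMCCC return codes from an
         * ARCH_WORKAROUND_2 feature query.
         */
        static bool ssbd_firmware_mitigation_available(void)
        {
                struct arm_smccc_res res;

                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);

                switch ((int)res.a0) {
                case SMCCC_RET_SUCCESS:         /* workaround present and required */
                        return true;
                case SMCCC_RET_NOT_REQUIRED:    /* mitigated elsewhere, nothing to do */
                case SMCCC_RET_NOT_SUPPORTED:   /* firmware offers no workaround */
                default:
                        return false;
                }
        }
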
diff --git a/queue-4.14/arm64-add-arch_workaround_2-probing.patch b/queue-4.14/arm64-add-arch_workaround_2-probing.patch
new file mode 100644 (file)
index 0000000..1f79375
--- /dev/null
@@ -0,0 +1,147 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:02 +0100
+Subject: arm64: Add ARCH_WORKAROUND_2 probing
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-13-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a725e3dda1813ed306734823ac4c65ca04e38500 upstream.
+
+As for Spectre variant-2, we rely on SMCCC 1.1 to provide the
+discovery mechanism for detecting the SSBD mitigation.
+
+A new capability is also allocated for that purpose, and a
+config option.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig               |    9 +++++
+ arch/arm64/include/asm/cpucaps.h |    3 +
+ arch/arm64/kernel/cpu_errata.c   |   69 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 80 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -849,6 +849,15 @@ config HARDEN_BRANCH_PREDICTOR
+         If unsure, say Y.
++config ARM64_SSBD
++      bool "Speculative Store Bypass Disable" if EXPERT
++      default y
++      help
++        This enables mitigation of the bypassing of previous stores
++        by speculative loads.
++
++        If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+       bool "Emulate deprecated/obsolete ARMv8 instructions"
+       depends on COMPAT
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -43,7 +43,8 @@
+ #define ARM64_UNMAP_KERNEL_AT_EL0             23
+ #define ARM64_HARDEN_BRANCH_PREDICTOR         24
+ #define ARM64_HARDEN_BP_POST_GUEST_EXIT               25
++#define ARM64_SSBD                            26
+-#define ARM64_NCAPS                           26
++#define ARM64_NCAPS                           27
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -252,6 +252,67 @@ void __init arm64_update_smccc_conduit(s
+       *updptr = cpu_to_le32(insn);
+ }
++
++static void arm64_set_ssbd_mitigation(bool state)
++{
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_HVC:
++              arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++              break;
++
++      case PSCI_CONDUIT_SMC:
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++              break;
++
++      default:
++              WARN_ON_ONCE(1);
++              break;
++      }
++}
++
++static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
++                                  int scope)
++{
++      struct arm_smccc_res res;
++      bool supported = true;
++
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++              return false;
++
++      /*
++       * The probe function return value is either negative
++       * (unsupported or mitigated), positive (unaffected), or zero
++       * (requires mitigation). We only need to do anything in the
++       * last case.
++       */
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_HVC:
++              arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++              if ((int)res.a0 != 0)
++                      supported = false;
++              break;
++
++      case PSCI_CONDUIT_SMC:
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++              if ((int)res.a0 != 0)
++                      supported = false;
++              break;
++
++      default:
++              supported = false;
++      }
++
++      if (supported) {
++              __this_cpu_write(arm64_ssbd_callback_required, 1);
++              arm64_set_ssbd_mitigation(true);
++      }
++
++      return supported;
++}
+ #endif        /* CONFIG_ARM64_SSBD */
+ #define MIDR_RANGE(model, min, max) \
+@@ -452,6 +513,14 @@ const struct arm64_cpu_capabilities arm6
+               .enable = enable_smccc_arch_workaround_1,
+       },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
++      {
++              .desc = "Speculative Store Bypass Disable",
++              .def_scope = SCOPE_LOCAL_CPU,
++              .capability = ARM64_SSBD,
++              .matches = has_ssbd_mitigation,
++      },
++#endif
+       {
+       }
+ };
diff --git a/queue-4.14/arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch b/queue-4.14/arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch
new file mode 100644 (file)
index 0000000..e35f9a7
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:01 +0100
+Subject: arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-12-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5cf9ce6e5ea50f805c6188c04ed0daaec7b6887d upstream.
+
+In a heterogeneous system, we can end up with both affected and
+unaffected CPUs. Let's check their status before calling into the
+firmware.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |    2 ++
+ arch/arm64/kernel/entry.S      |   11 +++++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -229,6 +229,8 @@ static int qcom_enable_link_stack_saniti
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ #ifdef CONFIG_ARM64_SSBD
++DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+                                      __le32 *origptr, __le32 *updptr,
+                                      int nr_inst)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -140,8 +140,10 @@ alternative_else_nop_endif
+       // This macro corrupts x0-x3. It is the caller's duty
+       // to save/restore them if required.
+-      .macro  apply_ssbd, state
++      .macro  apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++      ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
++      cbz     \tmp2, \targ
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+       mov     w1, #\state
+ alternative_cb        arm64_update_smccc_conduit
+@@ -176,12 +178,13 @@ alternative_cb_end
+       ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
+       disable_step_tsk x19, x20               // exceptions when scheduling.
+-      apply_ssbd 1
++      apply_ssbd 1, 1f, x22, x23
+ #ifdef CONFIG_ARM64_SSBD
+       ldp     x0, x1, [sp, #16 * 0]
+       ldp     x2, x3, [sp, #16 * 1]
+ #endif
++1:
+       mov     x29, xzr                        // fp pointed to user-space
+       .else
+@@ -321,8 +324,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
+-      apply_ssbd 0
+-
++      apply_ssbd 0, 5f, x0, x1
++5:
+       .endif
+       msr     elr_el1, x21                    // set up the return data
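In C terms, the per-CPU gating added to the apply_ssbd macro above behaves roughly like the following sketch (illustrative only; the real implementation is the entry.S assembly, and the conduit switch mirrors arm64_set_ssbd_mitigation() from the probing patch above):

        /* Rough C equivalent of apply_ssbd after this patch: CPUs that do not
         * need the callback skip the firmware call entirely.
         */
        static void apply_ssbd_sketch(bool state)
        {
                if (!__this_cpu_read(arm64_ssbd_callback_required))
                        return;                         /* unaffected CPU */

                switch (psci_ops.conduit) {
                case PSCI_CONDUIT_HVC:
                        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                        break;
                case PSCI_CONDUIT_SMC:
                        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                        break;
                default:
                        break;
                }
        }
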
diff --git a/queue-4.14/arm64-add-ssbd-command-line-option.patch b/queue-4.14/arm64-add-ssbd-command-line-option.patch
new file mode 100644 (file)
index 0000000..11e26b2
--- /dev/null
@@ -0,0 +1,215 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:03 +0100
+Subject: arm64: Add 'ssbd' command-line option
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-14-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a43ae4dfe56a01f5b98ba0cb2f784b6a43bafcc6 upstream.
+
+On a system where the firmware implements ARCH_WORKAROUND_2,
+it may be useful to either permanently enable or disable the
+workaround for cases where the user decides that they'd rather
+not get a trap overhead, and keep the mitigation permanently
+on or off instead of switching it on exception entry/exit.
+
+In any case, default to the mitigation being enabled.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |   17 +++
+ arch/arm64/include/asm/cpufeature.h             |    6 +
+ arch/arm64/kernel/cpu_errata.c                  |  103 ++++++++++++++++++++----
+ 3 files changed, 110 insertions(+), 16 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3997,6 +3997,23 @@
+                       expediting.  Set to zero to disable automatic
+                       expediting.
++      ssbd=           [ARM64,HW]
++                      Speculative Store Bypass Disable control
++
++                      On CPUs that are vulnerable to the Speculative
++                      Store Bypass vulnerability and offer a
++                      firmware based mitigation, this parameter
++                      indicates how the mitigation should be used:
++
++                      force-on:  Unconditionally enable mitigation for
++                                 for both kernel and userspace
++                      force-off: Unconditionally disable mitigation for
++                                 for both kernel and userspace
++                      kernel:    Always enable mitigation in the
++                                 kernel, and offer a prctl interface
++                                 to allow userspace to register its
++                                 interest in being mitigated too.
++
+       stack_guard_gap=        [MM]
+                       override the default stack gap protection. The value
+                       is in page units and it defines how many pages prior
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -262,6 +262,12 @@ static inline bool system_uses_ttbr0_pan
+               !cpus_have_const_cap(ARM64_HAS_PAN);
+ }
++#define ARM64_SSBD_UNKNOWN            -1
++#define ARM64_SSBD_FORCE_DISABLE      0
++#define ARM64_SSBD_KERNEL             1
++#define ARM64_SSBD_FORCE_ENABLE               2
++#define ARM64_SSBD_MITIGATED          3
++
+ #endif /* __ASSEMBLY__ */
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -231,6 +231,38 @@ static int qcom_enable_link_stack_saniti
+ #ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++
++static const struct ssbd_options {
++      const char      *str;
++      int             state;
++} ssbd_options[] = {
++      { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
++      { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
++      { "kernel",     ARM64_SSBD_KERNEL, },
++};
++
++static int __init ssbd_cfg(char *buf)
++{
++      int i;
++
++      if (!buf || !buf[0])
++              return -EINVAL;
++
++      for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
++              int len = strlen(ssbd_options[i].str);
++
++              if (strncmp(buf, ssbd_options[i].str, len))
++                      continue;
++
++              ssbd_state = ssbd_options[i].state;
++              return 0;
++      }
++
++      return -EINVAL;
++}
++early_param("ssbd", ssbd_cfg);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+                                      __le32 *origptr, __le32 *updptr,
+                                      int nr_inst)
+@@ -274,44 +306,83 @@ static bool has_ssbd_mitigation(const st
+                                   int scope)
+ {
+       struct arm_smccc_res res;
+-      bool supported = true;
++      bool required = true;
++      s32 val;
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+-      if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++      if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
++              ssbd_state = ARM64_SSBD_UNKNOWN;
+               return false;
++      }
+-      /*
+-       * The probe function return value is either negative
+-       * (unsupported or mitigated), positive (unaffected), or zero
+-       * (requires mitigation). We only need to do anything in the
+-       * last case.
+-       */
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+-              if ((int)res.a0 != 0)
+-                      supported = false;
+               break;
+       case PSCI_CONDUIT_SMC:
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+-              if ((int)res.a0 != 0)
+-                      supported = false;
+               break;
+       default:
+-              supported = false;
++              ssbd_state = ARM64_SSBD_UNKNOWN;
++              return false;
++      }
++
++      val = (s32)res.a0;
++
++      switch (val) {
++      case SMCCC_RET_NOT_SUPPORTED:
++              ssbd_state = ARM64_SSBD_UNKNOWN;
++              return false;
++
++      case SMCCC_RET_NOT_REQUIRED:
++              pr_info_once("%s mitigation not required\n", entry->desc);
++              ssbd_state = ARM64_SSBD_MITIGATED;
++              return false;
++
++      case SMCCC_RET_SUCCESS:
++              required = true;
++              break;
++
++      case 1: /* Mitigation not required on this CPU */
++              required = false;
++              break;
++
++      default:
++              WARN_ON(1);
++              return false;
+       }
+-      if (supported) {
+-              __this_cpu_write(arm64_ssbd_callback_required, 1);
++      switch (ssbd_state) {
++      case ARM64_SSBD_FORCE_DISABLE:
++              pr_info_once("%s disabled from command-line\n", entry->desc);
++              arm64_set_ssbd_mitigation(false);
++              required = false;
++              break;
++
++      case ARM64_SSBD_KERNEL:
++              if (required) {
++                      __this_cpu_write(arm64_ssbd_callback_required, 1);
++                      arm64_set_ssbd_mitigation(true);
++              }
++              break;
++
++      case ARM64_SSBD_FORCE_ENABLE:
++              pr_info_once("%s forced from command-line\n", entry->desc);
+               arm64_set_ssbd_mitigation(true);
++              required = true;
++              break;
++
++      default:
++              WARN_ON(1);
++              break;
+       }
+-      return supported;
++      return required;
+ }
+ #endif        /* CONFIG_ARM64_SSBD */
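The "kernel" mode documented above pairs with the per-thread prctl interface added later in this series (arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch). A hedged userspace sketch, assuming the generic speculation-control prctl constants from <linux/prctl.h> apply here as on other architectures:

        #include <sys/prctl.h>
        #include <linux/prctl.h>

        /* Illustrative only: ask the kernel to keep Speculative Store Bypass
         * disabled (i.e. the mitigation enabled) for the calling thread.
         */
        static int ssbd_opt_in(void)
        {
                return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                             PR_SPEC_DISABLE, 0, 0);
        }
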
diff --git a/queue-4.14/arm64-alternatives-add-dynamic-patching-feature.patch b/queue-4.14/arm64-alternatives-add-dynamic-patching-feature.patch
new file mode 100644 (file)
index 0000000..f9dbbaa
--- /dev/null
@@ -0,0 +1,215 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:56 +0100
+Subject: arm64: alternatives: Add dynamic patching feature
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-7-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+Commit dea5e2a4c5bcf196f879a66cebdcca07793e8ba4 upstream.
+
+We've so far relied on a patching infrastructure that only gave us
+a single alternative, without any way to provide a range of potential
+replacement instructions. For a single feature, this is an all or
+nothing thing.
+
+It would be interesting to have a more flexible, fine-grained way of patching
+the kernel though, where we could dynamically tune the code that gets
+injected.
+
+In order to achieve this, let's introduce a new form of dynamic patching,
+associating a callback to a patching site. This callback gets source and
+target locations of the patching request, as well as the number of
+instructions to be patched.
+
+Dynamic patching is declared with the new ALTERNATIVE_CB and alternative_cb
+directives:
+
+       asm volatile(ALTERNATIVE_CB("mov %0, #0\n", callback)
+                    : "r" (v));
+or
+       alternative_cb callback
+               mov     x0, #0
+       alternative_cb_end
+
+where callback is the C function computing the alternative.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/alternative.h |   41 ++++++++++++++++++++++++++++++---
+ arch/arm64/kernel/alternative.c      |   43 ++++++++++++++++++++++++++---------
+ 2 files changed, 69 insertions(+), 15 deletions(-)
+
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -5,6 +5,8 @@
+ #include <asm/cpucaps.h>
+ #include <asm/insn.h>
++#define ARM64_CB_PATCH ARM64_NCAPS
++
+ #ifndef __ASSEMBLY__
+ #include <linux/init.h>
+@@ -22,12 +24,19 @@ struct alt_instr {
+       u8  alt_len;            /* size of new instruction(s), <= orig_len */
+ };
++typedef void (*alternative_cb_t)(struct alt_instr *alt,
++                               __le32 *origptr, __le32 *updptr, int nr_inst);
++
+ void __init apply_alternatives_all(void);
+ void apply_alternatives(void *start, size_t length);
+-#define ALTINSTR_ENTRY(feature)                                                     \
++#define ALTINSTR_ENTRY(feature,cb)                                          \
+       " .word 661b - .\n"                             /* label           */ \
++      " .if " __stringify(cb) " == 0\n"                                     \
+       " .word 663f - .\n"                             /* new instruction */ \
++      " .else\n"                                                            \
++      " .word " __stringify(cb) "- .\n"               /* callback */        \
++      " .endif\n"                                                           \
+       " .hword " __stringify(feature) "\n"            /* feature bit     */ \
+       " .byte 662b-661b\n"                            /* source len      */ \
+       " .byte 664f-663f\n"                            /* replacement len */
+@@ -45,15 +54,18 @@ void apply_alternatives(void *start, siz
+  * but most assemblers die if insn1 or insn2 have a .inst. This should
+  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
++ *
++ * Alternatives with callbacks do not generate replacement instructions.
+  */
+-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)   \
++#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)       \
+       ".if "__stringify(cfg_enabled)" == 1\n"                         \
+       "661:\n\t"                                                      \
+       oldinstr "\n"                                                   \
+       "662:\n"                                                        \
+       ".pushsection .altinstructions,\"a\"\n"                         \
+-      ALTINSTR_ENTRY(feature)                                         \
++      ALTINSTR_ENTRY(feature,cb)                                      \
+       ".popsection\n"                                                 \
++      " .if " __stringify(cb) " == 0\n"                               \
+       ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       "663:\n\t"                                                      \
+       newinstr "\n"                                                   \
+@@ -61,11 +73,17 @@ void apply_alternatives(void *start, siz
+       ".popsection\n\t"                                               \
+       ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
+       ".org   . - (662b-661b) + (664b-663b)\n"                        \
++      ".else\n\t"                                                     \
++      "663:\n\t"                                                      \
++      "664:\n\t"                                                      \
++      ".endif\n"                                                      \
+       ".endif\n"
+ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)       \
+-      __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
++      __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
++#define ALTERNATIVE_CB(oldinstr, cb) \
++      __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+ #else
+ #include <asm/assembler.h>
+@@ -132,6 +150,14 @@ void apply_alternatives(void *start, siz
+ 661:
+ .endm
++.macro alternative_cb cb
++      .set .Lasm_alt_mode, 0
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
++      .popsection
++661:
++.endm
++
+ /*
+  * Provide the other half of the alternative code sequence.
+  */
+@@ -158,6 +184,13 @@ void apply_alternatives(void *start, siz
+ .endm
+ /*
++ * Callback-based alternative epilogue
++ */
++.macro alternative_cb_end
++662:
++.endm
++
++/*
+  * Provides a trivial alternative or default sequence consisting solely
+  * of NOPs. The number of NOPs is chosen automatically to match the
+  * previous case.
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -107,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr
+       return insn;
+ }
++static void patch_alternative(struct alt_instr *alt,
++                            __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++      __le32 *replptr;
++      int i;
++
++      replptr = ALT_REPL_PTR(alt);
++      for (i = 0; i < nr_inst; i++) {
++              u32 insn;
++
++              insn = get_alt_insn(alt, origptr + i, replptr + i);
++              updptr[i] = cpu_to_le32(insn);
++      }
++}
++
+ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+ {
+       struct alt_instr *alt;
+       struct alt_region *region = alt_region;
+-      __le32 *origptr, *replptr, *updptr;
++      __le32 *origptr, *updptr;
++      alternative_cb_t alt_cb;
+       for (alt = region->begin; alt < region->end; alt++) {
+-              u32 insn;
+-              int i, nr_inst;
++              int nr_inst;
+-              if (!cpus_have_cap(alt->cpufeature))
++              /* Use ARM64_CB_PATCH as an unconditional patch */
++              if (alt->cpufeature < ARM64_CB_PATCH &&
++                  !cpus_have_cap(alt->cpufeature))
+                       continue;
+-              BUG_ON(alt->alt_len != alt->orig_len);
++              if (alt->cpufeature == ARM64_CB_PATCH)
++                      BUG_ON(alt->alt_len != 0);
++              else
++                      BUG_ON(alt->alt_len != alt->orig_len);
+               pr_info_once("patching kernel code\n");
+               origptr = ALT_ORIG_PTR(alt);
+-              replptr = ALT_REPL_PTR(alt);
+               updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+-              nr_inst = alt->alt_len / sizeof(insn);
++              nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
++
++              if (alt->cpufeature < ARM64_CB_PATCH)
++                      alt_cb = patch_alternative;
++              else
++                      alt_cb  = ALT_REPL_PTR(alt);
+-              for (i = 0; i < nr_inst; i++) {
+-                      insn = get_alt_insn(alt, origptr + i, replptr + i);
+-                      updptr[i] = cpu_to_le32(insn);
+-              }
++              alt_cb(alt, origptr, updptr, nr_inst);
+               flush_icache_range((uintptr_t)origptr,
+                                  (uintptr_t)(origptr + nr_inst));
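For reference, a dynamic-patching callback has the alternative_cb_t shape shown above; here is a minimal sketch (the callback name and the chosen replacement instruction are illustrative — arm64_update_smccc_conduit, added later in this queue, is a real in-tree example):

        /* Sketch of a patch-time callback: it receives the original and target
         * instruction pointers plus the instruction count, and writes the
         * little-endian replacement instruction(s) into updptr[].
         */
        static void example_patch_cb(struct alt_instr *alt,
                                     __le32 *origptr, __le32 *updptr, int nr_inst)
        {
                u32 insn = aarch64_insn_gen_nop();      /* decided at runtime */

                BUG_ON(nr_inst != 1);
                updptr[0] = cpu_to_le32(insn);
        }

The call site would then wrap the instruction to be rewritten in alternative_cb example_patch_cb ... alternative_cb_end (assembly) or ALTERNATIVE_CB (C), as shown in the commit message above.
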
diff --git a/queue-4.14/arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch b/queue-4.14/arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch
new file mode 100644 (file)
index 0000000..f3b727b
--- /dev/null
@@ -0,0 +1,196 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:54 +0100
+Subject: arm64: alternatives: use tpidr_el2 on VHE hosts
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, James Morse <james.morse@arm.com>
+Message-ID: <20180720095312.1161-5-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 6d99b68933fbcf51f84fcbba49246ce1209ec193 upstream.
+
+Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
+tpidr_el1, merge the two. This saves KVM from save/restoring tpidr_el1
+on VHE hosts, and allows future code to blindly access per-cpu variables
+without triggering world-switch.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/alternative.h |    2 ++
+ arch/arm64/include/asm/assembler.h   |    8 ++++++++
+ arch/arm64/include/asm/percpu.h      |   11 +++++++++--
+ arch/arm64/kernel/alternative.c      |    9 +++++----
+ arch/arm64/kernel/cpufeature.c       |   17 +++++++++++++++++
+ arch/arm64/mm/proc.S                 |    8 ++++++++
+ 6 files changed, 49 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -12,6 +12,8 @@
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
++extern int alternatives_applied;
++
+ struct alt_instr {
+       s32 orig_offset;        /* offset to original instruction */
+       s32 alt_offset;         /* offset to replacement instruction */
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -260,7 +260,11 @@ lr        .req    x30             // link register
+ #else
+       adr_l   \dst, \sym
+ #endif
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       mrs     \tmp, tpidr_el1
++alternative_else
++      mrs     \tmp, tpidr_el2
++alternative_endif
+       add     \dst, \dst, \tmp
+       .endm
+@@ -271,7 +275,11 @@ lr        .req    x30             // link register
+        */
+       .macro ldr_this_cpu dst, sym, tmp
+       adr_l   \dst, \sym
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       mrs     \tmp, tpidr_el1
++alternative_else
++      mrs     \tmp, tpidr_el2
++alternative_endif
+       ldr     \dst, [\dst, \tmp]
+       .endm
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -16,11 +16,15 @@
+ #ifndef __ASM_PERCPU_H
+ #define __ASM_PERCPU_H
++#include <asm/alternative.h>
+ #include <asm/stack_pointer.h>
+ static inline void set_my_cpu_offset(unsigned long off)
+ {
+-      asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
++      asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
++                               "msr tpidr_el2, %0",
++                               ARM64_HAS_VIRT_HOST_EXTN)
++                      :: "r" (off) : "memory");
+ }
+ static inline unsigned long __my_cpu_offset(void)
+@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_off
+        * We want to allow caching the value, so avoid using volatile and
+        * instead use a fake stack read to hazard against barrier().
+        */
+-      asm("mrs %0, tpidr_el1" : "=r" (off) :
++      asm(ALTERNATIVE("mrs %0, tpidr_el1",
++                      "mrs %0, tpidr_el2",
++                      ARM64_HAS_VIRT_HOST_EXTN)
++              : "=r" (off) :
+               "Q" (*(const unsigned long *)current_stack_pointer));
+       return off;
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -32,6 +32,8 @@
+ #define ALT_ORIG_PTR(a)               __ALT_PTR(a, orig_offset)
+ #define ALT_REPL_PTR(a)               __ALT_PTR(a, alt_offset)
++int alternatives_applied;
++
+ struct alt_region {
+       struct alt_instr *begin;
+       struct alt_instr *end;
+@@ -143,7 +145,6 @@ static void __apply_alternatives(void *a
+  */
+ static int __apply_alternatives_multi_stop(void *unused)
+ {
+-      static int patched = 0;
+       struct alt_region region = {
+               .begin  = (struct alt_instr *)__alt_instructions,
+               .end    = (struct alt_instr *)__alt_instructions_end,
+@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_st
+       /* We always have a CPU 0 at this point (__init) */
+       if (smp_processor_id()) {
+-              while (!READ_ONCE(patched))
++              while (!READ_ONCE(alternatives_applied))
+                       cpu_relax();
+               isb();
+       } else {
+-              BUG_ON(patched);
++              BUG_ON(alternatives_applied);
+               __apply_alternatives(&region, true);
+               /* Barriers provided by the cache flushing */
+-              WRITE_ONCE(patched, 1);
++              WRITE_ONCE(alternatives_applied, 1);
+       }
+       return 0;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -880,6 +880,22 @@ static int __init parse_kpti(char *str)
+ early_param("kpti", parse_kpti);
+ #endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
++static int cpu_copy_el2regs(void *__unused)
++{
++      /*
++       * Copy register values that aren't redirected by hardware.
++       *
++       * Before code patching, we only set tpidr_el1, all CPUs need to copy
++       * this value to tpidr_el2 before we patch the code. Once we've done
++       * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
++       * do anything here.
++       */
++      if (!alternatives_applied)
++              write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
++      return 0;
++}
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+       {
+               .desc = "GIC system register CPU interface",
+@@ -949,6 +965,7 @@ static const struct arm64_cpu_capabiliti
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = runs_at_el2,
++              .enable = cpu_copy_el2regs,
+       },
+       {
+               .desc = "32-bit EL0 Support",
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
+       mrs     x8, mdscr_el1
+       mrs     x9, oslsr_el1
+       mrs     x10, sctlr_el1
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       mrs     x11, tpidr_el1
++alternative_else
++      mrs     x11, tpidr_el2
++alternative_endif
+       mrs     x12, sp_el0
+       stp     x2, x3, [x0]
+       stp     x4, xzr, [x0, #16]
+@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
+       msr     mdscr_el1, x10
+       msr     sctlr_el1, x12
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       msr     tpidr_el1, x13
++alternative_else
++      msr     tpidr_el2, x13
++alternative_endif
+       msr     sp_el0, x14
+       /*
+        * Restore oslsr_el1 by writing oslar_el1
diff --git a/queue-4.14/arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch b/queue-4.14/arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch
new file mode 100644 (file)
index 0000000..e36daf7
--- /dev/null
@@ -0,0 +1,132 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:00 +0100
+Subject: arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-11-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 8e2906245f1e3b0d027169d9f2e55ce0548cb96e upstream.
+
+In order for the kernel to protect itself, let's call the SSBD mitigation
+implemented by the higher exception level (either hypervisor or firmware)
+on each transition between userspace and kernel.
+
+We must take the PSCI conduit into account in order to target the
+right exception level, hence the introduction of a runtime patching
+callback.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   24 ++++++++++++++++++++++++
+ arch/arm64/kernel/entry.S      |   22 ++++++++++++++++++++++
+ include/linux/arm-smccc.h      |    5 +++++
+ 3 files changed, 51 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -228,6 +228,30 @@ static int qcom_enable_link_stack_saniti
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
++#ifdef CONFIG_ARM64_SSBD
++void __init arm64_update_smccc_conduit(struct alt_instr *alt,
++                                     __le32 *origptr, __le32 *updptr,
++                                     int nr_inst)
++{
++      u32 insn;
++
++      BUG_ON(nr_inst != 1);
++
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_HVC:
++              insn = aarch64_insn_get_hvc_value();
++              break;
++      case PSCI_CONDUIT_SMC:
++              insn = aarch64_insn_get_smc_value();
++              break;
++      default:
++              return;
++      }
++
++      *updptr = cpu_to_le32(insn);
++}
++#endif        /* CONFIG_ARM64_SSBD */
++
+ #define MIDR_RANGE(model, min, max) \
+       .def_scope = SCOPE_LOCAL_CPU, \
+       .matches = is_affected_midr_range, \
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -18,6 +18,7 @@
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+@@ -137,6 +138,18 @@ alternative_else_nop_endif
+       add     \dst, \dst, #(\sym - .entry.tramp.text)
+       .endm
++      // This macro corrupts x0-x3. It is the caller's duty
++      // to save/restore them if required.
++      .macro  apply_ssbd, state
++#ifdef CONFIG_ARM64_SSBD
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++      mov     w1, #\state
++alternative_cb        arm64_update_smccc_conduit
++      nop                                     // Patched to SMC/HVC #0
++alternative_cb_end
++#endif
++      .endm
++
+       .macro  kernel_entry, el, regsize = 64
+       .if     \regsize == 32
+       mov     w0, w0                          // zero upper 32 bits of x0
+@@ -163,6 +176,13 @@ alternative_else_nop_endif
+       ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
+       disable_step_tsk x19, x20               // exceptions when scheduling.
++      apply_ssbd 1
++
++#ifdef CONFIG_ARM64_SSBD
++      ldp     x0, x1, [sp, #16 * 0]
++      ldp     x2, x3, [sp, #16 * 1]
++#endif
++
+       mov     x29, xzr                        // fp pointed to user-space
+       .else
+       add     x21, sp, #S_FRAME_SIZE
+@@ -301,6 +321,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
++      apply_ssbd 0
++
+       .endif
+       msr     elr_el1, x21                    // set up the return data
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -80,6 +80,11 @@
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x8000)
++#define ARM_SMCCC_ARCH_WORKAROUND_2                                   \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0x7fff)
++
+ #ifndef __ASSEMBLY__
+ #include <linux/linkage.h>
diff --git a/queue-4.14/arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch b/queue-4.14/arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch
new file mode 100644 (file)
index 0000000..a3c1784
--- /dev/null
@@ -0,0 +1,125 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:12 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-23-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5d81f7dc9bca4f4963092433e27b508cbe524a32 upstream.
+
+Now that all our infrastructure is in place, let's expose the
+availability of ARCH_WORKAROUND_2 to guests. We take this opportunity
+to tidy up a couple of SMCCC constants.
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_host.h   |   12 ++++++++++++
+ arch/arm64/include/asm/kvm_host.h |   23 +++++++++++++++++++++++
+ arch/arm64/kvm/reset.c            |    4 ++++
+ virt/kvm/arm/psci.c               |   18 ++++++++++++++++--
+ 4 files changed, 55 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -302,4 +302,16 @@ static inline bool kvm_arm_harden_branch
+       return false;
+ }
++#define KVM_SSBD_UNKNOWN              -1
++#define KVM_SSBD_FORCE_DISABLE                0
++#define KVM_SSBD_KERNEL               1
++#define KVM_SSBD_FORCE_ENABLE         2
++#define KVM_SSBD_MITIGATED            3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++      /* No way to detect it yet, pretend it is not there. */
++      return KVM_SSBD_UNKNOWN;
++}
++
+ #endif /* __ARM_KVM_HOST_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -412,4 +412,27 @@ static inline bool kvm_arm_harden_branch
+       return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
++#define KVM_SSBD_UNKNOWN              -1
++#define KVM_SSBD_FORCE_DISABLE                0
++#define KVM_SSBD_KERNEL               1
++#define KVM_SSBD_FORCE_ENABLE         2
++#define KVM_SSBD_MITIGATED            3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++      switch (arm64_get_ssbd_state()) {
++      case ARM64_SSBD_FORCE_DISABLE:
++              return KVM_SSBD_FORCE_DISABLE;
++      case ARM64_SSBD_KERNEL:
++              return KVM_SSBD_KERNEL;
++      case ARM64_SSBD_FORCE_ENABLE:
++              return KVM_SSBD_FORCE_ENABLE;
++      case ARM64_SSBD_MITIGATED:
++              return KVM_SSBD_MITIGATED;
++      case ARM64_SSBD_UNKNOWN:
++      default:
++              return KVM_SSBD_UNKNOWN;
++      }
++}
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+       /* Reset PMU */
+       kvm_pmu_vcpu_reset(vcpu);
++      /* Default workaround setup is enabled (if supported) */
++      if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
++              vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
++
+       /* Reset timer */
+       return kvm_timer_vcpu_reset(vcpu);
+ }
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ {
+       u32 func_id = smccc_get_function(vcpu);
+-      u32 val = PSCI_RET_NOT_SUPPORTED;
++      u32 val = SMCCC_RET_NOT_SUPPORTED;
+       u32 feature;
+       switch (func_id) {
+@@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+               switch(feature) {
+               case ARM_SMCCC_ARCH_WORKAROUND_1:
+                       if (kvm_arm_harden_branch_predictor())
+-                              val = 0;
++                              val = SMCCC_RET_SUCCESS;
++                      break;
++              case ARM_SMCCC_ARCH_WORKAROUND_2:
++                      switch (kvm_arm_have_ssbd()) {
++                      case KVM_SSBD_FORCE_DISABLE:
++                      case KVM_SSBD_UNKNOWN:
++                              break;
++                      case KVM_SSBD_KERNEL:
++                              val = SMCCC_RET_SUCCESS;
++                              break;
++                      case KVM_SSBD_FORCE_ENABLE:
++                      case KVM_SSBD_MITIGATED:
++                              val = SMCCC_RET_NOT_REQUIRED;
++                              break;
++                      }
+                       break;
+               }
+               break;
diff --git a/queue-4.14/arm64-kvm-add-arch_workaround_2-support-for-guests.patch b/queue-4.14/arm64-kvm-add-arch_workaround_2-support-for-guests.patch
new file mode 100644 (file)
index 0000000..d37d3c6
--- /dev/null
@@ -0,0 +1,186 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:10 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-21-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.
+
+In order to offer ARCH_WORKAROUND_2 support to guests, we need
+a bit of infrastructure.
+
+Let's add a flag indicating whether or not the guest uses
+SSBD mitigation. Depending on the state of this flag, allow
+KVM to disable ARCH_WORKAROUND_2 before entering the guest,
+and enable it when exiting it.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h    |    5 +++++
+ arch/arm64/include/asm/kvm_asm.h  |    3 +++
+ arch/arm64/include/asm/kvm_host.h |    3 +++
+ arch/arm64/include/asm/kvm_mmu.h  |   24 ++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/switch.c       |   38 ++++++++++++++++++++++++++++++++++++++
+ virt/kvm/arm/arm.c                |    4 ++++
+ 6 files changed, 77 insertions(+)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -254,6 +254,11 @@ static inline int kvm_map_vectors(void)
+       return 0;
+ }
++static inline int hyp_map_aux_data(void)
++{
++      return 0;
++}
++
+ #endif        /* !__ASSEMBLY__ */
+ #endif /* __ARM_KVM_MMU_H__ */
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,9 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT   0
+ #define KVM_ARM64_DEBUG_DIRTY         (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
++#define       VCPU_WORKAROUND_2_FLAG_SHIFT    0
++#define       VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
++
+ /* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym)                                             \
+       ({                                                              \
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -210,6 +210,9 @@ struct kvm_vcpu_arch {
+       /* Exception Information */
+       struct kvm_vcpu_fault_info fault;
++      /* State of various workarounds, see kvm_asm.h for bit assignment */
++      u64 workaround_flags;
++
+       /* Guest debug state */
+       u64 debug_flags;
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -383,5 +383,29 @@ static inline int kvm_map_vectors(void)
+ }
+ #endif
++#ifdef CONFIG_ARM64_SSBD
++DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
++static inline int hyp_map_aux_data(void)
++{
++      int cpu, err;
++
++      for_each_possible_cpu(cpu) {
++              u64 *ptr;
++
++              ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
++              err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
++              if (err)
++                      return err;
++      }
++      return 0;
++}
++#else
++static inline int hyp_map_aux_data(void)
++{
++      return 0;
++}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -15,6 +15,7 @@
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/arm-smccc.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
+@@ -281,6 +282,39 @@ static void __hyp_text __skip_instr(stru
+       write_sysreg_el2(*vcpu_pc(vcpu), elr);
+ }
++static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
++{
++      if (!cpus_have_const_cap(ARM64_SSBD))
++              return false;
++
++      return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
++}
++
++static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++      /*
++       * The host runs with the workaround always present. If the
++       * guest wants it disabled, so be it...
++       */
++      if (__needs_ssbd_off(vcpu) &&
++          __hyp_this_cpu_read(arm64_ssbd_callback_required))
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
++#endif
++}
++
++static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++      /*
++       * If the guest has disabled the workaround, bring it back on.
++       */
++      if (__needs_ssbd_off(vcpu) &&
++          __hyp_this_cpu_read(arm64_ssbd_callback_required))
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
++#endif
++}
++
+ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_cpu_context *host_ctxt;
+@@ -311,6 +345,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm
+       __sysreg_restore_guest_state(guest_ctxt);
+       __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
++      __set_guest_arch_workaround_state(vcpu);
++
+       /* Jump in the fire! */
+ again:
+       exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -367,6 +403,8 @@ again:
+               /* 0 falls through to be handled out of EL2 */
+       }
++      __set_host_arch_workaround_state(vcpu);
++
+       if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
+               u32 midr = read_cpuid_id();
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -1411,6 +1411,10 @@ static int init_hyp_mode(void)
+               }
+       }
++      err = hyp_map_aux_data();
++      if (err)
++              kvm_err("Cannot map host auxilary data: %d\n", err);
++
+       return 0;
+ out_err:
diff --git a/queue-4.14/arm64-kvm-add-hyp-per-cpu-accessors.patch b/queue-4.14/arm64-kvm-add-hyp-per-cpu-accessors.patch
new file mode 100644 (file)
index 0000000..4e8e801
--- /dev/null
@@ -0,0 +1,69 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:09 +0100
+Subject: arm64: KVM: Add HYP per-cpu accessors
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-20-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 85478bab409171de501b719971fd25a3d5d639f9 upstream.
+
+As we're going to need to access per-cpu variables at EL2,
+let's craft the minimum set of accessors required to implement
+reading a per-cpu variable, relying on tpidr_el2 to contain the
+per-cpu offset.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h |   27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -69,14 +69,37 @@ extern u32 __init_stage2_translation(voi
+ extern void __qcom_hyp_sanitize_btac_predictors(void);
++/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
++#define __hyp_this_cpu_ptr(sym)                                               \
++      ({                                                              \
++              void *__ptr = hyp_symbol_addr(sym);                     \
++              __ptr += read_sysreg(tpidr_el2);                        \
++              (typeof(&sym))__ptr;                                    \
++       })
++
++#define __hyp_this_cpu_read(sym)                                      \
++      ({                                                              \
++              *__hyp_this_cpu_ptr(sym);                               \
++       })
++
+ #else /* __ASSEMBLY__ */
+-.macro get_host_ctxt reg, tmp
+-      adr_l   \reg, kvm_host_cpu_state
++.macro hyp_adr_this_cpu reg, sym, tmp
++      adr_l   \reg, \sym
+       mrs     \tmp, tpidr_el2
+       add     \reg, \reg, \tmp
+ .endm
++.macro hyp_ldr_this_cpu reg, sym, tmp
++      adr_l   \reg, \sym
++      mrs     \tmp, tpidr_el2
++      ldr     \reg,  [\reg, \tmp]
++.endm
++
++.macro get_host_ctxt reg, tmp
++      hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
++.endm
++
+ .macro get_vcpu_ptr vcpu, ctxt
+       get_host_ctxt \ctxt, \vcpu
+       ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
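The accessors added above boil down to "symbol address plus the per-cpu offset held in tpidr_el2". A rough C-level sketch of the same computation, assuming tpidr_el2 has already been loaded with this CPU's offset (the real code is the inline macros in the hunk, which is what lets them run in the EL2/__hyp_text context):

    /* Illustrative sketch only; mirrors __hyp_this_cpu_ptr() above. */
    static inline void *sketch_hyp_this_cpu_ptr(void *base)
    {
            unsigned long off;

            /* tpidr_el2 holds this CPU's per-cpu offset at HYP */
            asm("mrs %0, tpidr_el2" : "=r" (off));
            return (char *)base + off;      /* this CPU's copy of the variable */
    }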
diff --git a/queue-4.14/arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch b/queue-4.14/arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch
new file mode 100644 (file)
index 0000000..3fa20b0
--- /dev/null
@@ -0,0 +1,89 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:11 +0100
+Subject: arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-22-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit b4f18c063a13dfb33e3a63fe1844823e19c2265e upstream.
+
+In order to forward the guest's ARCH_WORKAROUND_2 calls to EL3,
+add a small(-ish) sequence to handle it at EL2. Special care must
+be taken to track the state of the guest itself by updating the
+workaround flags. We also rely on patching to enable calls into
+the firmware.
+
+Note that since we need to execute branches, this always executes
+after the Spectre-v2 mitigation has been applied.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/asm-offsets.c |    1 +
+ arch/arm64/kvm/hyp/hyp-entry.S  |   38 +++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -131,6 +131,7 @@ int main(void)
+   BLANK();
+ #ifdef CONFIG_KVM_ARM_HOST
+   DEFINE(VCPU_CONTEXT,                offsetof(struct kvm_vcpu, arch.ctxt));
++  DEFINE(VCPU_WORKAROUND_FLAGS,       offsetof(struct kvm_vcpu, arch.workaround_flags));
+   DEFINE(CPU_GP_REGS,         offsetof(struct kvm_cpu_context, gp_regs));
+   DEFINE(CPU_USER_PT_REGS,    offsetof(struct kvm_regs, regs));
+   DEFINE(CPU_FP_REGS,         offsetof(struct kvm_regs, fp_regs));
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -106,8 +106,44 @@ el1_hvc_guest:
+        */
+       ldr     x1, [sp]                                // Guest's x0
+       eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++      cbz     w1, wa_epilogue
++
++      /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
++      eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
++                        ARM_SMCCC_ARCH_WORKAROUND_2)
+       cbnz    w1, el1_trap
+-      mov     x0, x1
++
++#ifdef CONFIG_ARM64_SSBD
++alternative_cb        arm64_enable_wa2_handling
++      b       wa2_end
++alternative_cb_end
++      get_vcpu_ptr    x2, x0
++      ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++      // Sanitize the argument and update the guest flags
++      ldr     x1, [sp, #8]                    // Guest's x1
++      clz     w1, w1                          // Murphy's device:
++      lsr     w1, w1, #5                      // w1 = !!w1 without using
++      eor     w1, w1, #1                      // the flags...
++      bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
++      str     x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++      /* Check that we actually need to perform the call */
++      hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
++      cbz     x0, wa2_end
++
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++      smc     #0
++
++      /* Don't leak data from the SMC call */
++      mov     x3, xzr
++wa2_end:
++      mov     x2, xzr
++      mov     x1, xzr
++#endif
++
++wa_epilogue:
++      mov     x0, xzr
+       add     sp, sp, #16
+       eret
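Seen from the other side of the trap, the handler above services an ordinary SMCCC 1.1 call. A hedged sketch of how a guest kernel would issue such a request (the real call sites live in the guest's own SSBD code; the helper name here is made up for illustration):

    #include <linux/arm-smccc.h>

    /* Sketch: ask the hypervisor to enable (1) or disable (0) the SSBD
     * mitigation for this vCPU. KVM guests use the HVC conduit, which is
     * exactly what the el1_hvc_guest path above intercepts. */
    static void guest_request_ssbd(int enable)
    {
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, enable ? 1 : 0, NULL);
    }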
diff --git a/queue-4.14/arm64-ssbd-add-global-mitigation-state-accessor.patch b/queue-4.14/arm64-ssbd-add-global-mitigation-state-accessor.patch
new file mode 100644 (file)
index 0000000..1b5b0b1
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:04 +0100
+Subject: arm64: ssbd: Add global mitigation state accessor
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-15-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c32e1736ca03904c03de0e4459a673be194f56fd upstream.
+
+We're about to need the mitigation state in various parts of the
+kernel in order to do the right thing for userspace and guests.
+
+Let's expose an accessor that will let other subsystems know
+about the state.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -268,6 +268,16 @@ static inline bool system_uses_ttbr0_pan
+ #define ARM64_SSBD_FORCE_ENABLE               2
+ #define ARM64_SSBD_MITIGATED          3
++static inline int arm64_get_ssbd_state(void)
++{
++#ifdef CONFIG_ARM64_SSBD
++      extern int ssbd_state;
++      return ssbd_state;
++#else
++      return ARM64_SSBD_UNKNOWN;
++#endif
++}
++
+ #endif /* __ASSEMBLY__ */
+ #endif
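As a usage illustration (not part of the patch), a consumer of the new accessor typically just compares the returned value against the ARM64_SSBD_* state constants used throughout this series:

    /* Sketch: does this system support run-time toggling of the mitigation? */
    static bool ssbd_is_dynamic(void)
    {
            return arm64_get_ssbd_state() == ARM64_SSBD_KERNEL;
    }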
diff --git a/queue-4.14/arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch b/queue-4.14/arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch
new file mode 100644 (file)
index 0000000..020181f
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:08 +0100
+Subject: arm64: ssbd: Add prctl interface for per-thread mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-19-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9cdc0108baa8ef87c76ed834619886a46bd70cbe upstream.
+
+If running on a system that performs dynamic SSBD mitigation, allow
+userspace to request the mitigation for itself. This is implemented
+as a prctl call, allowing the mitigation to be enabled or disabled at
+will for this particular thread.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/Makefile |    1 
+ arch/arm64/kernel/ssbd.c   |  108 +++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 109 insertions(+)
+ create mode 100644 arch/arm64/kernel/ssbd.c
+
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)            += machine_ke
+ arm64-obj-$(CONFIG_ARM64_RELOC_TEST)  += arm64-reloc-test.o
+ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+ arm64-obj-$(CONFIG_CRASH_DUMP)                += crash_dump.o
++arm64-obj-$(CONFIG_ARM64_SSBD)                += ssbd.o
+ ifeq ($(CONFIG_KVM),y)
+ arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)   += bpi.o
+--- /dev/null
++++ b/arch/arm64/kernel/ssbd.c
+@@ -0,0 +1,108 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
++ */
++
++#include <linux/errno.h>
++#include <linux/prctl.h>
++#include <linux/sched.h>
++#include <linux/thread_info.h>
++
++#include <asm/cpufeature.h>
++
++/*
++ * prctl interface for SSBD
++ */
++static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++      int state = arm64_get_ssbd_state();
++
++      /* Unsupported */
++      if (state == ARM64_SSBD_UNKNOWN)
++              return -EINVAL;
++
++      /* Treat the unaffected/mitigated state separately */
++      if (state == ARM64_SSBD_MITIGATED) {
++              switch (ctrl) {
++              case PR_SPEC_ENABLE:
++                      return -EPERM;
++              case PR_SPEC_DISABLE:
++              case PR_SPEC_FORCE_DISABLE:
++                      return 0;
++              }
++      }
++
++      /*
++       * Things are a bit backward here: the arm64 internal API
++       * *enables the mitigation* when the userspace API *disables
++       * speculation*. So much fun.
++       */
++      switch (ctrl) {
++      case PR_SPEC_ENABLE:
++              /* If speculation is force disabled, enable is not allowed */
++              if (state == ARM64_SSBD_FORCE_ENABLE ||
++                  task_spec_ssb_force_disable(task))
++                      return -EPERM;
++              task_clear_spec_ssb_disable(task);
++              clear_tsk_thread_flag(task, TIF_SSBD);
++              break;
++      case PR_SPEC_DISABLE:
++              if (state == ARM64_SSBD_FORCE_DISABLE)
++                      return -EPERM;
++              task_set_spec_ssb_disable(task);
++              set_tsk_thread_flag(task, TIF_SSBD);
++              break;
++      case PR_SPEC_FORCE_DISABLE:
++              if (state == ARM64_SSBD_FORCE_DISABLE)
++                      return -EPERM;
++              task_set_spec_ssb_disable(task);
++              task_set_spec_ssb_force_disable(task);
++              set_tsk_thread_flag(task, TIF_SSBD);
++              break;
++      default:
++              return -ERANGE;
++      }
++
++      return 0;
++}
++
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++                           unsigned long ctrl)
++{
++      switch (which) {
++      case PR_SPEC_STORE_BYPASS:
++              return ssbd_prctl_set(task, ctrl);
++      default:
++              return -ENODEV;
++      }
++}
++
++static int ssbd_prctl_get(struct task_struct *task)
++{
++      switch (arm64_get_ssbd_state()) {
++      case ARM64_SSBD_UNKNOWN:
++              return -EINVAL;
++      case ARM64_SSBD_FORCE_ENABLE:
++              return PR_SPEC_DISABLE;
++      case ARM64_SSBD_KERNEL:
++              if (task_spec_ssb_force_disable(task))
++                      return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++              if (task_spec_ssb_disable(task))
++                      return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++              return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++      case ARM64_SSBD_FORCE_DISABLE:
++              return PR_SPEC_ENABLE;
++      default:
++              return PR_SPEC_NOT_AFFECTED;
++      }
++}
++
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
++{
++      switch (which) {
++      case PR_SPEC_STORE_BYPASS:
++              return ssbd_prctl_get(task);
++      default:
++              return -ENODEV;
++      }
++}
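The userspace half of this interface is the generic speculation-control prctl. A minimal example of a thread opting into the mitigation (sketch only; a real program would also probe PR_GET_SPECULATION_CTRL and handle errors more carefully):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL         /* fallback for older headers */
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
            /* "Disable speculation" from the prctl point of view means
             * "enable the SSBD mitigation" on the arm64 side, as the
             * comment in ssbd_prctl_set() above points out. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("prctl");
            return 0;
    }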
diff --git a/queue-4.14/arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch b/queue-4.14/arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch
new file mode 100644 (file)
index 0000000..623c87b
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:07 +0100
+Subject: arm64: ssbd: Introduce thread flag to control userspace mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-18-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9dd9614f5476687abbff8d4b12cd08ae70d7c2ad upstream.
+
+In order to allow userspace to be mitigated on demand, let's
+introduce a new thread flag that prevents the mitigation from
+being turned off when exiting to userspace, and doesn't turn
+it on on entry into the kernel (with the assumption that the
+mitigation is always enabled in the kernel itself).
+
+This will be used by a prctl interface introduced in a later
+patch.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/thread_info.h |    1 +
+ arch/arm64/kernel/entry.S            |    2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -92,6 +92,7 @@ void arch_setup_new_exec(void);
+ #define TIF_RESTORE_SIGMASK   20
+ #define TIF_SINGLESTEP                21
+ #define TIF_32BIT             22      /* 32bit process */
++#define TIF_SSBD              23      /* Wants SSB mitigation */
+ #define _TIF_SIGPENDING               (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED     (1 << TIF_NEED_RESCHED)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -147,6 +147,8 @@ alternative_cb     arm64_enable_wa2_handling
+ alternative_cb_end
+       ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
+       cbz     \tmp2, \targ
++      ldr     \tmp2, [tsk, #TSK_TI_FLAGS]
++      tbnz    \tmp2, #TIF_SSBD, \targ
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+       mov     w1, #\state
+ alternative_cb        arm64_update_smccc_conduit
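Restated as C-like pseudocode, the decision the two added instructions fold into apply_ssbd is roughly the following (a sketch; the real logic is the assembly above, and firmware_toggle_ssbd() here merely stands in for the ARCH_WORKAROUND_2 SMCCC call):

    /* On kernel entry state == 1 (mitigation on), on exit state == 0. */
    if (this_cpu_read(arm64_ssbd_callback_required) &&
        !test_thread_flag(TIF_SSBD))        /* thread hasn't pinned it on */
            firmware_toggle_ssbd(state);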
diff --git a/queue-4.14/arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch b/queue-4.14/arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch
new file mode 100644 (file)
index 0000000..2575e72
--- /dev/null
@@ -0,0 +1,99 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:06 +0100
+Subject: arm64: ssbd: Restore mitigation status on CPU resume
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-17-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 647d0519b53f440a55df163de21c52a8205431cc upstream.
+
+On a system where firmware can dynamically change the state of the
+mitigation, the CPU will always come up with the mitigation enabled,
+including when coming back from suspend.
+
+If the user has requested "no mitigation" via a command line option,
+let's enforce it by calling into the firmware again to disable it.
+
+Similarly, for a resume from hibernate, the mitigation could have
+been disabled by the boot kernel. Let's ensure that it is set
+back on in that case.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    6 ++++++
+ arch/arm64/kernel/cpu_errata.c      |    2 +-
+ arch/arm64/kernel/hibernate.c       |   11 +++++++++++
+ arch/arm64/kernel/suspend.c         |    8 ++++++++
+ 4 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -278,6 +278,12 @@ static inline int arm64_get_ssbd_state(v
+ #endif
+ }
++#ifdef CONFIG_ARM64_SSBD
++void arm64_set_ssbd_mitigation(bool state);
++#else
++static inline void arm64_set_ssbd_mitigation(bool state) {}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -299,7 +299,7 @@ void __init arm64_enable_wa2_handling(st
+               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+ }
+-static void arm64_set_ssbd_mitigation(bool state)
++void arm64_set_ssbd_mitigation(bool state)
+ {
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
+               sleep_cpu = -EINVAL;
+               __cpu_suspend_exit();
++
++              /*
++               * Just in case the boot kernel did turn the SSBD
++               * mitigation off behind our back, let's set the state
++               * to what we expect it to be.
++               */
++              switch (arm64_get_ssbd_state()) {
++              case ARM64_SSBD_FORCE_ENABLE:
++              case ARM64_SSBD_KERNEL:
++                      arm64_set_ssbd_mitigation(true);
++              }
+       }
+       local_dbg_restore(flags);
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
+        */
+       if (hw_breakpoint_restore)
+               hw_breakpoint_restore(cpu);
++
++      /*
++       * On resume, firmware implementing dynamic mitigation will
++       * have turned the mitigation on. If the user has forcefully
++       * disabled it, make sure their wishes are obeyed.
++       */
++      if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++              arm64_set_ssbd_mitigation(false);
+ }
+ /*
diff --git a/queue-4.14/arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch b/queue-4.14/arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch
new file mode 100644 (file)
index 0000000..a080604
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:53:05 +0100
+Subject: arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-16-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 upstream.
+
+In order to avoid checking arm64_ssbd_callback_required on each
+kernel entry/exit even if no mitigation is required, let's
+add yet another alternative that by default jumps over the mitigation,
+and that gets nop'ed out if we're doing dynamic mitigation.
+
+Think of it as a poor man's static key...
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   14 ++++++++++++++
+ arch/arm64/kernel/entry.S      |    3 +++
+ 2 files changed, 17 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -285,6 +285,20 @@ void __init arm64_update_smccc_conduit(s
+       *updptr = cpu_to_le32(insn);
+ }
++void __init arm64_enable_wa2_handling(struct alt_instr *alt,
++                                    __le32 *origptr, __le32 *updptr,
++                                    int nr_inst)
++{
++      BUG_ON(nr_inst != 1);
++      /*
++       * Only allow mitigation on EL1 entry/exit and guest
++       * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
++       * be flipped.
++       */
++      if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
++              *updptr = cpu_to_le32(aarch64_insn_gen_nop());
++}
++
+ static void arm64_set_ssbd_mitigation(bool state)
+ {
+       switch (psci_ops.conduit) {
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -142,6 +142,9 @@ alternative_else_nop_endif
+       // to save/restore them if required.
+       .macro  apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++alternative_cb        arm64_enable_wa2_handling
++      b       \targ
++alternative_cb_end
+       ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
+       cbz     \tmp2, \targ
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
diff --git a/queue-4.14/autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch b/queue-4.14/autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch
new file mode 100644 (file)
index 0000000..d52393c
--- /dev/null
@@ -0,0 +1,84 @@
+From 02f51d45937f7bc7f4dee21e9f85b2d5eac37104 Mon Sep 17 00:00:00 2001
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+Date: Fri, 13 Jul 2018 16:58:59 -0700
+Subject: autofs: fix slab out of bounds read in getname_kernel()
+
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+
+commit 02f51d45937f7bc7f4dee21e9f85b2d5eac37104 upstream.
+
+The autofs subsystem does not check that the "path" parameter is present
+for all cases where it is required when it is passed in via the "param"
+struct.
+
+In particular it isn't checked for the AUTOFS_DEV_IOCTL_OPENMOUNT_CMD
+ioctl command.
+
+To solve it, modify the validate_dev_ioctl() function to check that a path has
+been provided for ioctl commands that require it.
+
+Link: http://lkml.kernel.org/r/153060031527.26631.18306637892746301555.stgit@pluto.themaw.net
+Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
+Signed-off-by: Ian Kent <raven@themaw.net>
+Reported-by: syzbot+60c837b428dc84e83a93@syzkaller.appspotmail.com
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/autofs4/dev-ioctl.c |   22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -148,6 +148,15 @@ static int validate_dev_ioctl(int cmd, s
+                               cmd);
+                       goto out;
+               }
++      } else {
++              unsigned int inr = _IOC_NR(cmd);
++
++              if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
++                  inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
++                  inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
++                      err = -EINVAL;
++                      goto out;
++              }
+       }
+       err = 0;
+@@ -284,7 +293,8 @@ static int autofs_dev_ioctl_openmount(st
+       dev_t devid;
+       int err, fd;
+-      /* param->path has already been checked */
++      /* param->path has been checked in validate_dev_ioctl() */
++
+       if (!param->openmount.devid)
+               return -EINVAL;
+@@ -446,10 +456,7 @@ static int autofs_dev_ioctl_requester(st
+       dev_t devid;
+       int err = -ENOENT;
+-      if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
+-              err = -EINVAL;
+-              goto out;
+-      }
++      /* param->path has been checked in validate_dev_ioctl() */
+       devid = sbi->sb->s_dev;
+@@ -534,10 +541,7 @@ static int autofs_dev_ioctl_ismountpoint
+       unsigned int devid, magic;
+       int err = -ENOENT;
+-      if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
+-              err = -EINVAL;
+-              goto out;
+-      }
++      /* param->path has been checked in validate_dev_ioctl() */
+       name = param->path;
+       type = param->ismountpoint.in.type;
diff --git a/queue-4.14/bdi-fix-another-oops-in-wb_workfn.patch b/queue-4.14/bdi-fix-another-oops-in-wb_workfn.patch
new file mode 100644 (file)
index 0000000..7c908e3
--- /dev/null
@@ -0,0 +1,145 @@
+From 3ee7e8697d5860b173132606d80a9cd35e7113ee Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 18 Jun 2018 15:46:58 +0200
+Subject: bdi: Fix another oops in wb_workfn()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 3ee7e8697d5860b173132606d80a9cd35e7113ee upstream.
+
+syzbot is reporting NULL pointer dereference at wb_workfn() [1] due to
+wb->bdi->dev being NULL. And Dmitry confirmed that wb->state was
+WB_shutting_down after wb->bdi->dev became NULL. This indicates that
+unregister_bdi() failed to call wb_shutdown() on one of wb objects.
+
+The problem is in cgwb_bdi_unregister() which does cgwb_kill() and thus
+drops bdi's reference to wb structures before going through the list of
+wbs again and calling wb_shutdown() on each of them. This way the loop
+iterating through all wbs can easily miss a wb if that wb has already
+passed through cgwb_remove_from_bdi_list() called from wb_shutdown()
+from cgwb_release_workfn() and as a result fully shutdown bdi although
+wb_workfn() for this wb structure is still running. In fact there are
+also other ways cgwb_bdi_unregister() can race with
+cgwb_release_workfn() leading e.g. to use-after-free issues:
+
+CPU1                            CPU2
+                                cgwb_bdi_unregister()
+                                  cgwb_kill(*slot);
+
+cgwb_release()
+  queue_work(cgwb_release_wq, &wb->release_work);
+cgwb_release_workfn()
+                                  wb = list_first_entry(&bdi->wb_list, ...)
+                                  spin_unlock_irq(&cgwb_lock);
+  wb_shutdown(wb);
+  ...
+  kfree_rcu(wb, rcu);
+                                  wb_shutdown(wb); -> oops use-after-free
+
+We solve these issues by synchronizing writeback structure shutdown from
+cgwb_bdi_unregister() with cgwb_release_workfn() using a new mutex. That
+way we also no longer need synchronization using WB_shutting_down as the
+mutex provides it for the CONFIG_CGROUP_WRITEBACK case and without
+CONFIG_CGROUP_WRITEBACK wb_shutdown() can be called only once from
+bdi_unregister().
+
+Reported-by: syzbot <syzbot+4a7438e774b21ddd8eca@syzkaller.appspotmail.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/backing-dev-defs.h |    2 +-
+ mm/backing-dev.c                 |   20 +++++++-------------
+ 2 files changed, 8 insertions(+), 14 deletions(-)
+
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -22,7 +22,6 @@ struct dentry;
+  */
+ enum wb_state {
+       WB_registered,          /* bdi_register() was done */
+-      WB_shutting_down,       /* wb_shutdown() in progress */
+       WB_writeback_running,   /* Writeback is in progress */
+       WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
+ };
+@@ -165,6 +164,7 @@ struct backing_dev_info {
+ #ifdef CONFIG_CGROUP_WRITEBACK
+       struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+       struct rb_root cgwb_congested_tree; /* their congested states */
++      struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+ #else
+       struct bdi_writeback_congested *wb_congested;
+ #endif
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -356,15 +356,8 @@ static void wb_shutdown(struct bdi_write
+       spin_lock_bh(&wb->work_lock);
+       if (!test_and_clear_bit(WB_registered, &wb->state)) {
+               spin_unlock_bh(&wb->work_lock);
+-              /*
+-               * Wait for wb shutdown to finish if someone else is just
+-               * running wb_shutdown(). Otherwise we could proceed to wb /
+-               * bdi destruction before wb_shutdown() is finished.
+-               */
+-              wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
+               return;
+       }
+-      set_bit(WB_shutting_down, &wb->state);
+       spin_unlock_bh(&wb->work_lock);
+       cgwb_remove_from_bdi_list(wb);
+@@ -376,12 +369,6 @@ static void wb_shutdown(struct bdi_write
+       mod_delayed_work(bdi_wq, &wb->dwork, 0);
+       flush_delayed_work(&wb->dwork);
+       WARN_ON(!list_empty(&wb->work_list));
+-      /*
+-       * Make sure bit gets cleared after shutdown is finished. Matches with
+-       * the barrier provided by test_and_clear_bit() above.
+-       */
+-      smp_wmb();
+-      clear_and_wake_up_bit(WB_shutting_down, &wb->state);
+ }
+ static void wb_exit(struct bdi_writeback *wb)
+@@ -505,10 +492,12 @@ static void cgwb_release_workfn(struct w
+       struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
+                                               release_work);
++      mutex_lock(&wb->bdi->cgwb_release_mutex);
+       wb_shutdown(wb);
+       css_put(wb->memcg_css);
+       css_put(wb->blkcg_css);
++      mutex_unlock(&wb->bdi->cgwb_release_mutex);
+       fprop_local_destroy_percpu(&wb->memcg_completions);
+       percpu_ref_exit(&wb->refcnt);
+@@ -694,6 +683,7 @@ static int cgwb_bdi_init(struct backing_
+       INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
+       bdi->cgwb_congested_tree = RB_ROOT;
++      mutex_init(&bdi->cgwb_release_mutex);
+       ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+       if (!ret) {
+@@ -714,7 +704,10 @@ static void cgwb_bdi_unregister(struct b
+       spin_lock_irq(&cgwb_lock);
+       radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
+               cgwb_kill(*slot);
++      spin_unlock_irq(&cgwb_lock);
++      mutex_lock(&bdi->cgwb_release_mutex);
++      spin_lock_irq(&cgwb_lock);
+       while (!list_empty(&bdi->wb_list)) {
+               wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
+                                     bdi_node);
+@@ -723,6 +716,7 @@ static void cgwb_bdi_unregister(struct b
+               spin_lock_irq(&cgwb_lock);
+       }
+       spin_unlock_irq(&cgwb_lock);
++      mutex_unlock(&bdi->cgwb_release_mutex);
+ }
+ /**
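The resulting ordering can be sketched as follows (same style as the race diagram in the changelog; names are those in the hunks above):

    cgwb_bdi_unregister()                    cgwb_release_workfn()
      cgwb_kill(*slot) under cgwb_lock
      mutex_lock(&bdi->cgwb_release_mutex)   mutex_lock(&wb->bdi->cgwb_release_mutex)
      wb_shutdown() each wb on wb_list       wb_shutdown(wb)
      mutex_unlock(...)                      mutex_unlock(...)

Whichever side takes cgwb_release_mutex first finishes its wb_shutdown() before the other can start, which is why the WB_shutting_down bit is no longer needed.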
diff --git a/queue-4.14/keys-dns-fix-parsing-multiple-options.patch b/queue-4.14/keys-dns-fix-parsing-multiple-options.patch
new file mode 100644 (file)
index 0000000..b3fd1f4
--- /dev/null
@@ -0,0 +1,107 @@
+From c604cb767049b78b3075497b80ebb8fd530ea2cc Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 11 Jul 2018 10:46:29 -0700
+Subject: KEYS: DNS: fix parsing multiple options
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit c604cb767049b78b3075497b80ebb8fd530ea2cc upstream.
+
+My recent fix for dns_resolver_preparse() printing very long strings was
+incomplete, as shown by syzbot which still managed to hit the
+WARN_ONCE() in set_precision() by adding a crafted "dns_resolver" key:
+
+    precision 50001 too large
+    WARNING: CPU: 7 PID: 864 at lib/vsprintf.c:2164 vsnprintf+0x48a/0x5a0
+
+The bug this time isn't just a printing bug, but also a logical error
+when multiple options ("#"-separated strings) are given in the key
+payload.  Specifically, when separating an option string into name and
+value, if there is no value then the name is incorrectly considered to
+end at the end of the key payload, rather than the end of the current
+option.  This bypasses validation of the option length, and also means
+that specifying multiple options is broken -- which presumably has gone
+unnoticed as there is currently only one valid option anyway.
+
+A similar problem also applied to option values, as the kstrtoul() when
+parsing the "dnserror" option will read past the end of the current
+option and into the next option.
+
+Fix these bugs by correctly computing the length of the option name and
+by copying the option value, null-terminated, into a temporary buffer.
+
+Reproducer for the WARN_ONCE() that syzbot hit:
+
+    perl -e 'print "#A#", "\0" x 50000' | keyctl padd dns_resolver desc @s
+
+Reproducer for "dnserror" option being parsed incorrectly (expected
+behavior is to fail when seeing the unknown option "foo", actual
+behavior was to read the dnserror value as "1#foo" and fail there):
+
+    perl -e 'print "#dnserror=1#foo\0"' | keyctl padd dns_resolver desc @s
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Fixes: 4a2d789267e0 ("DNS: If the DNS server returns an error, allow that to be cached [ver #2]")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/dns_resolver/dns_key.c |   30 +++++++++++++++++-------------
+ 1 file changed, 17 insertions(+), 13 deletions(-)
+
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_prepars
+               opt++;
+               kdebug("options: '%s'", opt);
+               do {
++                      int opt_len, opt_nlen;
+                       const char *eq;
+-                      int opt_len, opt_nlen, opt_vlen, tmp;
++                      char optval[128];
+                       next_opt = memchr(opt, '#', end - opt) ?: end;
+                       opt_len = next_opt - opt;
+-                      if (opt_len <= 0 || opt_len > 128) {
++                      if (opt_len <= 0 || opt_len > sizeof(optval)) {
+                               pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+                                                   opt_len);
+                               return -EINVAL;
+                       }
+-                      eq = memchr(opt, '=', opt_len) ?: end;
+-                      opt_nlen = eq - opt;
+-                      eq++;
+-                      opt_vlen = next_opt - eq; /* will be -1 if no value */
+-
+-                      tmp = opt_vlen >= 0 ? opt_vlen : 0;
+-                      kdebug("option '%*.*s' val '%*.*s'",
+-                             opt_nlen, opt_nlen, opt, tmp, tmp, eq);
++                      eq = memchr(opt, '=', opt_len);
++                      if (eq) {
++                              opt_nlen = eq - opt;
++                              eq++;
++                              memcpy(optval, eq, next_opt - eq);
++                              optval[next_opt - eq] = '\0';
++                      } else {
++                              opt_nlen = opt_len;
++                              optval[0] = '\0';
++                      }
++
++                      kdebug("option '%*.*s' val '%s'",
++                             opt_nlen, opt_nlen, opt, optval);
+                       /* see if it's an error number representing a DNS error
+                        * that's to be recorded as the result in this key */
+                       if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+                           memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+                               kdebug("dns error number option");
+-                              if (opt_vlen <= 0)
+-                                      goto bad_option_value;
+-                              ret = kstrtoul(eq, 10, &derrno);
++                              ret = kstrtoul(optval, 10, &derrno);
+                               if (ret < 0)
+                                       goto bad_option_value;
diff --git a/queue-4.14/kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch b/queue-4.14/kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch
new file mode 100644 (file)
index 0000000..5cc375c
--- /dev/null
@@ -0,0 +1,85 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:52 +0100
+Subject: KVM: arm/arm64: Convert kvm_host_cpu_state to a static per-cpu allocation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, James Morse <james.morse@arm.com>
+Message-ID: <20180720095312.1161-3-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 36989e7fd386a9a5822c48691473863f8fbb404d upstream.
+
+kvm_host_cpu_state is a per-cpu allocation made from kvm_arch_init()
+used to store the host EL1 registers when KVM switches to a guest.
+
+Make it easier for ASM to generate pointers into this per-cpu memory
+by making it a static allocation.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/arm/arm.c |   18 +++---------------
+ 1 file changed, 3 insertions(+), 15 deletions(-)
+
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -51,8 +51,8 @@
+ __asm__(".arch_extension      virt");
+ #endif
++DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
+ /* Per-CPU variable containing the currently running vcpu. */
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+@@ -351,7 +351,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+       }
+       vcpu->cpu = cpu;
+-      vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
++      vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+       kvm_arm_set_running_vcpu(vcpu);
+@@ -1259,19 +1259,8 @@ static inline void hyp_cpu_pm_exit(void)
+ }
+ #endif
+-static void teardown_common_resources(void)
+-{
+-      free_percpu(kvm_host_cpu_state);
+-}
+-
+ static int init_common_resources(void)
+ {
+-      kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+-      if (!kvm_host_cpu_state) {
+-              kvm_err("Cannot allocate host CPU state\n");
+-              return -ENOMEM;
+-      }
+-
+       /* set size of VMID supported by CPU */
+       kvm_vmid_bits = kvm_get_vmid_bits();
+       kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+@@ -1413,7 +1402,7 @@ static int init_hyp_mode(void)
+       for_each_possible_cpu(cpu) {
+               kvm_cpu_context_t *cpu_ctxt;
+-              cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
++              cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
+               err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+               if (err) {
+@@ -1497,7 +1486,6 @@ out_hyp:
+       if (!in_hyp_mode)
+               teardown_hyp_mode();
+ out_err:
+-      teardown_common_resources();
+       return err;
+ }
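For readers unfamiliar with the distinction, the patch swaps a dynamic per-cpu allocation for a static one; a generic, hedged sketch of the two forms (struct host_state is a stand-in, not the real kvm_cpu_context_t):

    #include <linux/percpu.h>

    struct host_state { unsigned long regs[31]; };  /* illustrative stand-in */

    /* Before: the per-cpu area is only reachable through a runtime pointer,
     * which is awkward to locate from assembly. */
    static struct host_state __percpu *dyn_state;   /* = alloc_percpu(...) */

    /* After: the symbol itself names the per-cpu area, so assembly can use
     * adr_l on it and C code uses this_cpu_ptr(&static_state). */
    DEFINE_PER_CPU(struct host_state, static_state);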
diff --git a/queue-4.14/kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch b/queue-4.14/kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch
new file mode 100644 (file)
index 0000000..d863ccb
--- /dev/null
@@ -0,0 +1,88 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:57 +0100
+Subject: KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com
+Message-ID: <20180720095312.1161-8-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+Commit 44a497abd621a71c645f06d3d545ae2f46448830 upstream.
+
+kvm_vgic_global_state is part of the read-only section, and is
+usually accessed using PC-relative address generation (adrp + add).
+
+It is thus useless to use kern_hyp_va() on it, and actively problematic
+if kern_hyp_va() becomes non-idempotent. On the other hand, there is
+no way that the compiler is going to guarantee that such access is
+always PC relative.
+
+So let's bite the bullet and provide our own accessor.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h   |    7 +++++++
+ arch/arm64/include/asm/kvm_mmu.h |   20 ++++++++++++++++++++
+ virt/kvm/arm/hyp/vgic-v2-sr.c    |    2 +-
+ 3 files changed, 28 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -28,6 +28,13 @@
+  */
+ #define kern_hyp_va(kva)      (kva)
++/* Contrary to arm64, there is no need to generate a PC-relative address */
++#define hyp_symbol_addr(s)                                            \
++      ({                                                              \
++              typeof(s) *addr = &(s);                                 \
++              addr;                                                   \
++      })
++
+ /*
+  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+  */
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -131,6 +131,26 @@ static inline unsigned long __kern_hyp_v
+ #define kern_hyp_va(v)        ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+ /*
++ * Obtain the PC-relative address of a kernel symbol
++ * s: symbol
++ *
++ * The goal of this macro is to return a symbol's address based on a
++ * PC-relative computation, as opposed to a loading the VA from a
++ * constant pool or something similar. This works well for HYP, as an
++ * absolute VA is guaranteed to be wrong. Only use this if trying to
++ * obtain the address of a symbol (i.e. not something you obtained by
++ * following a pointer).
++ */
++#define hyp_symbol_addr(s)                                            \
++      ({                                                              \
++              typeof(s) *addr;                                        \
++              asm("adrp       %0, %1\n"                               \
++                  "add        %0, %0, :lo12:%1\n"                     \
++                  : "=r" (addr) : "S" (&s));                          \
++              addr;                                                   \
++      })
++
++/*
+  * We currently only support a 40bit IPA.
+  */
+ #define KVM_PHYS_SHIFT        (40)
+--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
++++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
+@@ -139,7 +139,7 @@ int __hyp_text __vgic_v2_perform_cpuif_a
+               return -1;
+       rd = kvm_vcpu_dabt_get_rd(vcpu);
+-      addr  = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
++      addr  = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
+       addr += fault_ipa - vgic->vgic_cpu_base;
+       if (kvm_vcpu_dabt_iswrite(vcpu)) {
diff --git a/queue-4.14/kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch b/queue-4.14/kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch
new file mode 100644 (file)
index 0000000..d1aa693
--- /dev/null
@@ -0,0 +1,247 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:58 +0100
+Subject: KVM: arm64: Avoid storing the vcpu pointer on the stack
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, Christoffer Dall <christoffer.dall@linaro.org>
+Message-ID: <20180720095312.1161-9-marc.zyngier@arm.com>
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+Commit 4464e210de9e80e38de59df052fe09ea2ff80b1b upstream.
+
+We already have the percpu area for the host cpu state, which points to
+the VCPU, so there's no need to store the VCPU pointer on the stack on
+every context switch.  We can be a little more clever and just use
+tpidr_el2 for the percpu offset and load the VCPU pointer from the host
+context.
+
+This has the benefit of being able to retrieve the host context even
+when our stack is corrupted, and it has a potential performance benefit
+because we trade a store plus a load for an mrs and a load on a round
+trip to the guest.
+
+This does require us to calculate the percpu offset without including
+the offset from the kernel mapping of the percpu array to the linear
+mapping of the array (which is what we store in tpidr_el1), because a
+PC-relative generated address in EL2 is already giving us the hyp alias
+of the linear mapping of a kernel address.  We do this in
+__cpu_init_hyp_mode() by using kvm_ksym_ref().
+
+The code that accesses ESR_EL2 was previously using an alternative to
+use the _EL1 accessor on VHE systems, but this was actually unnecessary
+as the _EL1 accessor aliases the ESR_EL2 register on VHE, and the _EL2
+accessor does the same thing on both systems.
+
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h  |   15 +++++++++++++++
+ arch/arm64/include/asm/kvm_host.h |   15 +++++++++++++++
+ arch/arm64/kernel/asm-offsets.c   |    1 +
+ arch/arm64/kvm/hyp/entry.S        |    6 +-----
+ arch/arm64/kvm/hyp/hyp-entry.S    |   28 ++++++++++------------------
+ arch/arm64/kvm/hyp/switch.c       |    5 +----
+ arch/arm64/kvm/hyp/sysreg-sr.c    |    5 +++++
+ 7 files changed, 48 insertions(+), 27 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,7 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT   0
+ #define KVM_ARM64_DEBUG_DIRTY         (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
++/* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym)                                             \
+       ({                                                              \
+               void *val = &sym;                                       \
+@@ -68,6 +69,20 @@ extern u32 __init_stage2_translation(voi
+ extern void __qcom_hyp_sanitize_btac_predictors(void);
++#else /* __ASSEMBLY__ */
++
++.macro get_host_ctxt reg, tmp
++      adr_l   \reg, kvm_host_cpu_state
++      mrs     \tmp, tpidr_el2
++      add     \reg, \reg, \tmp
++.endm
++
++.macro get_vcpu_ptr vcpu, ctxt
++      get_host_ctxt \ctxt, \vcpu
++      ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
++      kern_hyp_va     \vcpu
++.endm
++
+ #endif
+ #endif /* __ARM_KVM_ASM_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -350,10 +350,15 @@ int kvm_perf_teardown(void);
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
++void __kvm_set_tpidr_el2(u64 tpidr_el2);
++DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+                                      unsigned long hyp_stack_ptr,
+                                      unsigned long vector_ptr)
+ {
++      u64 tpidr_el2;
++
+       /*
+        * Call initialization code, and switch to the full blown HYP code.
+        * If the cpucaps haven't been finalized yet, something has gone very
+@@ -362,6 +367,16 @@ static inline void __cpu_init_hyp_mode(p
+        */
+       BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
++
++      /*
++       * Calculate the raw per-cpu offset without a translation from the
++       * kernel's mapping to the linear mapping, and store it in tpidr_el2
++       * so that we can use adr_l to access per-cpu variables in EL2.
++       */
++      tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
++              - (u64)kvm_ksym_ref(kvm_host_cpu_state);
++
++      kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
+ }
+ static inline void kvm_arch_hardware_unsetup(void) {}
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -136,6 +136,7 @@ int main(void)
+   DEFINE(CPU_FP_REGS,         offsetof(struct kvm_regs, fp_regs));
+   DEFINE(VCPU_FPEXC32_EL2,    offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
+   DEFINE(VCPU_HOST_CONTEXT,   offsetof(struct kvm_vcpu, arch.host_cpu_context));
++  DEFINE(HOST_CONTEXT_VCPU,   offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ #endif
+ #ifdef CONFIG_CPU_PM
+   DEFINE(CPU_SUSPEND_SZ,      sizeof(struct cpu_suspend_ctx));
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,9 +62,6 @@ ENTRY(__guest_enter)
+       // Store the host regs
+       save_callee_saved_regs x1
+-      // Store host_ctxt and vcpu for use at exit time
+-      stp     x1, x0, [sp, #-16]!
+-
+       add     x18, x0, #VCPU_CONTEXT
+       // Restore guest regs x0-x17
+@@ -118,8 +115,7 @@ ENTRY(__guest_exit)
+       // Store the guest regs x19-x29, lr
+       save_callee_saved_regs x1
+-      // Restore the host_ctxt from the stack
+-      ldr     x2, [sp], #16
++      get_host_ctxt   x2, x3
+       // Now restore the host regs
+       restore_callee_saved_regs x2
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call)
+ el1_sync:                             // Guest trapped into EL2
+       stp     x0, x1, [sp, #-16]!
+-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+-      mrs     x1, esr_el2
+-alternative_else
+-      mrs     x1, esr_el1
+-alternative_endif
+-      lsr     x0, x1, #ESR_ELx_EC_SHIFT
+-
++      mrs     x0, esr_el2
++      lsr     x0, x0, #ESR_ELx_EC_SHIFT
+       cmp     x0, #ESR_ELx_EC_HVC64
+       ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
+       b.ne    el1_trap
+@@ -117,10 +112,14 @@ el1_hvc_guest:
+       eret
+ el1_trap:
++      get_vcpu_ptr    x1, x0
++
++      mrs             x0, esr_el2
++      lsr             x0, x0, #ESR_ELx_EC_SHIFT
+       /*
+        * x0: ESR_EC
++       * x1: vcpu pointer
+        */
+-      ldr     x1, [sp, #16 + 8]       // vcpu stored by __guest_enter
+       /*
+        * We trap the first access to the FP/SIMD to save the host context
+@@ -138,13 +137,13 @@ alternative_else_nop_endif
+ el1_irq:
+       stp     x0, x1, [sp, #-16]!
+-      ldr     x1, [sp, #16 + 8]
++      get_vcpu_ptr    x1, x0
+       mov     x0, #ARM_EXCEPTION_IRQ
+       b       __guest_exit
+ el1_error:
+       stp     x0, x1, [sp, #-16]!
+-      ldr     x1, [sp, #16 + 8]
++      get_vcpu_ptr    x1, x0
+       mov     x0, #ARM_EXCEPTION_EL1_SERROR
+       b       __guest_exit
+@@ -180,14 +179,7 @@ ENTRY(__hyp_do_panic)
+ ENDPROC(__hyp_do_panic)
+ ENTRY(__hyp_panic)
+-      /*
+-       * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
+-       * not be accessible by this address from EL2, hyp_panic() converts
+-       * it with kern_hyp_va() before use.
+-       */
+-      ldr     x0, =kvm_host_cpu_state
+-      mrs     x1, tpidr_el2
+-      add     x0, x0, x1
++      get_host_ctxt x0, x1
+       b       hyp_panic
+ ENDPROC(__hyp_panic)
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -437,7 +437,7 @@ static hyp_alternate_select(__hyp_call_p
+                           __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+                           ARM64_HAS_VIRT_HOST_EXTN);
+-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+ {
+       struct kvm_vcpu *vcpu = NULL;
+@@ -446,9 +446,6 @@ void __hyp_text __noreturn hyp_panic(str
+       u64 par = read_sysreg(par_el1);
+       if (read_sysreg(vttbr_el2)) {
+-              struct kvm_cpu_context *host_ctxt;
+-
+-              host_ctxt = kern_hyp_va(__host_ctxt);
+               vcpu = host_ctxt->__hyp_running_vcpu;
+               __timer_save_state(vcpu);
+               __deactivate_traps(vcpu);
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state
+       if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+               write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+ }
++
++void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
++{
++      asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
++}
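Put back into C, the lookup that get_host_ctxt/get_vcpu_ptr perform on every trap is roughly the following (a sketch; the real code has to stay in assembly because it runs before any C context is set up at EL2):

    /* Sketch of the vcpu lookup that replaces the stack slot. */
    static struct kvm_vcpu *sketch_get_vcpu_ptr(void)
    {
            struct kvm_cpu_context *host_ctxt;
            unsigned long off;

            asm("mrs %0, tpidr_el2" : "=r" (off));  /* per-cpu offset */
            host_ctxt = (void *)((char *)&kvm_host_cpu_state + off);
            return kern_hyp_va(host_ctxt->__hyp_running_vcpu);
    }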
diff --git a/queue-4.14/kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch b/queue-4.14/kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch
new file mode 100644 (file)
index 0000000..4e6388a
--- /dev/null
@@ -0,0 +1,165 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:53 +0100
+Subject: KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, James Morse <james.morse@arm.com>
+Message-ID: <20180720095312.1161-4-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit c97e166e54b662717d20ec2e36761758d2b6a7c2 upstream.
+
+Make tpidr_el2 a cpu-offset for per-cpu variables in the same way the
+host uses tpidr_el1. This lets tpidr_el{1,2} have the same value, and
+on VHE they can be the same register.
+
+KVM calls hyp_panic() when anything unexpected happens. This may occur
+while a guest owns the EL1 registers. KVM stashes the vcpu pointer in
+tpidr_el2, which it uses to find the host context in order to restore
+the host EL1 registers before parachuting into the host's panic().
+
+The host context is a struct kvm_cpu_context allocated in the per-cpu
+area, and mapped to hyp. Given the per-cpu offset for this CPU, this is
+easy to find. Change hyp_panic() to take a pointer to the
+struct kvm_cpu_context. Wrap these calls with an asm function that
+retrieves the struct kvm_cpu_context from the host's per-cpu area.
+
+Copy the per-cpu offset from the host's tpidr_el1 into tpidr_el2 during
+kvm init. (Later patches will make this unnecessary for VHE hosts)
+
+We print out the vcpu pointer as part of the panic message. Add a back
+reference to the 'running vcpu' in the host cpu context to preserve this.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |    2 ++
+ arch/arm64/kvm/hyp/hyp-entry.S    |   12 ++++++++++++
+ arch/arm64/kvm/hyp/s2-setup.c     |    3 +++
+ arch/arm64/kvm/hyp/switch.c       |   25 +++++++++++++------------
+ 4 files changed, 30 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -194,6 +194,8 @@ struct kvm_cpu_context {
+               u64 sys_regs[NR_SYS_REGS];
+               u32 copro[NR_COPRO_REGS];
+       };
++
++      struct kvm_vcpu *__hyp_running_vcpu;
+ };
+ typedef struct kvm_cpu_context kvm_cpu_context_t;
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -179,6 +179,18 @@ ENTRY(__hyp_do_panic)
+       eret
+ ENDPROC(__hyp_do_panic)
++ENTRY(__hyp_panic)
++      /*
++       * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
++       * not be accessible by this address from EL2, hyp_panic() converts
++       * it with kern_hyp_va() before use.
++       */
++      ldr     x0, =kvm_host_cpu_state
++      mrs     x1, tpidr_el2
++      add     x0, x0, x1
++      b       hyp_panic
++ENDPROC(__hyp_panic)
++
+ .macro invalid_vector label, target = __hyp_panic
+       .align  2
+ \label:
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -84,5 +84,8 @@ u32 __hyp_text __init_stage2_translation
+       write_sysreg(val, vtcr_el2);
++      /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++      write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
+       return parange;
+ }
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -289,9 +289,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm
+       u64 exit_code;
+       vcpu = kern_hyp_va(vcpu);
+-      write_sysreg(vcpu, tpidr_el2);
+       host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++      host_ctxt->__hyp_running_vcpu = vcpu;
+       guest_ctxt = &vcpu->arch.ctxt;
+       __sysreg_save_host_state(host_ctxt);
+@@ -406,7 +406,8 @@ again:
+ static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
++                                           struct kvm_vcpu *vcpu)
+ {
+       unsigned long str_va;
+@@ -420,35 +421,35 @@ static void __hyp_text __hyp_call_panic_
+       __hyp_do_panic(str_va,
+                      spsr,  elr,
+                      read_sysreg(esr_el2),   read_sysreg_el2(far),
+-                     read_sysreg(hpfar_el2), par,
+-                     (void *)read_sysreg(tpidr_el2));
++                     read_sysreg(hpfar_el2), par, vcpu);
+ }
+-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
++                                          struct kvm_vcpu *vcpu)
+ {
+       panic(__hyp_panic_string,
+             spsr,  elr,
+             read_sysreg_el2(esr),   read_sysreg_el2(far),
+-            read_sysreg(hpfar_el2), par,
+-            (void *)read_sysreg(tpidr_el2));
++            read_sysreg(hpfar_el2), par, vcpu);
+ }
+ static hyp_alternate_select(__hyp_call_panic,
+                           __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+                           ARM64_HAS_VIRT_HOST_EXTN);
+-void __hyp_text __noreturn __hyp_panic(void)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
+ {
++      struct kvm_vcpu *vcpu = NULL;
++
+       u64 spsr = read_sysreg_el2(spsr);
+       u64 elr = read_sysreg_el2(elr);
+       u64 par = read_sysreg(par_el1);
+       if (read_sysreg(vttbr_el2)) {
+-              struct kvm_vcpu *vcpu;
+               struct kvm_cpu_context *host_ctxt;
+-              vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+-              host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++              host_ctxt = kern_hyp_va(__host_ctxt);
++              vcpu = host_ctxt->__hyp_running_vcpu;
+               __timer_save_state(vcpu);
+               __deactivate_traps(vcpu);
+               __deactivate_vm(vcpu);
+@@ -456,7 +457,7 @@ void __hyp_text __noreturn __hyp_panic(v
+       }
+       /* Call panic for real */
+-      __hyp_call_panic()(spsr, elr, par);
++      __hyp_call_panic()(spsr, elr, par, vcpu);
+       unreachable();
+ }
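
The core change in the patch above is a data-flow one: instead of reading the
vcpu pointer back out of tpidr_el2, the panic path is now handed the host
kvm_cpu_context, which carries a __hyp_running_vcpu back reference, freeing
tpidr_el2 to hold the per-cpu offset. Below is a minimal userspace C sketch of
that flow; the names (cpu_context, enter_guest, hyp_panic_model) are invented
for illustration and are not the kernel's.

/* Minimal model of the new hyp_panic() data flow: the host context carries
 * a back-pointer to the running vcpu, so the panic path no longer needs a
 * dedicated register to find it. All names here are illustrative.
 */
#include <stdio.h>

struct vcpu { int id; };

struct cpu_context {
        unsigned long sys_regs[4];
        struct vcpu *running_vcpu;      /* analogue of __hyp_running_vcpu */
};

/* world switch: record which vcpu owns the CPU before entering the guest */
static void enter_guest(struct cpu_context *host_ctxt, struct vcpu *v)
{
        host_ctxt->running_vcpu = v;
}

/* panic path: receives the host context directly and recovers the vcpu */
static void hyp_panic_model(struct cpu_context *host_ctxt)
{
        struct vcpu *v = host_ctxt->running_vcpu;

        printf("panic while vcpu %d was running\n", v ? v->id : -1);
}

int main(void)
{
        struct cpu_context host = { { 0 }, NULL };
        struct vcpu v = { .id = 7 };

        enter_guest(&host, &v);
        hyp_panic_model(&host);
        return 0;
}
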
diff --git a/queue-4.14/kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch b/queue-4.14/kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch
new file mode 100644 (file)
index 0000000..1bfaaeb
--- /dev/null
@@ -0,0 +1,123 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:55 +0100
+Subject: KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, James Morse <james.morse@arm.com>
+Message-ID: <20180720095312.1161-6-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 1f742679c33bc083722cb0b442a95d458c491b56 upstream.
+
+Now that a VHE host uses tpidr_el2 for the cpu offset we no longer
+need KVM to save/restore tpidr_el1. Move this from the 'common' code
+into the non-vhe code. While we're at it, on VHE we don't need to
+save the ELR or SPSR as kernel_entry in entry.S will have pushed these
+onto the kernel stack, and will restore them from there. Move these
+to the non-vhe code as we need them to get back to the host.
+
+Finally, remove the always-copy-tpidr we hid in the stage2 setup
+code; cpufeature's enable callback will do this for VHE, so we only
+need KVM to do it for non-vhe. Add the copy into kvm-init instead.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp-init.S      |    4 ++++
+ arch/arm64/kvm/hyp/s2-setup.c  |    3 ---
+ arch/arm64/kvm/hyp/sysreg-sr.c |   16 ++++++++--------
+ 3 files changed, 12 insertions(+), 11 deletions(-)
+
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -122,6 +122,10 @@ CPU_BE(   orr     x4, x4, #SCTLR_ELx_EE)
+       kern_hyp_va     x2
+       msr     vbar_el2, x2
++      /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++      mrs     x1, tpidr_el1
++      msr     tpidr_el2, x1
++
+       /* Hello, World! */
+       eret
+ ENDPROC(__kvm_hyp_init)
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -84,8 +84,5 @@ u32 __hyp_text __init_stage2_translation
+       write_sysreg(val, vtcr_el2);
+-      /* copy tpidr_el1 into tpidr_el2 for use by HYP */
+-      write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+-
+       return parange;
+ }
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothi
+ /*
+  * Non-VHE: Both host and guest must save everything.
+  *
+- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+- * pstate, and guest must save everything.
++ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
++ * and guest must save everything.
+  */
+ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_com
+       ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
+       ctxt->sys_regs[TPIDR_EL0]       = read_sysreg(tpidr_el0);
+       ctxt->sys_regs[TPIDRRO_EL0]     = read_sysreg(tpidrro_el0);
+-      ctxt->sys_regs[TPIDR_EL1]       = read_sysreg(tpidr_el1);
+       ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);
+       ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
+-      ctxt->gp_regs.regs.pc           = read_sysreg_el2(elr);
+-      ctxt->gp_regs.regs.pstate       = read_sysreg_el2(spsr);
+ }
+ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+@@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_sta
+       ctxt->sys_regs[AMAIR_EL1]       = read_sysreg_el1(amair);
+       ctxt->sys_regs[CNTKCTL_EL1]     = read_sysreg_el1(cntkctl);
+       ctxt->sys_regs[PAR_EL1]         = read_sysreg(par_el1);
++      ctxt->sys_regs[TPIDR_EL1]       = read_sysreg(tpidr_el1);
+       ctxt->gp_regs.sp_el1            = read_sysreg(sp_el1);
+       ctxt->gp_regs.elr_el1           = read_sysreg_el1(elr);
+       ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
++      ctxt->gp_regs.regs.pc           = read_sysreg_el2(elr);
++      ctxt->gp_regs.regs.pstate       = read_sysreg_el2(spsr);
+ }
+ static hyp_alternate_select(__sysreg_call_save_host_state,
+@@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_
+       write_sysreg(ctxt->sys_regs[ACTLR_EL1],   actlr_el1);
+       write_sysreg(ctxt->sys_regs[TPIDR_EL0],   tpidr_el0);
+       write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+-      write_sysreg(ctxt->sys_regs[TPIDR_EL1],   tpidr_el1);
+       write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);
+       write_sysreg(ctxt->gp_regs.regs.sp,       sp_el0);
+-      write_sysreg_el2(ctxt->gp_regs.regs.pc,   elr);
+-      write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ }
+ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+@@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_
+       write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],     amair);
+       write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],   cntkctl);
+       write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
++      write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
+       write_sysreg(ctxt->gp_regs.sp_el1,              sp_el1);
+       write_sysreg_el1(ctxt->gp_regs.elr_el1,         elr);
+       write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
++      write_sysreg_el2(ctxt->gp_regs.regs.pc,         elr);
++      write_sysreg_el2(ctxt->gp_regs.regs.pstate,     spsr);
+ }
+ static hyp_alternate_select(__sysreg_call_restore_host_state,
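
The effect of the patch above is to shrink what the common save/restore path
touches: tpidr_el1 and the EL2 ELR/SPSR are only saved where a non-VHE host
(or a guest) actually needs them. The sketch below models that split in plain
userspace C under stated assumptions: register reads are stubbed with
constants and every name (save_common, save_full, has_vhe) is illustrative,
not the kernel's.

/* Userspace sketch of the save-path split: the common helper no longer
 * touches tpidr_el1/elr/spsr; the "full" helper (used for guests and for
 * non-VHE hosts) still does. Values are stand-ins for register reads.
 */
#include <stdio.h>

struct ctxt {
        unsigned long tpidr_el0, tpidr_el1, pc, pstate;
};

static void save_common(struct ctxt *c)
{
        c->tpidr_el0 = 0x1111;          /* stand-in for read_sysreg(tpidr_el0) */
}

static void save_full(struct ctxt *c)
{
        save_common(c);
        c->tpidr_el1 = 0x2222;          /* only when the guest can clobber it  */
        c->pc        = 0x3333;          /* stand-in for read_sysreg_el2(elr)   */
        c->pstate    = 0x4444;          /* stand-in for read_sysreg_el2(spsr)  */
}

int main(void)
{
        struct ctxt host = { 0 }, guest = { 0 };
        int has_vhe = 1;

        if (has_vhe)
                save_common(&host);     /* VHE host: tpidr_el1 left alone */
        else
                save_full(&host);
        save_full(&guest);              /* guests always save everything  */

        printf("host.tpidr_el1 = %#lx (untouched on VHE)\n", host.tpidr_el1);
        printf("guest.tpidr_el1 = %#lx\n", guest.tpidr_el1);
        return 0;
}
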
diff --git a/queue-4.14/kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch b/queue-4.14/kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch
new file mode 100644 (file)
index 0000000..c6da9dc
--- /dev/null
@@ -0,0 +1,99 @@
+From foo@baz Fri Jul 20 11:59:34 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:52:51 +0100
+Subject: KVM: arm64: Store vcpu on the stack during __guest_enter()
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, jeremy.linton@arm.com, James Morse <james.morse@arm.com>
+Message-ID: <20180720095312.1161-2-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 32b03d1059667a39e089c45ee38ec9c16332430f upstream.
+
+KVM uses tpidr_el2 as its private vcpu register, which makes sense for
+non-vhe world switch as only KVM can access this register. This means
+vhe Linux has to use tpidr_el1, which KVM has to save/restore as part
+of the host context.
+
+If the SDEI handler code runs behind KVM's back, it mustn't access any
+per-cpu variables. To allow this on systems with vhe we need to make
+the host use tpidr_el2, saving KVM from save/restoring it.
+
+__guest_enter() stores the host_ctxt on the stack; do the same with
+the vcpu.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/entry.S     |   10 +++++++---
+ arch/arm64/kvm/hyp/hyp-entry.S |    6 +++---
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
+       // Store the host regs
+       save_callee_saved_regs x1
+-      // Store the host_ctxt for use at exit time
+-      str     x1, [sp, #-16]!
++      // Store host_ctxt and vcpu for use at exit time
++      stp     x1, x0, [sp, #-16]!
+       add     x18, x0, #VCPU_CONTEXT
+@@ -159,6 +159,10 @@ abort_guest_exit_end:
+ ENDPROC(__guest_exit)
+ ENTRY(__fpsimd_guest_restore)
++      // x0: esr
++      // x1: vcpu
++      // x2-x29,lr: vcpu regs
++      // vcpu x0-x1 on the stack
+       stp     x2, x3, [sp, #-16]!
+       stp     x4, lr, [sp, #-16]!
+@@ -173,7 +177,7 @@ alternative_else
+ alternative_endif
+       isb
+-      mrs     x3, tpidr_el2
++      mov     x3, x1
+       ldr     x0, [x3, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x0
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -120,6 +120,7 @@ el1_trap:
+       /*
+        * x0: ESR_EC
+        */
++      ldr     x1, [sp, #16 + 8]       // vcpu stored by __guest_enter
+       /*
+        * We trap the first access to the FP/SIMD to save the host context
+@@ -132,19 +133,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
+       b.eq    __fpsimd_guest_restore
+ alternative_else_nop_endif
+-      mrs     x1, tpidr_el2
+       mov     x0, #ARM_EXCEPTION_TRAP
+       b       __guest_exit
+ el1_irq:
+       stp     x0, x1, [sp, #-16]!
+-      mrs     x1, tpidr_el2
++      ldr     x1, [sp, #16 + 8]
+       mov     x0, #ARM_EXCEPTION_IRQ
+       b       __guest_exit
+ el1_error:
+       stp     x0, x1, [sp, #-16]!
+-      mrs     x1, tpidr_el2
++      ldr     x1, [sp, #16 + 8]
+       mov     x0, #ARM_EXCEPTION_EL1_SERROR
+       b       __guest_exit
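
The magic offsets in the hunks above follow from a fixed stack layout:
__guest_enter pushes {host_ctxt, vcpu} as one 16-byte pair, the exception
vector then pushes the guest's x0/x1 on top, so the handlers can recover the
vcpu with ldr x1, [sp, #16 + 8]. The short C program below only models that
arithmetic, with an array standing in for the stack and fake pointer values.

/* Model of the stack layout the patch relies on: after one more 16-byte
 * push on top of the {host_ctxt, vcpu} pair, the vcpu sits at byte offset
 * 16 + 8 from the stack pointer. Purely illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define STACK_WORDS 16

int main(void)
{
        uint64_t stack[STACK_WORDS];
        uint64_t *sp = &stack[STACK_WORDS];     /* full-descending stack */

        uint64_t host_ctxt = 0x1000;            /* x1 in __guest_enter */
        uint64_t vcpu      = 0x2000;            /* x0 in __guest_enter */

        /* stp x1, x0, [sp, #-16]!  -> host_ctxt at [sp], vcpu at [sp + 8] */
        sp -= 2;
        sp[0] = host_ctxt;
        sp[1] = vcpu;

        /* exception vector: stp x0, x1, [sp, #-16]!  (guest x0/x1 saved) */
        sp -= 2;
        sp[0] = 0xaaaa;
        sp[1] = 0xbbbb;

        /* ldr x1, [sp, #16 + 8]  -> skips the saved x0/x1, lands on the vcpu */
        uint64_t recovered = *(uint64_t *)((char *)sp + 16 + 8);

        printf("recovered vcpu = %#lx (expected %#lx)\n",
               (unsigned long)recovered, (unsigned long)vcpu);
        return 0;
}
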
diff --git a/queue-4.14/net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch b/queue-4.14/net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
new file mode 100644 (file)
index 0000000..efc5ec0
--- /dev/null
@@ -0,0 +1,49 @@
+From 3bc53be9db21040b5d2de4d455f023c8c494aa68 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Wed, 18 Jul 2018 18:57:27 +0900
+Subject: net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit 3bc53be9db21040b5d2de4d455f023c8c494aa68 upstream.
+
+syzbot is reporting stalls at nfc_llcp_send_ui_frame() [1]. This is
+because nfc_llcp_send_ui_frame() is retrying the loop without any delay
+when nonblocking nfc_alloc_send_skb() returned NULL.
+
+Since there is no need to use MSG_DONTWAIT if we retry until
+sock_alloc_send_pskb() succeeds, let's use a blocking call.
+Also, in case an unexpected error occurs, let's break out of the loop
+when the blocking nfc_alloc_send_skb() fails.
+
+[1] https://syzkaller.appspot.com/bug?id=4a131cc571c3733e0eff6bc673f4e36ae48f19c6
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+d29d18215e477cfbfbdd@syzkaller.appspotmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/nfc/llcp_commands.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_ll
+               pr_debug("Fragment %zd bytes remaining %zd",
+                        frag_len, remaining_len);
+-              pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
++              pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
+                                        frag_len + LLCP_HEADER_SIZE, &err);
+               if (pdu == NULL) {
+-                      pr_err("Could not allocate PDU\n");
+-                      continue;
++                      pr_err("Could not allocate PDU (error=%d)\n", err);
++                      len -= remaining_len;
++                      if (len == 0)
++                              len = err;
++                      break;
+               }
+               pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
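
The fix above changes the loop's failure behaviour rather than its happy
path: allocation now blocks, and if it still fails the loop stops, returning
the bytes already sent or, when nothing was sent at all, the error code. The
standalone sketch below mirrors only that return-value logic; alloc_frag()
and send_fragments() are invented stand-ins, and one forced allocation
failure is used to exercise the error path.

/* Sketch of the fixed send-loop semantics: stop on allocation failure and
 * report partial progress, or the error if no fragment went out. The
 * allocator is a stand-in that fails on its third call.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *alloc_frag(size_t size, int *err)
{
        static int calls;

        if (++calls == 3) {             /* simulate one hard failure */
                *err = -ENOMEM;
                return NULL;
        }
        *err = 0;
        return malloc(size);
}

static int send_fragments(size_t len, size_t frag_len)
{
        size_t remaining = len;
        int err = 0;
        int ret = (int)len;

        while (remaining > 0) {
                size_t chunk = remaining < frag_len ? remaining : frag_len;
                void *pdu = alloc_frag(chunk, &err);

                if (pdu == NULL) {
                        ret = (int)(len - remaining);   /* bytes already sent  */
                        if (ret == 0)
                                ret = err;              /* nothing sent: error */
                        break;
                }
                free(pdu);                              /* "transmit" the chunk */
                remaining -= chunk;
        }
        return ret;
}

int main(void)
{
        printf("sent %d of 1000 bytes\n", send_fragments(1000, 128));
        return 0;
}
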
diff --git a/queue-4.14/netfilter-ebtables-reject-non-bridge-targets.patch b/queue-4.14/netfilter-ebtables-reject-non-bridge-targets.patch
new file mode 100644 (file)
index 0000000..7c2a82d
--- /dev/null
@@ -0,0 +1,64 @@
+From 11ff7288beb2b7da889a014aff0a7b80bf8efcf3 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 6 Jun 2018 12:14:56 +0200
+Subject: netfilter: ebtables: reject non-bridge targets
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 11ff7288beb2b7da889a014aff0a7b80bf8efcf3 upstream.
+
+the ebtables evaluation loop expects targets to return
+positive values (jumps), or negative values (absolute verdicts).
+
+This is completely different from what xtables does.
+In xtables, targets are expected to return the standard netfilter
+verdicts, i.e. NF_DROP, NF_ACCEPT, etc.
+
+ebtables will consider these as jumps.
+
+Therefore reject any target found due to unspec fallback.
+v2: also reject watchers.  ebtables ignores their return value, so
+a target that assumes skb ownership (and returns NF_STOLEN) causes
+use-after-free.
+
+The only watchers in the 'ebtables' front-end are log and nflog;
+both have AF_BRIDGE specific wrappers on kernel side.
+
+Reported-by: syzbot+2b43f681169a2a0d306a@syzkaller.appspotmail.com
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/netfilter/ebtables.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -398,6 +398,12 @@ ebt_check_watcher(struct ebt_entry_watch
+       watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
+       if (IS_ERR(watcher))
+               return PTR_ERR(watcher);
++
++      if (watcher->family != NFPROTO_BRIDGE) {
++              module_put(watcher->me);
++              return -ENOENT;
++      }
++
+       w->u.watcher = watcher;
+       par->target   = watcher;
+@@ -719,6 +725,13 @@ ebt_check_entry(struct ebt_entry *e, str
+               goto cleanup_watchers;
+       }
++      /* Reject UNSPEC, xtables verdicts/return values are incompatible */
++      if (target->family != NFPROTO_BRIDGE) {
++              module_put(target->me);
++              ret = -ENOENT;
++              goto cleanup_watchers;
++      }
++
+       t->u.target = target;
+       if (t->u.target == &ebt_standard_target) {
+               if (gap < sizeof(struct ebt_standard_target)) {
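
Both hunks above add the same guard: after the generic xtables lookup,
anything that is not a bridge-family extension is rejected and its module
reference dropped, because its return values would be misread as jumps. The
self-contained C sketch below shows that guard in isolation; the registry,
the family constants and the refcount field are simplified stand-ins, not
the xtables API.

/* Sketch of the family check: a lookup in a shared registry may hand back
 * an extension from another family; reject it and drop the reference.
 * Everything here is an invented stand-in.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

enum { FAMILY_UNSPEC = 0, FAMILY_BRIDGE = 7 };

struct target {
        const char *name;
        int family;
        int refs;
};

static struct target targets[] = {
        { "ebt-standard", FAMILY_BRIDGE, 0 },
        { "xt-drop",      FAMILY_UNSPEC, 0 },   /* xtables-style fallback */
};

static struct target *find_target(const char *name)
{
        for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
                if (!strcmp(targets[i].name, name)) {
                        targets[i].refs++;      /* lookup takes a reference */
                        return &targets[i];
                }
        }
        return NULL;
}

static int check_target(const char *name)
{
        struct target *t = find_target(name);

        if (!t)
                return -ENOENT;
        if (t->family != FAMILY_BRIDGE) {       /* the new rejection */
                t->refs--;                      /* module_put() analogue */
                return -ENOENT;
        }
        return 0;
}

int main(void)
{
        printf("ebt-standard: %d\n", check_target("ebt-standard"));    /* 0  */
        printf("xt-drop:      %d\n", check_target("xt-drop"));         /* -2 */
        return 0;
}
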
diff --git a/queue-4.14/netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch b/queue-4.14/netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch
new file mode 100644 (file)
index 0000000..1827d39
--- /dev/null
@@ -0,0 +1,52 @@
+From 84379c9afe011020e797e3f50a662b08a6355dcf Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Mon, 9 Jul 2018 13:43:38 +0200
+Subject: netfilter: ipv6: nf_defrag: drop skb dst before queueing
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 84379c9afe011020e797e3f50a662b08a6355dcf upstream.
+
+Eric Dumazet reports:
+ Here is a reproducer of an annoying bug detected by syzkaller on our production kernel
+ [..]
+ ./b78305423 enable_conntrack
+ Then :
+ sleep 60
+ dmesg | tail -10
+ [  171.599093] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [  181.631024] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [  191.687076] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [  201.703037] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [  211.711072] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [  221.959070] unregister_netdevice: waiting for lo to become free. Usage count = 2
+
+Reproducer sends ipv6 fragment that hits nfct defrag via LOCAL_OUT hook.
+skb gets queued until frag timer expiry -- 1 minute.
+
+Normally nf_conntrack_reasm gets called during prerouting, so skb has
+no dst yet which might explain why this wasn't spotted earlier.
+
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Reported-by: John Sperbeck <jsperbeck@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Tested-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -618,6 +618,8 @@ int nf_ct_frag6_gather(struct net *net,
+           fq->q.meat == fq->q.len &&
+           nf_ct_frag6_reasm(fq, skb, dev))
+               ret = 0;
++      else
++              skb_dst_drop(skb);
+ out_unlock:
+       spin_unlock_bh(&fq->q.lock);
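
The one-line change above matters because a queued fragment can sit for up to
a minute, and while queued it would otherwise keep holding its dst and,
through it, a device reference, which is what produces the "waiting for lo to
become free" messages in the report. The sketch below models only that
refcount effect; the skb, dst and device are reduced to invented plain
structs with a counter.

/* Model of why the dst is dropped before queueing: whatever the queued
 * fragment keeps referencing stays pinned until the queue is purged.
 * All types and names are invented for the illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct device { int refs; };
struct dst    { struct device *dev; };
struct frag   { struct dst *dst; };

static struct dst *dst_hold(struct device *dev)
{
        struct dst *d = malloc(sizeof(*d));

        d->dev = dev;
        dev->refs++;
        return d;
}

static void dst_drop(struct frag *f)
{
        if (f->dst) {
                f->dst->dev->refs--;
                free(f->dst);
                f->dst = NULL;
        }
}

int main(void)
{
        struct device lo = { .refs = 1 };       /* base reference */
        struct frag f = { .dst = dst_hold(&lo) };

        /* the fix: drop the dst before the fragment is parked on the queue */
        dst_drop(&f);

        printf("lo refs while the fragment sits queued: %d\n", lo.refs);
        return 0;
}
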
diff --git a/queue-4.14/nsh-set-mac-len-based-on-inner-packet.patch b/queue-4.14/nsh-set-mac-len-based-on-inner-packet.patch
new file mode 100644 (file)
index 0000000..82199a7
--- /dev/null
@@ -0,0 +1,46 @@
+From bab2c80e5a6c855657482eac9e97f5f3eedb509a Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 11 Jul 2018 12:00:44 -0400
+Subject: nsh: set mac len based on inner packet
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit bab2c80e5a6c855657482eac9e97f5f3eedb509a upstream.
+
+When pulling the NSH header in nsh_gso_segment, set the mac length
+based on the encapsulated packet type.
+
+skb_reset_mac_len computes an offset to the network header, which
+here still points to the outer packet:
+
+  >     skb_reset_network_header(skb);
+  >     [...]
+  >     __skb_pull(skb, nsh_len);
+  >     skb_reset_mac_header(skb);    // now mac hdr starts nsh_len == 8B after net hdr
+  >     skb_reset_mac_len(skb);       // mac len = net hdr - mac hdr == (u16) -8 == 65528
+  >     [..]
+  >     skb_mac_gso_segment(skb, ..)
+
+Link: http://lkml.kernel.org/r/CAF=yD-KeAcTSOn4AxirAxL8m7QAS8GBBe1w09eziYwvPbbUeYA@mail.gmail.com
+Reported-by: syzbot+7b9ed9872dab8c32305d@syzkaller.appspotmail.com
+Fixes: c411ed854584 ("nsh: add GSO support")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/nsh/nsh.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -42,7 +42,7 @@ static struct sk_buff *nsh_gso_segment(s
+       __skb_pull(skb, nsh_len);
+       skb_reset_mac_header(skb);
+-      skb_reset_mac_len(skb);
++      skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
+       skb->protocol = proto;
+       features &= NETIF_F_SG;
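
The arithmetic in the commit message is easy to check in isolation: once the
8-byte NSH header has been pulled and the mac header reset, the network
header (still pointing at the outer packet) sits 8 bytes before the mac
header, so skb_reset_mac_len() would store (u16)-8 = 65528. The tiny program
below reproduces just that cast, assuming example offsets of 14 and 22 bytes.

/* Demonstration of the u16 wrap-around described in the commit message.
 * The offsets are example values, not taken from a real skb.
 */
#include <stdio.h>

int main(void)
{
        unsigned int net_off = 14;      /* network header: still the outer packet */
        unsigned int mac_off = 22;      /* mac header: 8 bytes further, past NSH  */

        unsigned short mac_len = (unsigned short)(net_off - mac_off);

        printf("mac_len = %u\n", mac_len);      /* prints 65528 */
        return 0;
}

Compiled and run, this prints 65528, the bogus mac length the commit message
calls out; the fix instead derives mac_len from the inner packet type.
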
diff --git a/queue-4.14/rds-avoid-unenecessary-cong_update-in-loop-transport.patch b/queue-4.14/rds-avoid-unenecessary-cong_update-in-loop-transport.patch
new file mode 100644 (file)
index 0000000..2c717c7
--- /dev/null
@@ -0,0 +1,61 @@
+From f1693c63ab133d16994cc50f773982b5905af264 Mon Sep 17 00:00:00 2001
+From: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Date: Thu, 14 Jun 2018 11:52:34 -0700
+Subject: rds: avoid unenecessary cong_update in loop transport
+
+From: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+
+commit f1693c63ab133d16994cc50f773982b5905af264 upstream.
+
+For the loop transport, which is a self-loopback, remote port congestion
+updates aren't relevant. In fact, the xmit path already ignores them.
+The receive path needs to do the same.
+
+Reported-by: syzbot+4c20b3866171ce8441d2@syzkaller.appspotmail.com
+Reviewed-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rds/loop.c |    1 +
+ net/rds/rds.h  |    5 +++++
+ net/rds/recv.c |    5 +++++
+ 3 files changed, 11 insertions(+)
+
+--- a/net/rds/loop.c
++++ b/net/rds/loop.c
+@@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport
+       .inc_copy_to_user       = rds_message_inc_copy_to_user,
+       .inc_free               = rds_loop_inc_free,
+       .t_name                 = "loopback",
++      .t_type                 = RDS_TRANS_LOOP,
+ };
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -454,6 +454,11 @@ struct rds_notifier {
+       int                     n_status;
+ };
++/* Available as part of RDS core, so doesn't need to participate
++ * in get_preferred transport etc
++ */
++#define       RDS_TRANS_LOOP  3
++
+ /**
+  * struct rds_transport -  transport specific behavioural hooks
+  *
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct
+               rds_stats_add(s_recv_bytes_added_to_socket, delta);
+       else
+               rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
++
++      /* loop transport doesn't send/recv congestion updates */
++      if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
++              return;
++
+       now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
+       rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
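
The receive-side check added above is a one-line early return keyed on the
transport type. The sketch below models it in plain C so the control flow is
visible outside the RDS code; the value of RDS_TRANS_LOOP (3) is taken from
the patch, while the struct layout and function names are invented for the
example.

/* Model of skipping congestion accounting on the self-loopback transport.
 * Structures are simplified stand-ins, not the RDS data structures.
 */
#include <stdio.h>

#define TRANS_TCP   0
#define TRANS_LOOP  3   /* value of RDS_TRANS_LOOP in the patch */

struct transport  { int t_type; };
struct sock_state {
        struct transport *trans;
        long rcv_bytes;
        long rcvbuf;
        int congested;
};

static void recv_rcvbuf_delta(struct sock_state *rs, long delta)
{
        rs->rcv_bytes += delta;

        /* loop transport doesn't send/recv congestion updates */
        if (rs->trans->t_type == TRANS_LOOP)
                return;

        rs->congested = rs->rcv_bytes > rs->rcvbuf;
}

int main(void)
{
        struct transport loop = { TRANS_LOOP };
        struct sock_state rs = { &loop, 0, 100, 0 };

        recv_rcvbuf_delta(&rs, 500);
        printf("congested = %d (stays 0 on the loop transport)\n", rs.congested);
        return 0;
}
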
diff --git a/queue-4.14/reiserfs-fix-buffer-overflow-with-long-warning-messages.patch b/queue-4.14/reiserfs-fix-buffer-overflow-with-long-warning-messages.patch
new file mode 100644 (file)
index 0000000..e2cd195
--- /dev/null
@@ -0,0 +1,252 @@
+From fe10e398e860955bac4d28ec031b701d358465e4 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 13 Jul 2018 16:59:27 -0700
+Subject: reiserfs: fix buffer overflow with long warning messages
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit fe10e398e860955bac4d28ec031b701d358465e4 upstream.
+
+ReiserFS prepares log messages into a 1024-byte buffer with no bounds
+checks.  Long messages, such as the "unknown mount option" warning when
+userspace passes a crafted mount options string, overflow this buffer.
+This causes KASAN to report a global-out-of-bounds write.
+
+Fix it by truncating messages to the buffer size.
+
+Link: http://lkml.kernel.org/r/20180707203621.30922-1-ebiggers3@gmail.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+b890b3335a4d8c608963@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/reiserfs/prints.c |  141 +++++++++++++++++++++++++++++----------------------
+ 1 file changed, 81 insertions(+), 60 deletions(-)
+
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key
+ }
+ /* %k */
+-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
++static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
+ {
+       if (key)
+-              sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
+-                      le32_to_cpu(key->k_objectid), le_offset(key),
+-                      le_type(key));
++              return scnprintf(buf, size, "[%d %d %s %s]",
++                               le32_to_cpu(key->k_dir_id),
++                               le32_to_cpu(key->k_objectid), le_offset(key),
++                               le_type(key));
+       else
+-              sprintf(buf, "[NULL]");
++              return scnprintf(buf, size, "[NULL]");
+ }
+ /* %K */
+-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
++static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
+ {
+       if (key)
+-              sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
+-                      key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
+-                      cpu_type(key));
++              return scnprintf(buf, size, "[%d %d %s %s]",
++                               key->on_disk_key.k_dir_id,
++                               key->on_disk_key.k_objectid,
++                               reiserfs_cpu_offset(key), cpu_type(key));
+       else
+-              sprintf(buf, "[NULL]");
++              return scnprintf(buf, size, "[NULL]");
+ }
+-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
++static int scnprintf_de_head(char *buf, size_t size,
++                           struct reiserfs_de_head *deh)
+ {
+       if (deh)
+-              sprintf(buf,
+-                      "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+-                      deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
+-                      deh_location(deh), deh_state(deh));
++              return scnprintf(buf, size,
++                               "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
++                               deh_offset(deh), deh_dir_id(deh),
++                               deh_objectid(deh), deh_location(deh),
++                               deh_state(deh));
+       else
+-              sprintf(buf, "[NULL]");
++              return scnprintf(buf, size, "[NULL]");
+ }
+-static void sprintf_item_head(char *buf, struct item_head *ih)
++static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
+ {
+       if (ih) {
+-              strcpy(buf,
+-                     (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
+-              sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
+-              sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
+-                      "free_space(entry_count) %d",
+-                      ih_item_len(ih), ih_location(ih), ih_free_space(ih));
++              char *p = buf;
++              char * const end = buf + size;
++
++              p += scnprintf(p, end - p, "%s",
++                             (ih_version(ih) == KEY_FORMAT_3_6) ?
++                             "*3.6* " : "*3.5*");
++
++              p += scnprintf_le_key(p, end - p, &ih->ih_key);
++
++              p += scnprintf(p, end - p,
++                             ", item_len %d, item_location %d, free_space(entry_count) %d",
++                             ih_item_len(ih), ih_location(ih),
++                             ih_free_space(ih));
++              return p - buf;
+       } else
+-              sprintf(buf, "[NULL]");
++              return scnprintf(buf, size, "[NULL]");
+ }
+-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
++static int scnprintf_direntry(char *buf, size_t size,
++                            struct reiserfs_dir_entry *de)
+ {
+       char name[20];
+       memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
+       name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
+-      sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
++      return scnprintf(buf, size, "\"%s\"==>[%d %d]",
++                       name, de->de_dir_id, de->de_objectid);
+ }
+-static void sprintf_block_head(char *buf, struct buffer_head *bh)
++static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
+ {
+-      sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
+-              B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
++      return scnprintf(buf, size,
++                       "level=%d, nr_items=%d, free_space=%d rdkey ",
++                       B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+ }
+-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
++static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
+ {
+-      sprintf(buf,
+-              "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+-              bh->b_bdev, bh->b_size,
+-              (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
+-              bh->b_state, bh->b_page,
+-              buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+-              buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+-              buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
++      return scnprintf(buf, size,
++                       "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
++                       bh->b_bdev, bh->b_size,
++                       (unsigned long long)bh->b_blocknr,
++                       atomic_read(&(bh->b_count)),
++                       bh->b_state, bh->b_page,
++                       buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
++                       buffer_dirty(bh) ? "DIRTY" : "CLEAN",
++                       buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+ }
+-static void sprintf_disk_child(char *buf, struct disk_child *dc)
++static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
+ {
+-      sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
+-              dc_size(dc));
++      return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
++                       dc_block_number(dc), dc_size(dc));
+ }
+ static char *is_there_reiserfs_struct(char *fmt, int *what)
+@@ -189,55 +205,60 @@ static void prepare_error_buf(const char
+       char *fmt1 = fmt_buf;
+       char *k;
+       char *p = error_buf;
++      char * const end = &error_buf[sizeof(error_buf)];
+       int what;
+       spin_lock(&error_lock);
+-      strcpy(fmt1, fmt);
++      if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
++              strscpy(error_buf, "format string too long", end - error_buf);
++              goto out_unlock;
++      }
+       while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
+               *k = 0;
+-              p += vsprintf(p, fmt1, args);
++              p += vscnprintf(p, end - p, fmt1, args);
+               switch (what) {
+               case 'k':
+-                      sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
++                      p += scnprintf_le_key(p, end - p,
++                                            va_arg(args, struct reiserfs_key *));
+                       break;
+               case 'K':
+-                      sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
++                      p += scnprintf_cpu_key(p, end - p,
++                                             va_arg(args, struct cpu_key *));
+                       break;
+               case 'h':
+-                      sprintf_item_head(p, va_arg(args, struct item_head *));
++                      p += scnprintf_item_head(p, end - p,
++                                               va_arg(args, struct item_head *));
+                       break;
+               case 't':
+-                      sprintf_direntry(p,
+-                                       va_arg(args,
+-                                              struct reiserfs_dir_entry *));
++                      p += scnprintf_direntry(p, end - p,
++                                              va_arg(args, struct reiserfs_dir_entry *));
+                       break;
+               case 'y':
+-                      sprintf_disk_child(p,
+-                                         va_arg(args, struct disk_child *));
++                      p += scnprintf_disk_child(p, end - p,
++                                                va_arg(args, struct disk_child *));
+                       break;
+               case 'z':
+-                      sprintf_block_head(p,
+-                                         va_arg(args, struct buffer_head *));
++                      p += scnprintf_block_head(p, end - p,
++                                                va_arg(args, struct buffer_head *));
+                       break;
+               case 'b':
+-                      sprintf_buffer_head(p,
+-                                          va_arg(args, struct buffer_head *));
++                      p += scnprintf_buffer_head(p, end - p,
++                                                 va_arg(args, struct buffer_head *));
+                       break;
+               case 'a':
+-                      sprintf_de_head(p,
+-                                      va_arg(args,
+-                                             struct reiserfs_de_head *));
++                      p += scnprintf_de_head(p, end - p,
++                                             va_arg(args, struct reiserfs_de_head *));
+                       break;
+               }
+-              p += strlen(p);
+               fmt1 = k + 2;
+       }
+-      vsprintf(p, fmt1, args);
++      p += vscnprintf(p, end - p, fmt1, args);
++out_unlock:
+       spin_unlock(&error_lock);
+ }
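
The conversion above is from unbounded sprintf()/vsprintf() chaining to
bounded scnprintf()/vscnprintf() chaining, where each step advances by the
number of characters actually written and the remaining space shrinks
accordingly, so a long message can only be truncated, never overflow. The
userspace demo below shows the same pattern with snprintf() clamped to
scnprintf()-like semantics; the buffer size and the "unknown mount option"
message are only examples.

/* Bounded, chained formatting into a fixed buffer. bounded_printf() mimics
 * the kernel's scnprintf() by returning the number of characters actually
 * written rather than the would-be length.
 */
#include <stdio.h>
#include <string.h>

static int bounded_printf(char *buf, size_t size, const char *fmt, const char *arg)
{
        int n;

        if (size == 0)
                return 0;
        n = snprintf(buf, size, fmt, arg);
        if (n < 0)
                return 0;
        return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
        char error_buf[32];                     /* deliberately small */
        char * const end = error_buf + sizeof(error_buf);
        char *p = error_buf;
        char long_opt[128];

        memset(long_opt, 'x', sizeof(long_opt) - 1);
        long_opt[sizeof(long_opt) - 1] = '\0';

        p += bounded_printf(p, end - p, "unknown mount option \"%s\"", long_opt);
        p += bounded_printf(p, end - p, "%s", " (ignored)");    /* no-ops once full */

        printf("len=%zu msg=%s\n", strlen(error_buf), error_buf);
        return 0;
}
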
index 9fb7029068aaa9cb8451230ac28697d13bd1645a..ed4d6b84f416fc560e87fcddbb5a538627f9bb23 100644 (file)
@@ -57,3 +57,35 @@ crypto-af_alg-initialize-sg_num_bytes-in-error-code-path.patch
 mtd-rawnand-denali_dt-set-clk_x_rate-to-200-mhz-unconditionally.patch
 block-do-not-use-interruptible-wait-anywhere.patch
 pci-hv-disable-enable-irqs-rather-than-bh-in-hv_compose_msi_msg.patch
+netfilter-ebtables-reject-non-bridge-targets.patch
+reiserfs-fix-buffer-overflow-with-long-warning-messages.patch
+keys-dns-fix-parsing-multiple-options.patch
+tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch
+autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch
+nsh-set-mac-len-based-on-inner-packet.patch
+netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch
+bdi-fix-another-oops-in-wb_workfn.patch
+rds-avoid-unenecessary-cong_update-in-loop-transport.patch
+net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
+kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch
+kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch
+kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch
+arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch
+kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch
+arm64-alternatives-add-dynamic-patching-feature.patch
+kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch
+kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch
+arm-arm64-smccc-add-smccc-specific-return-codes.patch
+arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch
+arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch
+arm64-add-arch_workaround_2-probing.patch
+arm64-add-ssbd-command-line-option.patch
+arm64-ssbd-add-global-mitigation-state-accessor.patch
+arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch
+arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch
+arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch
+arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch
+arm64-kvm-add-hyp-per-cpu-accessors.patch
+arm64-kvm-add-arch_workaround_2-support-for-guests.patch
+arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch
+arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch
diff --git a/queue-4.14/tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch b/queue-4.14/tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch
new file mode 100644 (file)
index 0000000..31b581b
--- /dev/null
@@ -0,0 +1,42 @@
+From 32da12216e467dea70a09cd7094c30779ce0f9db Mon Sep 17 00:00:00 2001
+From: Dave Watson <davejwatson@fb.com>
+Date: Thu, 12 Jul 2018 08:03:43 -0700
+Subject: tls: Stricter error checking in zerocopy sendmsg path
+
+From: Dave Watson <davejwatson@fb.com>
+
+commit 32da12216e467dea70a09cd7094c30779ce0f9db upstream.
+
+In the zerocopy sendmsg() path, there are error checks to revert
+the zerocopy if we get any error code.  syzkaller has discovered
+that tls_push_record can return -ECONNRESET, which is fatal, and
+happens after the point at which it is safe to revert the iter,
+as we've already passed the memory to do_tcp_sendpages.
+
+Previously this code could return -ENOMEM and we would want to
+revert the iter, but AFAIK this no longer returns ENOMEM after
+a447da7d004 ("tls: fix waitall behavior in tls_sw_recvmsg"),
+so we fail for all error codes.
+
+Reported-by: syzbot+c226690f7b3126c5ee04@syzkaller.appspotmail.com
+Reported-by: syzbot+709f2810a6a05f11d4d3@syzkaller.appspotmail.com
+Signed-off-by: Dave Watson <davejwatson@fb.com>
+Fixes: 3c4d7559159b ("tls: kernel TLS support")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/tls/tls_sw.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -449,7 +449,7 @@ alloc_encrypted:
+                       ret = tls_push_record(sk, msg->msg_flags, record_type);
+                       if (!ret)
+                               continue;
+-                      if (ret == -EAGAIN)
++                      if (ret < 0)
+                               goto send_end;
+                       copied -= try_to_copy;