4.18-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 13 Sep 2018 07:19:48 +0000 (09:19 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 13 Sep 2018 07:19:48 +0000 (09:19 +0200)
added patches:
kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch
kvm-x86-set-highest-physical-address-bits-in-non-present-reserved-sptes.patch
x86-kvm-avoid-unused-variable-warning.patch

queue-4.18/kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch [new file with mode: 0644]
queue-4.18/kvm-x86-set-highest-physical-address-bits-in-non-present-reserved-sptes.patch [new file with mode: 0644]
queue-4.18/series
queue-4.18/x86-kvm-avoid-unused-variable-warning.patch [new file with mode: 0644]

diff --git a/queue-4.18/kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch b/queue-4.18/kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch
new file mode 100644
index 0000000..d18c851
--- /dev/null
@@ -0,0 +1,54 @@
+From 914b087ff9e0e9a399a4927fa30793064afc0178 Mon Sep 17 00:00:00 2001
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Tue, 28 Aug 2018 12:59:10 -0700
+Subject: kbuild: make missing $DEPMOD a Warning instead of an Error
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+commit 914b087ff9e0e9a399a4927fa30793064afc0178 upstream.
+
+When $DEPMOD is not found, only print a warning instead of exiting
+with an error message and error status:
+
+Warning: 'make modules_install' requires /sbin/depmod. Please install it.
+This is probably in the kmod package.
+
+Change the Error to a Warning because "not all build hosts for cross
+compiling Linux are Linux systems and are able to provide a working
+port of depmod, especially at the file path /sbin/depmod."
+
+I.e., "make modules_install" may be used to copy/install the
+loadable modules files to a target directory on a build system and
+then transferred to an embedded device where /sbin/depmod is run
+instead of it being run on the build system.
+
+Fixes: 934193a654c1 ("kbuild: verify that $DEPMOD is installed")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: H. Nikolaus Schaller <hns@goldelico.com>
+Cc: stable@vger.kernel.org
+Cc: Lucas De Marchi <lucas.demarchi@profusion.mobi>
+Cc: Lucas De Marchi <lucas.de.marchi@gmail.com>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: Jessica Yu <jeyu@kernel.org>
+Cc: Chih-Wei Huang <cwhuang@linux.org.tw>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/depmod.sh |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -15,9 +15,9 @@ if ! test -r System.map ; then
+ fi
+
+ if [ -z $(command -v $DEPMOD) ]; then
+-      echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++      echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
+       echo "This is probably in the kmod package." >&2
+-      exit 1
++      exit 0
+ fi
+
+ # older versions of depmod require the version string to start with three
diff --git a/queue-4.18/kvm-x86-set-highest-physical-address-bits-in-non-present-reserved-sptes.patch b/queue-4.18/kvm-x86-set-highest-physical-address-bits-in-non-present-reserved-sptes.patch
new file mode 100644
index 0000000..eb27c6b
--- /dev/null
@@ -0,0 +1,133 @@
+From 28a1f3ac1d0c8558ee4453d9634dad891a6e922e Mon Sep 17 00:00:00 2001
+From: Junaid Shahid <junaids@google.com>
+Date: Tue, 14 Aug 2018 10:15:34 -0700
+Subject: kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
+
+From: Junaid Shahid <junaids@google.com>
+
+commit 28a1f3ac1d0c8558ee4453d9634dad891a6e922e upstream.
+
+Always set the 5 upper-most supported physical address bits to 1 for SPTEs
+that are marked as non-present or reserved, to make them unusable for
+L1TF attacks from the guest. Currently, this just applies to MMIO SPTEs.
+(We do not need to mark PTEs that are completely 0 as physical page 0
+is already reserved.)
+
+This allows mitigation of L1TF without disabling hyper-threading by using
+shadow paging mode instead of EPT.
+
+Signed-off-by: Junaid Shahid <junaids@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c |   43 ++++++++++++++++++++++++++++++++++++++-----
+ arch/x86/kvm/x86.c |    8 ++++++--
+ 2 files changed, 44 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -221,6 +221,17 @@ static const u64 shadow_acc_track_saved_
+                                                   PT64_EPT_EXECUTABLE_MASK;
+ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
+
++/*
++ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
++ * to guard against L1TF attacks.
++ */
++static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
++
++/*
++ * The number of high-order 1 bits to use in the mask above.
++ */
++static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
++
+ static void mmu_spte_set(u64 *sptep, u64 spte);
+
+ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
+@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vc
+ {
+       unsigned int gen = kvm_current_mmio_generation(vcpu);
+       u64 mask = generation_mmio_spte_mask(gen);
++      u64 gpa = gfn << PAGE_SHIFT;
+
+       access &= ACC_WRITE_MASK | ACC_USER_MASK;
+-      mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
++      mask |= shadow_mmio_value | access;
++      mask |= gpa | shadow_nonpresent_or_rsvd_mask;
++      mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
++              << shadow_nonpresent_or_rsvd_mask_len;
+
+       trace_mark_mmio_spte(sptep, gfn, access, gen);
+       mmu_spte_set(sptep, mask);
+@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
+
+ static gfn_t get_mmio_spte_gfn(u64 spte)
+ {
+-      u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
+-      return (spte & ~mask) >> PAGE_SHIFT;
++      u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
++                 shadow_nonpresent_or_rsvd_mask;
++      u64 gpa = spte & ~mask;
++
++      gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
++             & shadow_nonpresent_or_rsvd_mask;
++
++      return gpa >> PAGE_SHIFT;
+ }
+
+ static unsigned get_mmio_spte_access(u64 spte)
+@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
+-static void kvm_mmu_clear_all_pte_masks(void)
++static void kvm_mmu_reset_all_pte_masks(void)
+ {
+       shadow_user_mask = 0;
+       shadow_accessed_mask = 0;
+@@ -391,6 +412,18 @@ static void kvm_mmu_clear_all_pte_masks(
+       shadow_mmio_mask = 0;
+       shadow_present_mask = 0;
+       shadow_acc_track_mask = 0;
++
++      /*
++       * If the CPU has 46 or less physical address bits, then set an
++       * appropriate mask to guard against L1TF attacks. Otherwise, it is
++       * assumed that the CPU is not vulnerable to L1TF.
++       */
++      if (boot_cpu_data.x86_phys_bits <
++          52 - shadow_nonpresent_or_rsvd_mask_len)
++              shadow_nonpresent_or_rsvd_mask =
++                      rsvd_bits(boot_cpu_data.x86_phys_bits -
++                                shadow_nonpresent_or_rsvd_mask_len,
++                                boot_cpu_data.x86_phys_bits - 1);
+ }
+
+ static int is_cpuid_PSE36(void)
+@@ -5500,7 +5533,7 @@ int kvm_mmu_module_init(void)
+ {
+       int ret = -ENOMEM;
+
+-      kvm_mmu_clear_all_pte_masks();
++      kvm_mmu_reset_all_pte_masks();
+
+       pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+                                           sizeof(struct pte_list_desc),
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6506,8 +6506,12 @@ static void kvm_set_mmio_spte_mask(void)
+        * Set the reserved bits and the present bit of a paging-structure
+        * entry to generate a page fault with PFER.RSV = 1.
+        */
+-       /* Mask the reserved physical address bits. */
+-      mask = rsvd_bits(maxphyaddr, 51);
++
++      /*
++       * Mask the uppermost physical address bit, which would be reserved as
++       * long as the supported physical address width is less than 52.
++       */
++      mask = 1ull << 51;
+
+       /* Set the present bit. */
+       mask |= 1ull;
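
The SPTE encode/decode scheme in the mmu.c hunks above is easier to follow
outside diff form. What follows is a minimal, standalone userspace sketch of
the same bit manipulation, not kernel code: the 36-bit physical address width
and the example GFN are illustrative assumptions, and rsvd_bits() and the
mask names merely mirror the kernel's identifiers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static const int phys_bits = 36; /* assumed CPU physical address width */
static const int rsvd_len  = 5;  /* shadow_nonpresent_or_rsvd_mask_len */

/* rsvd_bits(lo, hi): a mask with bits lo..hi set, as in the kernel. */
static uint64_t rsvd_bits(int lo, int hi)
{
	return ((1ULL << (hi - lo + 1)) - 1) << lo;
}

int main(void)
{
	/* Only CPUs with fewer than 52 - 5 = 47 physical bits get a mask. */
	uint64_t rsvd_mask = 0;
	if (phys_bits < 52 - rsvd_len)
		rsvd_mask = rsvd_bits(phys_bits - rsvd_len, phys_bits - 1);

	uint64_t gfn = 0x800123; /* example guest frame number */
	uint64_t gpa = gfn << PAGE_SHIFT;

	/*
	 * Encode, as mark_mmio_spte() does: force the masked high bits
	 * to 1 and stash the GPA bits they overwrite just above them.
	 */
	uint64_t spte = gpa | rsvd_mask;
	spte |= (gpa & rsvd_mask) << rsvd_len;

	/*
	 * Decode, as get_mmio_spte_gfn() does: clear the mask and the
	 * stash area, then restore the displaced GPA bits.
	 */
	uint64_t gpa_out = spte & ~(rsvd_mask | (rsvd_mask << rsvd_len));
	gpa_out |= (spte >> rsvd_len) & rsvd_mask;

	printf("gfn in  0x%" PRIx64 "\n", gfn);
	printf("gfn out 0x%" PRIx64 "\n", gpa_out >> PAGE_SHIFT);
	return 0;
}

Running the sketch prints the same GFN on both sides: the uppermost in-range
bits of the SPTE are always 1, making the address unusable for an L1TF load
from the guest, while the displaced GPA bits survive the round trip.
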
diff --git a/queue-4.18/series b/queue-4.18/series
index ca23bc9be4ff338f603ccd79194cebddafb5c015..06bc2fcda3681abb68b329cd0a9af87eb6e47351 100644
@@ -190,3 +190,6 @@ drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
 debugobjects-make-stack-check-warning-more-informative.patch
 x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch
 x86-xen-don-t-write-ptes-directly-in-32-bit-pv-guests.patch
+kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch
+kvm-x86-set-highest-physical-address-bits-in-non-present-reserved-sptes.patch
+x86-kvm-avoid-unused-variable-warning.patch
diff --git a/queue-4.18/x86-kvm-avoid-unused-variable-warning.patch b/queue-4.18/x86-kvm-avoid-unused-variable-warning.patch
new file mode 100644
index 0000000..6ef3bb3
--- /dev/null
@@ -0,0 +1,46 @@
+From 7288bde1f9df6c1475675419bdd7725ce84dec56 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 20 Aug 2018 23:37:50 +0200
+Subject: x86: kvm: avoid unused variable warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 7288bde1f9df6c1475675419bdd7725ce84dec56 upstream.
+
+Removing one of the two accesses of the maxphyaddr variable led to
+a harmless warning:
+
+arch/x86/kvm/x86.c: In function 'kvm_set_mmio_spte_mask':
+arch/x86/kvm/x86.c:6563:6: error: unused variable 'maxphyaddr' [-Werror=unused-variable]
+
+Removing the #ifdef seems to be the nicest workaround, as it
+makes the code look cleaner than adding another #ifdef.
+
+Fixes: 28a1f3ac1d0c ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: stable@vger.kernel.org # L1TF
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6516,14 +6516,12 @@ static void kvm_set_mmio_spte_mask(void)
+       /* Set the present bit. */
+       mask |= 1ull;
+
+-#ifdef CONFIG_X86_64
+       /*
+        * If reserved bit is not supported, clear the present bit to disable
+        * mmio page fault.
+        */
+-      if (maxphyaddr == 52)
++      if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+               mask &= ~1ull;
+-#endif
+
+       kvm_mmu_set_mmio_spte_mask(mask, mask);
+ }
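
The IS_ENABLED() conversion above works because the kernel's macro turns a
Kconfig option into a compile-time 0/1 constant expression: the condition is
parsed and type-checked on every configuration, so maxphyaddr is always
"used", yet the dead branch is still optimized away. A minimal standalone
sketch of the pattern follows; the one-line IS_ENABLED() here is a
simplification of the real macro in include/linux/kconfig.h, and
CONFIG_DEMO_64BIT is a made-up option name.

#include <stdint.h>
#include <stdio.h>

#define CONFIG_DEMO_64BIT 1	/* pretend the option is enabled */
#define IS_ENABLED(option) (option)

static uint64_t demo_mmio_spte_mask(int maxphyaddr)
{
	uint64_t mask = 1ULL << 51;	/* reserved bit */
	mask |= 1ULL;			/* present bit */

	/*
	 * Unlike an #ifdef block, this is an ordinary C expression, so
	 * maxphyaddr is referenced in every configuration (no
	 * -Wunused-variable), while the compiler still drops the branch
	 * as dead code when the option evaluates to 0.
	 */
	if (IS_ENABLED(CONFIG_DEMO_64BIT) && maxphyaddr == 52)
		mask &= ~1ULL;

	return mask;
}

int main(void)
{
	printf("mask = 0x%llx\n",
	       (unsigned long long)demo_mmio_spte_mask(46));
	return 0;
}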