git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 20:34:16 +0000 (22:34 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 20:34:16 +0000 (22:34 +0200)
added patches:
alsa-pcm-disallow-linking-stream-to-itself.patch
crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch
x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch

queue-4.19/alsa-pcm-disallow-linking-stream-to-itself.patch [new file with mode: 0644]
queue-4.19/crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch [new file with mode: 0644]

diff --git a/queue-4.19/alsa-pcm-disallow-linking-stream-to-itself.patch b/queue-4.19/alsa-pcm-disallow-linking-stream-to-itself.patch
new file mode 100644 (file)
index 0000000..e8cc102
--- /dev/null
@@ -0,0 +1,42 @@
+From 951e2736f4b11b58dc44d41964fa17c3527d882a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= <mirq-linux@rere.qmqm.pl>
+Date: Mon, 8 Jun 2020 18:50:39 +0200
+Subject: ALSA: pcm: disallow linking stream to itself
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+
+commit 951e2736f4b11b58dc44d41964fa17c3527d882a upstream.
+
+Prevent SNDRV_PCM_IOCTL_LINK from linking a stream to itself - the code
+can't handle it. The commit named in the Fixes tag is not where the bug
+was introduced, but it changed the context significantly. (A minimal
+userspace sketch follows the patch below.)
+
+Cc: stable@vger.kernel.org
+Fixes: 0888c321de70 ("pcm_native: switch to fdget()/fdput()")
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Link: https://lore.kernel.org/r/89c4a2487609a0ed6af3ecf01cc972bdc59a7a2d.1591634956.git.mirq-linux@rere.qmqm.pl
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ sound/core/pcm_native.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1982,6 +1982,11 @@ static int snd_pcm_link(struct snd_pcm_s
+       }
+       pcm_file = f.file->private_data;
+       substream1 = pcm_file->substream;
++      if (substream == substream1) {
++              res = -EINVAL;
++              goto _badf;
++      }
++
+       group = kmalloc(sizeof(*group), GFP_KERNEL);
+       if (!group) {
+               res = -ENOMEM;
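
A minimal user-space sketch of the case the check above rejects. It is not part of the patch; the device path and the EINVAL expectation are assumptions about a test system with this fix applied, and it only needs the ALSA uapi header shipped with the kernel.

/* self_link.c - ask the kernel to link a PCM substream to itself.
 * SNDRV_PCM_IOCTL_LINK takes the file descriptor of the stream to link
 * with as its argument; passing the caller's own fd exercises the new
 * "substream == substream1" check, which should fail with EINVAL.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sound/asound.h>		/* SNDRV_PCM_IOCTL_LINK */

int main(void)
{
	/* The playback node below is an assumption about the machine. */
	int fd = open("/dev/snd/pcmC0D0p", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SNDRV_PCM_IOCTL_LINK, fd) < 0)
		perror("SNDRV_PCM_IOCTL_LINK");	/* expect EINVAL with this fix */
	else
		printf("self-link was accepted\n");
	return 0;
}
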
diff --git a/queue-4.19/crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch b/queue-4.19/crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch
new file mode 100644 (file)
index 0000000..069f874
--- /dev/null
@@ -0,0 +1,45 @@
+From 320bdbd816156f9ca07e5fed7bfb449f2908dda7 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 30 May 2020 15:35:37 +0200
+Subject: crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 320bdbd816156f9ca07e5fed7bfb449f2908dda7 upstream.
+
+When a list is completely iterated with 'list_for_each_entry(x, ...)', x is
+not NULL at the end, so the 'not found' case has to be detected by comparing
+the entry's embedded list_head against the list head itself (a standalone
+sketch of this pitfall follows the patch below).
+
+While at it, remove a useless initialization of the ndev variable. It
+is overridden by 'list_for_each_entry'.
+
+Fixes: f2663872f073 ("crypto: cavium - Register the CNN55XX supported crypto algorithms.")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/cavium/nitrox/nitrox_main.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
+@@ -183,7 +183,7 @@ static void nitrox_remove_from_devlist(s
+ struct nitrox_device *nitrox_get_first_device(void)
+ {
+-      struct nitrox_device *ndev = NULL;
++      struct nitrox_device *ndev;
+       mutex_lock(&devlist_lock);
+       list_for_each_entry(ndev, &ndevlist, list) {
+@@ -191,7 +191,7 @@ struct nitrox_device *nitrox_get_first_d
+                       break;
+       }
+       mutex_unlock(&devlist_lock);
+-      if (!ndev)
++      if (&ndev->list == &ndevlist)
+               return NULL;
+       refcount_inc(&ndev->refcnt);
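
The fix above hinges on how 'list_for_each_entry()' terminates. Below is a small stand-alone sketch, with simplified user-space copies of the kernel macros and an illustrative 'demo_device' type, showing that the cursor is never NULL after a full iteration and that comparing its embedded list_head against the list head is the reliable "not found" test.

/* list_pitfall.c - why "if (!ndev)" can never fire after the loop. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified: the kernel macro derives "type" from the cursor itself. */
#define list_for_each_entry(pos, head, type, member)			\
	for (pos = container_of((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

struct demo_device {
	int ready;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct demo_device *dev;

	list_for_each_entry(dev, &head, struct demo_device, list)
		if (dev->ready)
			break;

	/* The loop found nothing, yet "dev" is not NULL: it points where an
	 * entry embedding "head" would live.  The correct test mirrors the
	 * patch: compare the embedded list_head against the head. */
	printf("dev == NULL?         %s\n", dev == NULL ? "yes" : "no");
	printf("&dev->list == &head? %s\n",
	       &dev->list == &head ? "yes (not found)" : "no");
	return 0;
}
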
diff --git a/queue-4.19/series b/queue-4.19/series
index 955e978d7e7296753f40530b87dce2d6defdd854..15dc5ed3511db64b251405da739ab6963e1ba8f2 100644 (file)
@@ -44,3 +44,6 @@ spi-dw-fix-controller-unregister-order.patch
 spi-bcm2835aux-fix-controller-unregister-order.patch
 spi-bcm-qspi-when-tx-rx-buffer-is-null-set-to-0.patch
 pm-runtime-clk-fix-clk_pm_runtime_get-error-path.patch
+crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch
+alsa-pcm-disallow-linking-stream-to-itself.patch
+x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch
diff --git a/queue-4.19/x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch b/queue-4.19/x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch
new file mode 100644 (file)
index 0000000..fbd5a8b
--- /dev/null
@@ -0,0 +1,146 @@
+From 17fae1294ad9d711b2c3dd0edef479d40c76a5e8 Mon Sep 17 00:00:00 2001
+From: Tony Luck <tony.luck@intel.com>
+Date: Wed, 20 May 2020 09:35:46 -0700
+Subject: x86/{mce,mm}: Unmap the entire page if the whole page is affected and poisoned
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit 17fae1294ad9d711b2c3dd0edef479d40c76a5e8 upstream.
+
+An interesting thing happened when a guest Linux instance took a machine
+check. The VMM unmapped the bad page from guest physical space and
+passed the machine check to the guest.
+
+Linux took all the normal actions to offline the page from the process
+that was using it. But then guest Linux crashed because it said there
+was a second machine check inside the kernel with this stack trace:
+
+do_memory_failure
+    set_mce_nospec
+         set_memory_uc
+              _set_memory_uc
+                   change_page_attr_set_clr
+                        cpa_flush
+                             clflush_cache_range_opt
+
+This was odd, because a CLFLUSH instruction shouldn't raise a machine
+check (it isn't consuming the data). Further investigation showed that
+the VMM had passed in another machine check because it appeared that the
+guest was accessing the bad page.
+
+The fix is to check the scope of the poison via the MCi_MISC register. If
+the entire page is affected, then unmap the page. If only part of the page
+is affected, then mark the page as uncacheable (a standalone sketch of this
+check follows the patch below).
+
+This assumes that VMMs will do the logical thing and pass in the "whole
+page scope" via the MCi_MISC register (since they unmapped the entire
+page).
+
+  [ bp: Adjust to x86/entry changes. ]
+
+Fixes: 284ce4011ba6 ("x86/memory_failure: Introduce {set, clear}_mce_nospec()")
+Reported-by: Jue Wang <juew@google.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jue Wang <juew@google.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200520163546.GA7977@agluck-desk2.amr.corp.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/set_memory.h |   19 +++++++++++++------
+ arch/x86/kernel/cpu/mcheck/mce.c  |   11 +++++++++--
+ include/linux/set_memory.h        |    2 +-
+ 3 files changed, 23 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/set_memory.h
++++ b/arch/x86/include/asm/set_memory.h
+@@ -90,28 +90,35 @@ void set_kernel_text_rw(void);
+ void set_kernel_text_ro(void);
+ #ifdef CONFIG_X86_64
+-static inline int set_mce_nospec(unsigned long pfn)
++/*
++ * Prevent speculative access to the page by either unmapping
++ * it (if we do not require access to any part of the page) or
++ * marking it uncacheable (if we want to try to retrieve data
++ * from non-poisoned lines in the page).
++ */
++static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+ {
+       unsigned long decoy_addr;
+       int rc;
+       /*
+-       * Mark the linear address as UC to make sure we don't log more
+-       * errors because of speculative access to the page.
+        * We would like to just call:
+-       *      set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
++       *      set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+        * but doing that would radically increase the odds of a
+        * speculative access to the poison page because we'd have
+        * the virtual address of the kernel 1:1 mapping sitting
+        * around in registers.
+        * Instead we get tricky.  We create a non-canonical address
+        * that looks just like the one we want, but has bit 63 flipped.
+-       * This relies on set_memory_uc() properly sanitizing any __pa()
++       * This relies on set_memory_XX() properly sanitizing any __pa()
+        * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+        */
+       decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+-      rc = set_memory_uc(decoy_addr, 1);
++      if (unmap)
++              rc = set_memory_np(decoy_addr, 1);
++      else
++              rc = set_memory_uc(decoy_addr, 1);
+       if (rc)
+               pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+       return rc;
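
An aside on the decoy-address trick described in the comment above, reduced to stand-alone arithmetic. The PAGE_OFFSET value is the usual x86-64 direct-map base and is an assumption for illustration (with KASLR the real base is randomized); the point is only that flipping bit 63 yields a non-canonical alias of the same physical frame.

/* decoy_addr.c - non-canonical alias used instead of the real 1:1 address. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define BIT(n)		(1ULL << (n))
#define PAGE_OFFSET	0xffff888000000000ULL	/* assumed direct-map base */

int main(void)
{
	uint64_t pfn   = 0x12345;				/* example frame */
	uint64_t kaddr = (pfn << PAGE_SHIFT) + PAGE_OFFSET;	/* real 1:1 addr */
	uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	printf("real 1:1 address: 0x%llx\n", (unsigned long long)kaddr);
	printf("decoy address   : 0x%llx\n", (unsigned long long)decoy);
	/* set_memory_XX() sanitizes the __pa() result with __PHYSICAL_MASK /
	 * PTE_PFN_MASK, so the decoy still resolves to the intended pfn while
	 * the real kernel virtual address never ends up in a register. */
	return 0;
}
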
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -535,6 +535,13 @@ bool mce_is_memory_error(struct mce *m)
+ }
+ EXPORT_SYMBOL_GPL(mce_is_memory_error);
++static bool whole_page(struct mce *m)
++{
++      if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
++              return true;
++      return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
++}
++
+ bool mce_is_correctable(struct mce *m)
+ {
+       if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
+@@ -600,7 +607,7 @@ static int srao_decode_notifier(struct n
+       if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
+               pfn = mce->addr >> PAGE_SHIFT;
+               if (!memory_failure(pfn, 0))
+-                      set_mce_nospec(pfn);
++                      set_mce_nospec(pfn, whole_page(mce));
+       }
+       return NOTIFY_OK;
+@@ -1101,7 +1108,7 @@ static int do_memory_failure(struct mce
+       if (ret)
+               pr_err("Memory error not recovered");
+       else
+-              set_mce_nospec(m->addr >> PAGE_SHIFT);
++              set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m));
+       return ret;
+ }
+--- a/include/linux/set_memory.h
++++ b/include/linux/set_memory.h
+@@ -18,7 +18,7 @@ static inline int set_memory_nx(unsigned
+ #endif
+ #ifndef set_mce_nospec
+-static inline int set_mce_nospec(unsigned long pfn)
++static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+ {
+       return 0;
+ }
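
Finally, a stand-alone sketch of the whole_page() decision added above. The mca_cfg.ser / MCI_STATUS_MISCV preconditions of the kernel version are omitted, and the MCi_MISC values are made up for illustration; the recoverable-address LSB field (bits 5:0 of MCi_MISC) gives the blast radius as a power of two.

/* whole_page.c - unmap vs. mark-uncacheable decision from MCi_MISC. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12		/* 4 KiB pages */
#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)	/* recoverable address LSB */

static bool whole_page(uint64_t mci_misc)
{
	/* LSB >= PAGE_SHIFT: at least a full page is poisoned, so unmap it
	 * (set_memory_np); otherwise only part of the page is affected and
	 * marking it uncacheable (set_memory_uc) keeps the rest readable. */
	return MCI_MISC_ADDR_LSB(mci_misc) >= PAGE_SHIFT;
}

int main(void)
{
	uint64_t cache_line = 6;	/* 2^6  = 64-byte blast radius */
	uint64_t full_page  = 12;	/* 2^12 = 4 KiB blast radius   */

	printf("LSB 6  -> whole page? %s\n", whole_page(cache_line) ? "yes" : "no");
	printf("LSB 12 -> whole page? %s\n", whole_page(full_page)  ? "yes" : "no");
	return 0;
}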