git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Dec 2018 10:22:13 +0000 (11:22 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Dec 2018 10:22:13 +0000 (11:22 +0100)
added patches:
alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch
alsa-control-fix-race-between-adding-and-removing-a-user-element.patch
alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch
alsa-hda-realtek-support-alc300.patch
alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch
alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch
arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch
btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch
btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch
fs-fix-lost-error-code-in-dio_complete.patch
kvm-mmu-fix-race-in-emulated-page-table-writes.patch
kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch
kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch
kvm-x86-fix-scan-ioapic-use-before-initialization.patch
pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch
perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch
perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch
x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch
x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch
xtensa-enable-coprocessors-that-are-being-flushed.patch
xtensa-fix-coprocessor-context-offset-definitions.patch
xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch

23 files changed:
queue-4.14/alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch [new file with mode: 0644]
queue-4.14/alsa-control-fix-race-between-adding-and-removing-a-user-element.patch [new file with mode: 0644]
queue-4.14/alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch [new file with mode: 0644]
queue-4.14/alsa-hda-realtek-support-alc300.patch [new file with mode: 0644]
queue-4.14/alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch [new file with mode: 0644]
queue-4.14/alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch [new file with mode: 0644]
queue-4.14/arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch [new file with mode: 0644]
queue-4.14/btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch [new file with mode: 0644]
queue-4.14/btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch [new file with mode: 0644]
queue-4.14/fs-fix-lost-error-code-in-dio_complete.patch [new file with mode: 0644]
queue-4.14/kvm-mmu-fix-race-in-emulated-page-table-writes.patch [new file with mode: 0644]
queue-4.14/kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch [new file with mode: 0644]
queue-4.14/kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch [new file with mode: 0644]
queue-4.14/kvm-x86-fix-scan-ioapic-use-before-initialization.patch [new file with mode: 0644]
queue-4.14/pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch [new file with mode: 0644]
queue-4.14/perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch [new file with mode: 0644]
queue-4.14/perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch [new file with mode: 0644]
queue-4.14/x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch [new file with mode: 0644]
queue-4.14/xtensa-enable-coprocessors-that-are-being-flushed.patch [new file with mode: 0644]
queue-4.14/xtensa-fix-coprocessor-context-offset-definitions.patch [new file with mode: 0644]
queue-4.14/xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch [new file with mode: 0644]

diff --git a/queue-4.14/alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch b/queue-4.14/alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch
new file mode 100644 (file)
index 0000000..ab829e9
--- /dev/null
@@ -0,0 +1,41 @@
+From 7194eda1ba0872d917faf3b322540b4f57f11ba5 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 15:44:00 +0100
+Subject: ALSA: ac97: Fix incorrect bit shift at AC97-SPSA control write
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7194eda1ba0872d917faf3b322540b4f57f11ba5 upstream.
+
+The function snd_ac97_put_spsa() gets the bit shift value from the
+associated private_value, but it extracts too much; the current code
+extracts 8 bit values in bits 8-15, but this is a combination of two
+nibbles (bits 8-11 and bits 12-15) for left and right shifts.
+Due to the incorrect bits extraction, the actual shift may go beyond
+the 32bit value, as spotted recently by UBSAN check:
+ UBSAN: Undefined behaviour in sound/pci/ac97/ac97_codec.c:836:7
+ shift exponent 68 is too large for 32-bit type 'int'
+
+This patch fixes the shift value extraction by masking the value properly
+with 0x0f instead of 0xff.
+
+Reported-and-tested-by: Meelis Roos <mroos@linux.ee>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/ac97/ac97_codec.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_
+ {
+       struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
+       int reg = kcontrol->private_value & 0xff;
+-      int shift = (kcontrol->private_value >> 8) & 0xff;
++      int shift = (kcontrol->private_value >> 8) & 0x0f;
+       int mask = (kcontrol->private_value >> 16) & 0xff;
+       // int invert = (kcontrol->private_value >> 24) & 0xff;
+       unsigned short value, old, new;
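
For readers who want to see the failure mode outside the kernel, here is a
minimal stand-alone C sketch (not part of the patch; the concrete field values
are made up) of why masking with 0xff over-extracts the shift: with a
left-shift nibble of 4 in bits 8-11 and a right-shift nibble of 4 in bits
12-15, the old code reads 0x44 = 68 (matching the UBSAN report), while the
0x0f mask recovers the intended nibble.

#include <stdio.h>

int main(void)
{
        /* reg in bits 0-7, left-shift nibble in bits 8-11, right-shift
         * nibble in bits 12-15, mask in bits 16-23: the layout described
         * in the changelog above; concrete values are illustrative only */
        unsigned long private_value = 0x3aUL          /* reg         */
                                    | (0x4UL << 8)    /* left shift  */
                                    | (0x4UL << 12)   /* right shift */
                                    | (0x3UL << 16);  /* mask        */

        int old_shift = (private_value >> 8) & 0xff;  /* 0x44 = 68, so 1 << 68 is UB */
        int new_shift = (private_value >> 8) & 0x0f;  /* 4, the intended left shift  */

        printf("old extraction: %d, fixed extraction: %d\n", old_shift, new_shift);
        return 0;
}
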
diff --git a/queue-4.14/alsa-control-fix-race-between-adding-and-removing-a-user-element.patch b/queue-4.14/alsa-control-fix-race-between-adding-and-removing-a-user-element.patch
new file mode 100644 (file)
index 0000000..355abae
--- /dev/null
@@ -0,0 +1,158 @@
+From e1a7bfe3807974e66f971f2589d4e0197ec0fced Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 22 Nov 2018 14:36:17 +0100
+Subject: ALSA: control: Fix race between adding and removing a user element
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit e1a7bfe3807974e66f971f2589d4e0197ec0fced upstream.
+
+The procedure for adding a user control element has some window opened
+for race against the concurrent removal of a user element.  This was
+caught by syzkaller, hitting a KASAN use-after-free error.
+
+This patch addresses the bug by wrapping the whole procedure to add a
+user control element with the card->controls_rwsem, instead of only
+around the increment of card->user_ctl_count.
+
+This required a slight code refactoring, too.  The function
+snd_ctl_add() is split into two parts: a core function to add the
+control element and a wrapper that calls it.  The former is called from the
+function for adding a user control element inside the controls_rwsem.
+
+One change to be noted is that snd_ctl_notify() for adding a control
+element gets called inside the controls_rwsem as well, whereas it used
+to be called outside the rwsem.  But this should be OK, as snd_ctl_notify()
+takes another (finer) rwlock instead of rwsem, and the call of
+snd_ctl_notify() inside rwsem is already done in another code path.
+
+Reported-by: syzbot+dc09047bce3820621ba2@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/control.c |   80 ++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 45 insertions(+), 35 deletions(-)
+
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -347,6 +347,40 @@ static int snd_ctl_find_hole(struct snd_
+       return 0;
+ }
++/* add a new kcontrol object; call with card->controls_rwsem locked */
++static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
++{
++      struct snd_ctl_elem_id id;
++      unsigned int idx;
++      unsigned int count;
++
++      id = kcontrol->id;
++      if (id.index > UINT_MAX - kcontrol->count)
++              return -EINVAL;
++
++      if (snd_ctl_find_id(card, &id)) {
++              dev_err(card->dev,
++                      "control %i:%i:%i:%s:%i is already present\n",
++                      id.iface, id.device, id.subdevice, id.name, id.index);
++              return -EBUSY;
++      }
++
++      if (snd_ctl_find_hole(card, kcontrol->count) < 0)
++              return -ENOMEM;
++
++      list_add_tail(&kcontrol->list, &card->controls);
++      card->controls_count += kcontrol->count;
++      kcontrol->id.numid = card->last_numid + 1;
++      card->last_numid += kcontrol->count;
++
++      id = kcontrol->id;
++      count = kcontrol->count;
++      for (idx = 0; idx < count; idx++, id.index++, id.numid++)
++              snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
++
++      return 0;
++}
++
+ /**
+  * snd_ctl_add - add the control instance to the card
+  * @card: the card instance
+@@ -363,45 +397,18 @@ static int snd_ctl_find_hole(struct snd_
+  */
+ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+ {
+-      struct snd_ctl_elem_id id;
+-      unsigned int idx;
+-      unsigned int count;
+       int err = -EINVAL;
+       if (! kcontrol)
+               return err;
+       if (snd_BUG_ON(!card || !kcontrol->info))
+               goto error;
+-      id = kcontrol->id;
+-      if (id.index > UINT_MAX - kcontrol->count)
+-              goto error;
+       down_write(&card->controls_rwsem);
+-      if (snd_ctl_find_id(card, &id)) {
+-              up_write(&card->controls_rwsem);
+-              dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
+-                                      id.iface,
+-                                      id.device,
+-                                      id.subdevice,
+-                                      id.name,
+-                                      id.index);
+-              err = -EBUSY;
+-              goto error;
+-      }
+-      if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
+-              up_write(&card->controls_rwsem);
+-              err = -ENOMEM;
+-              goto error;
+-      }
+-      list_add_tail(&kcontrol->list, &card->controls);
+-      card->controls_count += kcontrol->count;
+-      kcontrol->id.numid = card->last_numid + 1;
+-      card->last_numid += kcontrol->count;
+-      id = kcontrol->id;
+-      count = kcontrol->count;
++      err = __snd_ctl_add(card, kcontrol);
+       up_write(&card->controls_rwsem);
+-      for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+-              snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
++      if (err < 0)
++              goto error;
+       return 0;
+  error:
+@@ -1360,9 +1367,12 @@ static int snd_ctl_elem_add(struct snd_c
+               kctl->tlv.c = snd_ctl_elem_user_tlv;
+       /* This function manage to free the instance on failure. */
+-      err = snd_ctl_add(card, kctl);
+-      if (err < 0)
+-              return err;
++      down_write(&card->controls_rwsem);
++      err = __snd_ctl_add(card, kctl);
++      if (err < 0) {
++              snd_ctl_free_one(kctl);
++              goto unlock;
++      }
+       offset = snd_ctl_get_ioff(kctl, &info->id);
+       snd_ctl_build_ioff(&info->id, kctl, offset);
+       /*
+@@ -1373,10 +1383,10 @@ static int snd_ctl_elem_add(struct snd_c
+        * which locks the element.
+        */
+-      down_write(&card->controls_rwsem);
+       card->user_ctl_count++;
+-      up_write(&card->controls_rwsem);
++ unlock:
++      up_write(&card->controls_rwsem);
+       return 0;
+ }
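
The split above follows the usual kernel pattern of a lock-assuming
__helper() plus a locking wrapper, which lets the user-element path do its
extra bookkeeping inside the same critical section.  A minimal userspace
analogue of that pattern, with illustrative names and a pthread rwlock
standing in for the rwsem:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t controls_lock = PTHREAD_RWLOCK_INITIALIZER;
static int controls_count;
static int user_ctl_count;

/* core helper: caller must hold controls_lock for writing */
static int __ctl_add(int id)
{
        if (id < 0)
                return -EINVAL;
        controls_count++;
        return 0;
}

/* plain add: just wrap the helper with the lock */
static int ctl_add(int id)
{
        int err;

        pthread_rwlock_wrlock(&controls_lock);
        err = __ctl_add(id);
        pthread_rwlock_unlock(&controls_lock);
        return err;
}

/* user-element add: the whole sequence, including the counter update,
 * now sits inside one critical section, closing the race window */
static int ctl_add_user(int id)
{
        int err;

        pthread_rwlock_wrlock(&controls_lock);
        err = __ctl_add(id);
        if (!err)
                user_ctl_count++;
        pthread_rwlock_unlock(&controls_lock);
        return err;
}

int main(void)
{
        printf("%d %d\n", ctl_add(1), ctl_add_user(2));
        return 0;
}

The point of the refactoring is that the user-element path no longer has a
window between adding the element and updating its own state in which a
concurrent removal can run.
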
diff --git a/queue-4.14/alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch b/queue-4.14/alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch
new file mode 100644 (file)
index 0000000..3f0d074
--- /dev/null
@@ -0,0 +1,34 @@
+From 8cd65271f8e545ddeed10ecc2e417936bdff168e Mon Sep 17 00:00:00 2001
+From: Anisse Astier <anisse@astier.eu>
+Date: Fri, 23 Nov 2018 17:59:11 +0100
+Subject: ALSA: hda/realtek - fix headset mic detection for MSI MS-B171
+
+From: Anisse Astier <anisse@astier.eu>
+
+commit 8cd65271f8e545ddeed10ecc2e417936bdff168e upstream.
+
+MSI Cubi N 8GL (MS-B171) needs the same fixup as its older model, the
+MS-B120, in order for the headset mic to be properly detected.
+
+They both use a single 3-way jack for both mic and headset with an
+ALC283 codec, with the same pins used.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Anisse Astier <anisse@astier.eu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6411,6 +6411,7 @@ static const struct snd_pci_quirk alc269
+       SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
++      SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
diff --git a/queue-4.14/alsa-hda-realtek-support-alc300.patch b/queue-4.14/alsa-hda-realtek-support-alc300.patch
new file mode 100644 (file)
index 0000000..2084649
--- /dev/null
@@ -0,0 +1,68 @@
+From 1078bef0cd9291355a20369b21cd823026ab8eaa Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Thu, 8 Nov 2018 16:36:15 +0800
+Subject: ALSA: hda/realtek - Support ALC300
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 1078bef0cd9291355a20369b21cd823026ab8eaa upstream.
+
+This patch will enable ALC300.
+
+[ It's almost equivalent with other ALC269-compatible ones, and
+  apparently has no loopback mixer -- tiwai ]
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -343,6 +343,7 @@ static void alc_fill_eapd_coef(struct hd
+       case 0x10ec0285:
+       case 0x10ec0298:
+       case 0x10ec0289:
++      case 0x10ec0300:
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               break;
+       case 0x10ec0275:
+@@ -2758,6 +2759,7 @@ enum {
+       ALC269_TYPE_ALC215,
+       ALC269_TYPE_ALC225,
+       ALC269_TYPE_ALC294,
++      ALC269_TYPE_ALC300,
+       ALC269_TYPE_ALC700,
+ };
+@@ -2792,6 +2794,7 @@ static int alc269_parse_auto_config(stru
+       case ALC269_TYPE_ALC215:
+       case ALC269_TYPE_ALC225:
+       case ALC269_TYPE_ALC294:
++      case ALC269_TYPE_ALC300:
+       case ALC269_TYPE_ALC700:
+               ssids = alc269_ssids;
+               break;
+@@ -7089,6 +7092,10 @@ static int patch_alc269(struct hda_codec
+               spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+               break;
++      case 0x10ec0300:
++              spec->codec_variant = ALC269_TYPE_ALC300;
++              spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
++              break;
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
+@@ -8160,6 +8167,7 @@ static const struct hda_device_id snd_hd
+       HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
++      HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+       HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
+       HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
+       HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
diff --git a/queue-4.14/alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch b/queue-4.14/alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch
new file mode 100644 (file)
index 0000000..4ecee9b
--- /dev/null
@@ -0,0 +1,51 @@
+From 9a20332ab373b1f8f947e0a9c923652b32dab031 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 18:18:30 +0100
+Subject: ALSA: sparc: Fix invalid snd_free_pages() at error path
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 9a20332ab373b1f8f947e0a9c923652b32dab031 upstream.
+
+Some spurious calls of snd_free_pages() have been overlooked and
+remain in the error paths of sparc cs4231 driver code.  Since
+runtime->dma_area is managed by the PCM core helper, we shouldn't
+release it manually.
+
+Drop the superfluous calls.
+
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/sparc/cs4231.c |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/sound/sparc/cs4231.c
++++ b/sound/sparc/cs4231.c
+@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(stru
+       runtime->hw = snd_cs4231_playback;
+       err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
+-      if (err < 0) {
+-              snd_free_pages(runtime->dma_area, runtime->dma_bytes);
++      if (err < 0)
+               return err;
+-      }
+       chip->playback_substream = substream;
+       chip->p_periods_sent = 0;
+       snd_pcm_set_sync(substream);
+@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struc
+       runtime->hw = snd_cs4231_capture;
+       err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
+-      if (err < 0) {
+-              snd_free_pages(runtime->dma_area, runtime->dma_bytes);
++      if (err < 0)
+               return err;
+-      }
+       chip->capture_substream = substream;
+       chip->c_periods_sent = 0;
+       snd_pcm_set_sync(substream);
diff --git a/queue-4.14/alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch b/queue-4.14/alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch
new file mode 100644 (file)
index 0000000..6083a63
--- /dev/null
@@ -0,0 +1,42 @@
+From 7b69154171b407844c273ab4c10b5f0ddcd6aa29 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 18:16:33 +0100
+Subject: ALSA: wss: Fix invalid snd_free_pages() at error path
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7b69154171b407844c273ab4c10b5f0ddcd6aa29 upstream.
+
+Some spurious calls of snd_free_pages() have been overlooked and
+remain in the error paths of wss driver code.  Since runtime->dma_area
+is managed by the PCM core helper, we shouldn't release it manually.
+
+Drop the superfluous calls.
+
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/isa/wss/wss_lib.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/sound/isa/wss/wss_lib.c
++++ b/sound/isa/wss/wss_lib.c
+@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct
+       if (err < 0) {
+               if (chip->release_dma)
+                       chip->release_dma(chip, chip->dma_private_data, chip->dma1);
+-              snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+               return err;
+       }
+       chip->playback_substream = substream;
+@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct s
+       if (err < 0) {
+               if (chip->release_dma)
+                       chip->release_dma(chip, chip->dma_private_data, chip->dma2);
+-              snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+               return err;
+       }
+       chip->capture_substream = substream;
diff --git a/queue-4.14/arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch b/queue-4.14/arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch
new file mode 100644 (file)
index 0000000..680f9b5
--- /dev/null
@@ -0,0 +1,34 @@
+From c1d91f86a1b4c9c05854d59c6a0abd5d0f75b849 Mon Sep 17 00:00:00 2001
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Date: Tue, 13 Nov 2018 11:25:35 +0100
+Subject: arm64: dts: rockchip: Fix PCIe reset polarity for rk3399-puma-haikou.
+
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+
+commit c1d91f86a1b4c9c05854d59c6a0abd5d0f75b849 upstream.
+
+This patch fixes the wrong polarity setting for the PCIe host driver's
+pre-reset pin for rk3399-puma-haikou. Without this patch link training
+will most likely fail.
+
+Fixes: 60fd9f72ce8a ("arm64: dts: rockchip: add Haikou baseboard with RK3399-Q7 SoM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+@@ -130,7 +130,7 @@
+ };
+ &pcie0 {
+-      ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>;
++      ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
+       num-lanes = <4>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/queue-4.14/btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch b/queue-4.14/btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch
new file mode 100644 (file)
index 0000000..9511a2e
--- /dev/null
@@ -0,0 +1,39 @@
+From f505754fd6599230371cb01b9332754ddc104be1 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 14 Nov 2018 11:35:24 +0000
+Subject: Btrfs: ensure path name is null terminated at btrfs_control_ioctl
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit f505754fd6599230371cb01b9332754ddc104be1 upstream.
+
+We were using the path name received from user space without checking that
+it is null terminated. While btrfs-progs is well behaved and does proper
+validation and null termination, someone could call the ioctl and pass
+a non-null terminated path, leading to buffer overrun problems in the
+kernel.  The ioctl is protected by CAP_SYS_ADMIN.
+
+So just set the last byte of the path to a null character, similar to what
+we do in other ioctls (add/remove/resize device, snapshot creation, etc).
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/super.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2176,6 +2176,7 @@ static long btrfs_control_ioctl(struct f
+       vol = memdup_user((void __user *)arg, sizeof(*vol));
+       if (IS_ERR(vol))
+               return PTR_ERR(vol);
++      vol->name[BTRFS_PATH_NAME_MAX] = '\0';
+       switch (cmd) {
+       case BTRFS_IOC_SCAN_DEV:
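
The bug class is easy to reproduce in plain C; the sketch below uses made-up
names and a tiny buffer rather than the btrfs structures, but shows why a
fixed-size string copied verbatim from user space must have its last byte
forced to '\0' before any string operation touches it:

#include <stdio.h>
#include <string.h>

#define PATH_NAME_MAX 8         /* made-up size, for illustration only */

struct vol_args {
        char name[PATH_NAME_MAX + 1];
};

static void handle_ioctl(struct vol_args *vol)
{
        /* mirror of the fix: clamp whatever user space handed us */
        vol->name[PATH_NAME_MAX] = '\0';
        printf("path length: %zu\n", strlen(vol->name));
}

int main(void)
{
        struct vol_args vol;

        /* simulate a non-terminated buffer arriving from user space */
        memset(vol.name, 'A', sizeof(vol.name));
        handle_ioctl(&vol);
        return 0;
}
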
diff --git a/queue-4.14/btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch b/queue-4.14/btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch
new file mode 100644 (file)
index 0000000..08068f7
--- /dev/null
@@ -0,0 +1,38 @@
+From 42a657f57628402c73237547f0134e083e2f6764 Mon Sep 17 00:00:00 2001
+From: Pan Bian <bianpan2016@163.com>
+Date: Fri, 23 Nov 2018 18:10:15 +0800
+Subject: btrfs: relocation: set trans to be NULL after ending transaction
+
+From: Pan Bian <bianpan2016@163.com>
+
+commit 42a657f57628402c73237547f0134e083e2f6764 upstream.
+
+The function relocate_block_group calls btrfs_end_transaction to release
+trans when update_backref_cache returns 1, and then continues the loop
+body. If btrfs_block_rsv_refill fails this time, it will jump out the
+loop and the freed trans will be accessed. This may result in a
+use-after-free bug. The patch assigns NULL to trans after trans is
+released so that it will not be accessed.
+
+Fixes: 0647bf564f1 ("Btrfs: improve forever loop when doing balance relocation")
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/relocation.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4048,6 +4048,7 @@ static noinline_for_stack int relocate_b
+ restart:
+               if (update_backref_cache(trans, &rc->backref_cache)) {
+                       btrfs_end_transaction(trans);
++                      trans = NULL;
+                       continue;
+               }
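
A stand-alone sketch of the pattern (illustrative names, not the btrfs API):
once the transaction is ended early inside the loop, the local pointer has to
be cleared, otherwise a later error path that reaches the common exit can act
on freed memory.

#include <stdlib.h>

struct txn { int id; };

static struct txn *start_txn(void)  { return calloc(1, sizeof(struct txn)); }
static void end_txn(struct txn *t)  { free(t); }

static int relocate(int need_restart, int refill_fails)
{
        struct txn *t = start_txn();
        int err = 0;

        if (!t)
                return -1;

        if (need_restart) {
                end_txn(t);
                t = NULL;       /* the fix: forget the freed transaction */
        }

        if (refill_fails) {     /* later failure jumps to the common exit */
                err = -1;
                goto out;
        }
out:
        if (t)                  /* without "t = NULL" above, this would act
                                 * on freed memory */
                end_txn(t);
        return err;
}

int main(void)
{
        return relocate(1, 1) == -1 ? 0 : 1;
}
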
diff --git a/queue-4.14/fs-fix-lost-error-code-in-dio_complete.patch b/queue-4.14/fs-fix-lost-error-code-in-dio_complete.patch
new file mode 100644 (file)
index 0000000..2b4de0a
--- /dev/null
@@ -0,0 +1,56 @@
+From 41e817bca3acd3980efe5dd7d28af0e6f4ab9247 Mon Sep 17 00:00:00 2001
+From: Maximilian Heyne <mheyne@amazon.de>
+Date: Fri, 30 Nov 2018 08:35:14 -0700
+Subject: fs: fix lost error code in dio_complete
+
+From: Maximilian Heyne <mheyne@amazon.de>
+
+commit 41e817bca3acd3980efe5dd7d28af0e6f4ab9247 upstream.
+
+commit e259221763a40403d5bb232209998e8c45804ab8 ("fs: simplify the
+generic_write_sync prototype") reworked callers of generic_write_sync(),
+and ended up dropping the error return for the directio path. Prior to
+that commit, in dio_complete(), an error would be bubbled up the stack,
+but after that commit, errors passed on to dio_complete were eaten up.
+
+This was reported on the list earlier, and a fix was proposed in
+https://lore.kernel.org/lkml/20160921141539.GA17898@infradead.org/, but
+never followed up with.  We recently hit this bug in our testing where
+fencing io errors, which were previously erroring out with EIO, were
+being returned as success operations after this commit.
+
+The fix proposed on the list earlier was a little short -- it would have
+still called generic_write_sync() in case `ret` already contained an
+error. This fix ensures generic_write_sync() is only called when there's
+no pending error in the write. Additionally, transferred is replaced
+with ret to bring this code in line with other callers.
+
+Fixes: e259221763a4 ("fs: simplify the generic_write_sync prototype")
+Reported-by: Ravi Nankani <rnankani@amazon.com>
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+CC: Torsten Mehlan <tomeh@amazon.de>
+CC: Uwe Dannowski <uwed@amazon.de>
+CC: Amit Shah <aams@amazon.de>
+CC: David Woodhouse <dwmw@amazon.co.uk>
+CC: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/direct-io.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -304,8 +304,8 @@ static ssize_t dio_complete(struct dio *
+                */
+               dio->iocb->ki_pos += transferred;
+-              if (dio->op == REQ_OP_WRITE)
+-                      ret = generic_write_sync(dio->iocb,  transferred);
++              if (ret > 0 && dio->op == REQ_OP_WRITE)
++                      ret = generic_write_sync(dio->iocb, ret);
+               dio->iocb->ki_complete(dio->iocb, ret, 0);
+       }
diff --git a/queue-4.14/kvm-mmu-fix-race-in-emulated-page-table-writes.patch b/queue-4.14/kvm-mmu-fix-race-in-emulated-page-table-writes.patch
new file mode 100644 (file)
index 0000000..55da6da
--- /dev/null
@@ -0,0 +1,91 @@
+From 0e0fee5c539b61fdd098332e0e2cc375d9073706 Mon Sep 17 00:00:00 2001
+From: Junaid Shahid <junaids@google.com>
+Date: Wed, 31 Oct 2018 14:53:57 -0700
+Subject: kvm: mmu: Fix race in emulated page table writes
+
+From: Junaid Shahid <junaids@google.com>
+
+commit 0e0fee5c539b61fdd098332e0e2cc375d9073706 upstream.
+
+When a guest page table is updated via an emulated write,
+kvm_mmu_pte_write() is called to update the shadow PTE using the just
+written guest PTE value. But if two emulated guest PTE writes happened
+concurrently, it is possible that the guest PTE and the shadow PTE end
+up being out of sync. Emulated writes do not mark the shadow page as
+unsync-ed, so this inconsistency will not be resolved even by a guest TLB
+flush (unless the page was marked as unsync-ed at some other point).
+
+This is fixed by re-reading the current value of the guest PTE after the
+MMU lock has been acquired instead of just using the value that was
+written prior to calling kvm_mmu_pte_write().
+
+Signed-off-by: Junaid Shahid <junaids@google.com>
+Reviewed-by: Wanpeng Li <wanpengli@tencent.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c |   27 +++++++++------------------
+ 1 file changed, 9 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4734,9 +4734,9 @@ static bool need_remote_flush(u64 old, u
+ }
+ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+-                                  const u8 *new, int *bytes)
++                                  int *bytes)
+ {
+-      u64 gentry;
++      u64 gentry = 0;
+       int r;
+       /*
+@@ -4748,22 +4748,12 @@ static u64 mmu_pte_write_fetch_gpte(stru
+               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+               *gpa &= ~(gpa_t)7;
+               *bytes = 8;
+-              r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
+-              if (r)
+-                      gentry = 0;
+-              new = (const u8 *)&gentry;
+       }
+-      switch (*bytes) {
+-      case 4:
+-              gentry = *(const u32 *)new;
+-              break;
+-      case 8:
+-              gentry = *(const u64 *)new;
+-              break;
+-      default:
+-              gentry = 0;
+-              break;
++      if (*bytes == 4 || *bytes == 8) {
++              r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
++              if (r)
++                      gentry = 0;
+       }
+       return gentry;
+@@ -4876,8 +4866,6 @@ static void kvm_mmu_pte_write(struct kvm
+       pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+-      gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
+-
+       /*
+        * No need to care whether allocation memory is successful
+        * or not since pte prefetch is skiped if it does not have
+@@ -4886,6 +4874,9 @@ static void kvm_mmu_pte_write(struct kvm
+       mmu_topup_memory_caches(vcpu);
+       spin_lock(&vcpu->kvm->mmu_lock);
++
++      gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
++
+       ++vcpu->kvm->stat.mmu_pte_write;
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
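
The core of the fix, shown as a userspace sketch with illustrative names and
a mutex standing in for the mmu_lock: the derived copy has to be computed
from the source value re-read after the serializing lock is taken, not from a
snapshot captured before it.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int guest_pte;  /* source of truth, written by emulation */
static int shadow_pte;          /* derived copy kept in sync under mmu_lock */

static void emulated_pte_write(int new_val)
{
        guest_pte = new_val;            /* the emulated guest write itself */

        pthread_mutex_lock(&mmu_lock);
        /* re-read the current guest value under the lock instead of
         * reusing the new_val snapshot taken before locking, so two
         * concurrent writers cannot leave shadow_pte stale */
        shadow_pte = guest_pte;
        pthread_mutex_unlock(&mmu_lock);
}

int main(void)
{
        emulated_pte_write(42);
        printf("shadow_pte=%d\n", shadow_pte);
        return 0;
}
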
diff --git a/queue-4.14/kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch b/queue-4.14/kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch
new file mode 100644 (file)
index 0000000..5fdece1
--- /dev/null
@@ -0,0 +1,64 @@
+From fd65d3142f734bc4376053c8d75670041903134d Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Tue, 22 May 2018 09:54:20 -0700
+Subject: kvm: svm: Ensure an IBPB on all affected CPUs when freeing a vmcb
+
+From: Jim Mattson <jmattson@google.com>
+
+commit fd65d3142f734bc4376053c8d75670041903134d upstream.
+
+Previously, we only called indirect_branch_prediction_barrier on the
+logical CPU that freed a vmcb. This function should be called on all
+logical CPUs that last loaded the vmcb in question.
+
+Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
+Reported-by: Neel Natu <neelnatu@google.com>
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c |   20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1733,21 +1733,31 @@ out:
+       return ERR_PTR(err);
+ }
++static void svm_clear_current_vmcb(struct vmcb *vmcb)
++{
++      int i;
++
++      for_each_online_cpu(i)
++              cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
++}
++
+ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++      /*
++       * The vmcb page can be recycled, causing a false negative in
++       * svm_vcpu_load(). So, ensure that no logical CPU has this
++       * vmcb page recorded as its current vmcb.
++       */
++      svm_clear_current_vmcb(svm->vmcb);
++
+       __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
+       __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
+       __free_page(virt_to_page(svm->nested.hsave));
+       __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, svm);
+-      /*
+-       * The vmcb page can be recycled, causing a false negative in
+-       * svm_vcpu_load(). So do a full IBPB now.
+-       */
+-      indirect_branch_prediction_barrier();
+ }
+ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
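
The new svm_clear_current_vmcb() walks all CPUs and atomically drops any
cached pointer to the vmcb being freed.  A userspace sketch of the same idea,
with an array standing in for per-CPU data and C11 atomics for the cmpxchg
(names are illustrative):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct vmcb_like { int dummy; };

/* stand-in for the per-CPU "current_vmcb" cache */
static _Atomic(struct vmcb_like *) current_vmcb[NR_CPUS];

/* drop every cached reference to the object about to be freed, so a
 * later pointer-equality check cannot match a recycled allocation */
static void clear_current(struct vmcb_like *vmcb)
{
        for (int i = 0; i < NR_CPUS; i++) {
                struct vmcb_like *expected = vmcb;

                atomic_compare_exchange_strong(&current_vmcb[i], &expected, NULL);
        }
}

int main(void)
{
        struct vmcb_like vmcb;

        current_vmcb[2] = &vmcb;        /* "CPU 2" last loaded this vmcb */
        clear_current(&vmcb);
        printf("cpu2 cache cleared: %d\n", current_vmcb[2] == NULL);
        return 0;
}
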
diff --git a/queue-4.14/kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch b/queue-4.14/kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch
new file mode 100644 (file)
index 0000000..adeb88f
--- /dev/null
@@ -0,0 +1,41 @@
+From bcbfbd8ec21096027f1ee13ce6c185e8175166f6 Mon Sep 17 00:00:00 2001
+From: Liran Alon <liran.alon@oracle.com>
+Date: Thu, 8 Nov 2018 00:43:06 +0200
+Subject: KVM: x86: Fix kernel info-leak in KVM_HC_CLOCK_PAIRING hypercall
+
+From: Liran Alon <liran.alon@oracle.com>
+
+commit bcbfbd8ec21096027f1ee13ce6c185e8175166f6 upstream.
+
+kvm_pv_clock_pairing() allocates local var
+"struct kvm_clock_pairing clock_pairing" on stack and initializes
+all it's fields besides padding (clock_pairing.pad[]).
+
+Because clock_pairing var is written completely (including padding)
+to guest memory, failure to init struct padding results in kernel
+info-leak.
+
+Fix the issue by making sure to also init the padding with zeroes.
+
+Fixes: 55dd00a73a51 ("KVM: x86: add KVM_HC_CLOCK_PAIRING hypercall")
+Reported-by: syzbot+a8ef68d71211ba264f56@syzkaller.appspotmail.com
+Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
+Signed-off-by: Liran Alon <liran.alon@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6378,6 +6378,7 @@ static int kvm_pv_clock_pairing(struct k
+       clock_pairing.nsec = ts.tv_nsec;
+       clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
+       clock_pairing.flags = 0;
++      memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
+       ret = 0;
+       if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
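
A stand-alone sketch of the info-leak class (the struct is modeled loosely on
kvm_clock_pairing; treat the field sizes as illustrative): when a
stack-allocated struct is copied out wholesale, any bytes left uninitialized,
here the explicit pad[], export stale stack contents unless they are zeroed
first.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct clock_pairing_like {
        int64_t  sec;
        int64_t  nsec;
        uint64_t tsc;
        uint32_t flags;
        uint32_t pad[9];        /* explicit padding field, mirroring the
                                 * pattern addressed by the patch */
};

int main(void)
{
        struct clock_pairing_like cp;   /* uninitialized stack memory */

        cp.sec   = 1;
        cp.nsec  = 2;
        cp.tsc   = 3;
        cp.flags = 0;
        memset(&cp.pad, 0, sizeof(cp.pad));     /* the one-line fix above */

        /* in the kernel the copy-out is kvm_write_guest(); a plain memcpy
         * stands in for it here, and every copied byte is now defined */
        unsigned char out[sizeof(cp)];
        memcpy(out, &cp, sizeof(cp));
        printf("copied %zu fully initialized bytes\n", sizeof(out));
        return 0;
}
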
diff --git a/queue-4.14/kvm-x86-fix-scan-ioapic-use-before-initialization.patch b/queue-4.14/kvm-x86-fix-scan-ioapic-use-before-initialization.patch
new file mode 100644 (file)
index 0000000..a15e065
--- /dev/null
@@ -0,0 +1,107 @@
+From e97f852fd4561e77721bb9a4e0ea9d98305b1e93 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Tue, 20 Nov 2018 16:34:18 +0800
+Subject: KVM: X86: Fix scan ioapic use-before-initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+commit e97f852fd4561e77721bb9a4e0ea9d98305b1e93 upstream.
+
+Reported by syzkaller:
+
+ BUG: unable to handle kernel NULL pointer dereference at 00000000000001c8
+ PGD 80000003ec4da067 P4D 80000003ec4da067 PUD 3f7bfa067 PMD 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 7 PID: 5059 Comm: debug Tainted: G           OE     4.19.0-rc5 #16
+ RIP: 0010:__lock_acquire+0x1a6/0x1990
+ Call Trace:
+  lock_acquire+0xdb/0x210
+  _raw_spin_lock+0x38/0x70
+  kvm_ioapic_scan_entry+0x3e/0x110 [kvm]
+  vcpu_enter_guest+0x167e/0x1910 [kvm]
+  kvm_arch_vcpu_ioctl_run+0x35c/0x610 [kvm]
+  kvm_vcpu_ioctl+0x3e9/0x6d0 [kvm]
+  do_vfs_ioctl+0xa5/0x690
+  ksys_ioctl+0x6d/0x80
+  __x64_sys_ioctl+0x1a/0x20
+  do_syscall_64+0x83/0x6e0
+  entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The reason is that the testcase writes hyperv synic HV_X64_MSR_SINT6 msr
+and triggers scan ioapic logic to load synic vectors into EOI exit bitmap.
+However, irqchip is not initialized by this simple testcase, so ioapic/apic
+objects should not be accessed.
+This can be triggered by the following program:
+
+    #define _GNU_SOURCE
+
+    #include <endian.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+    #include <sys/syscall.h>
+    #include <sys/types.h>
+    #include <unistd.h>
+
+    uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff};
+
+    int main(void)
+    {
+       syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+       long res = 0;
+       memcpy((void*)0x20000040, "/dev/kvm", 9);
+       res = syscall(__NR_openat, 0xffffffffffffff9c, 0x20000040, 0, 0);
+       if (res != -1)
+               r[0] = res;
+       res = syscall(__NR_ioctl, r[0], 0xae01, 0);
+       if (res != -1)
+               r[1] = res;
+       res = syscall(__NR_ioctl, r[1], 0xae41, 0);
+       if (res != -1)
+               r[2] = res;
+       memcpy(
+                       (void*)0x20000080,
+                       "\x01\x00\x00\x00\x00\x5b\x61\xbb\x96\x00\x00\x40\x00\x00\x00\x00\x01\x00"
+                       "\x08\x00\x00\x00\x00\x00\x0b\x77\xd1\x78\x4d\xd8\x3a\xed\xb1\x5c\x2e\x43"
+                       "\xaa\x43\x39\xd6\xff\xf5\xf0\xa8\x98\xf2\x3e\x37\x29\x89\xde\x88\xc6\x33"
+                       "\xfc\x2a\xdb\xb7\xe1\x4c\xac\x28\x61\x7b\x9c\xa9\xbc\x0d\xa0\x63\xfe\xfe"
+                       "\xe8\x75\xde\xdd\x19\x38\xdc\x34\xf5\xec\x05\xfd\xeb\x5d\xed\x2e\xaf\x22"
+                       "\xfa\xab\xb7\xe4\x42\x67\xd0\xaf\x06\x1c\x6a\x35\x67\x10\x55\xcb",
+                       106);
+       syscall(__NR_ioctl, r[2], 0x4008ae89, 0x20000080);
+       syscall(__NR_ioctl, r[2], 0xae80, 0);
+       return 0;
+    }
+
+This patch fixes it by bailing out of the ioapic scan if the ioapic is not
+initialized in the kernel.
+
+Reported-by: Wei Wu <ww9210@gmail.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Wei Wu <ww9210@gmail.com>
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6885,7 +6885,8 @@ static void vcpu_scan_ioapic(struct kvm_
+       else {
+               if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+                       kvm_x86_ops->sync_pir_to_irr(vcpu);
+-              kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
++              if (ioapic_in_kernel(vcpu->kvm))
++                      kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+       }
+       bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
+                 vcpu_to_synic(vcpu)->vec_bitmap, 256);
diff --git a/queue-4.14/pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch b/queue-4.14/pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch
new file mode 100644 (file)
index 0000000..6b47934
--- /dev/null
@@ -0,0 +1,34 @@
+From c6fd6fe9dea44732cdcd970f1130b8cc50ad685a Mon Sep 17 00:00:00 2001
+From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+Date: Wed, 7 Nov 2018 05:16:49 +0000
+Subject: PCI: layerscape: Fix wrong invocation of outbound window disable accessor
+
+From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+
+commit c6fd6fe9dea44732cdcd970f1130b8cc50ad685a upstream.
+
+The order of parameters is not correct when invoking the outbound
+window disable routine. Fix it.
+
+Fixes: 4a2745d760fa ("PCI: layerscape: Disable outbound windows configured by bootloader")
+Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+[lorenzo.pieralisi@arm.com: commit log]
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/dwc/pci-layerscape.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/dwc/pci-layerscape.c
++++ b/drivers/pci/dwc/pci-layerscape.c
+@@ -89,7 +89,7 @@ static void ls_pcie_disable_outbound_atu
+       int i;
+       for (i = 0; i < PCIE_IATU_NUM; i++)
+-              dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
++              dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
+ }
+ static int ls1021_pcie_link_up(struct dw_pcie *pci)
diff --git a/queue-4.14/perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch b/queue-4.14/perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch
new file mode 100644 (file)
index 0000000..8191e63
--- /dev/null
@@ -0,0 +1,105 @@
+From 67266c1080ad56c31af72b9c18355fde8ccc124a Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 21 Nov 2018 11:16:11 +0100
+Subject: perf/x86/intel: Add generic branch tracing check to intel_pmu_has_bts()
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 67266c1080ad56c31af72b9c18355fde8ccc124a upstream.
+
+Currently we check the branch tracing only by checking for the
+PERF_COUNT_HW_BRANCH_INSTRUCTIONS event of PERF_TYPE_HARDWARE
+type. But we can define the same event with the PERF_TYPE_RAW
+type.
+
+Changing the intel_pmu_has_bts() code to check the event's final
+hw config value, so both HW types are covered.
+
+Adding unlikely to intel_pmu_has_bts() condition calls, because
+it was used in the original code in intel_bts_constraints.
+
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: http://lkml.kernel.org/r/20181121101612.16272-2-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c |   17 +++--------------
+ arch/x86/events/perf_event.h |   13 +++++++++----
+ 2 files changed, 12 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2345,16 +2345,7 @@ done:
+ static struct event_constraint *
+ intel_bts_constraints(struct perf_event *event)
+ {
+-      struct hw_perf_event *hwc = &event->hw;
+-      unsigned int hw_event, bts_event;
+-
+-      if (event->attr.freq)
+-              return NULL;
+-
+-      hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+-      bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+-
+-      if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
++      if (unlikely(intel_pmu_has_bts(event)))
+               return &bts_constraint;
+       return NULL;
+@@ -2976,10 +2967,8 @@ static unsigned long intel_pmu_free_runn
+ static int intel_pmu_bts_config(struct perf_event *event)
+ {
+       struct perf_event_attr *attr = &event->attr;
+-      struct hw_perf_event *hwc = &event->hw;
+-      if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+-          !attr->freq && hwc->sample_period == 1) {
++      if (unlikely(intel_pmu_has_bts(event))) {
+               /* BTS is not supported by this architecture. */
+               if (!x86_pmu.bts_active)
+                       return -EOPNOTSUPP;
+@@ -3038,7 +3027,7 @@ static int intel_pmu_hw_config(struct pe
+               /*
+                * BTS is set up earlier in this path, so don't account twice
+                */
+-              if (!intel_pmu_has_bts(event)) {
++              if (!unlikely(intel_pmu_has_bts(event))) {
+                       /* disallow lbr if conflicting events are present */
+                       if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+                               return -EBUSY;
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -850,11 +850,16 @@ static inline int amd_pmu_init(void)
+ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ {
+-      if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+-          !event->attr.freq && event->hw.sample_period == 1)
+-              return true;
++      struct hw_perf_event *hwc = &event->hw;
++      unsigned int hw_event, bts_event;
+-      return false;
++      if (event->attr.freq)
++              return false;
++
++      hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
++      bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
++
++      return hw_event == bts_event && hwc->sample_period == 1;
+ }
+ int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/queue-4.14/perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch b/queue-4.14/perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch
new file mode 100644 (file)
index 0000000..e6ea495
--- /dev/null
@@ -0,0 +1,126 @@
+From ed6101bbf6266ee83e620b19faa7c6ad56bb41ab Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 21 Nov 2018 11:16:10 +0100
+Subject: perf/x86/intel: Move branch tracing setup to the Intel-specific source file
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit ed6101bbf6266ee83e620b19faa7c6ad56bb41ab upstream.
+
+Moving the branch tracing setup to the Intel core object, into a separate
+intel_pmu_bts_config() function, because it's Intel specific.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: http://lkml.kernel.org/r/20181121101612.16272-1-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/core.c       |   20 --------------------
+ arch/x86/events/intel/core.c |   41 ++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event
+       if (config == -1LL)
+               return -EINVAL;
+-      /*
+-       * Branch tracing:
+-       */
+-      if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+-          !attr->freq && hwc->sample_period == 1) {
+-              /* BTS is not supported by this architecture. */
+-              if (!x86_pmu.bts_active)
+-                      return -EOPNOTSUPP;
+-
+-              /* BTS is currently only allowed for user-mode. */
+-              if (!attr->exclude_kernel)
+-                      return -EOPNOTSUPP;
+-
+-              /* disallow bts if conflicting events are present */
+-              if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+-                      return -EBUSY;
+-
+-              event->destroy = hw_perf_lbr_event_destroy;
+-      }
+-
+       hwc->config |= config;
+       return 0;
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2973,6 +2973,41 @@ static unsigned long intel_pmu_free_runn
+       return flags;
+ }
++static int intel_pmu_bts_config(struct perf_event *event)
++{
++      struct perf_event_attr *attr = &event->attr;
++      struct hw_perf_event *hwc = &event->hw;
++
++      if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
++          !attr->freq && hwc->sample_period == 1) {
++              /* BTS is not supported by this architecture. */
++              if (!x86_pmu.bts_active)
++                      return -EOPNOTSUPP;
++
++              /* BTS is currently only allowed for user-mode. */
++              if (!attr->exclude_kernel)
++                      return -EOPNOTSUPP;
++
++              /* disallow bts if conflicting events are present */
++              if (x86_add_exclusive(x86_lbr_exclusive_lbr))
++                      return -EBUSY;
++
++              event->destroy = hw_perf_lbr_event_destroy;
++      }
++
++      return 0;
++}
++
++static int core_pmu_hw_config(struct perf_event *event)
++{
++      int ret = x86_pmu_hw_config(event);
++
++      if (ret)
++              return ret;
++
++      return intel_pmu_bts_config(event);
++}
++
+ static int intel_pmu_hw_config(struct perf_event *event)
+ {
+       int ret = x86_pmu_hw_config(event);
+@@ -2980,6 +3015,10 @@ static int intel_pmu_hw_config(struct pe
+       if (ret)
+               return ret;
++      ret = intel_pmu_bts_config(event);
++      if (ret)
++              return ret;
++
+       if (event->attr.precise_ip) {
+               if (!event->attr.freq) {
+                       event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+@@ -3462,7 +3501,7 @@ static __initconst const struct x86_pmu
+       .enable_all             = core_pmu_enable_all,
+       .enable                 = core_pmu_enable_event,
+       .disable                = x86_pmu_disable_event,
+-      .hw_config              = x86_pmu_hw_config,
++      .hw_config              = core_pmu_hw_config,
+       .schedule_events        = x86_schedule_events,
+       .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
+       .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
diff --git a/queue-4.14/series b/queue-4.14/series
index a8948fde35939acb17849cacd0c86350e382ab05..f1750167ee92a057270aade68837accfd4e6d8d2 100644 (file)
@@ -106,3 +106,25 @@ x86speculation_Add_prctl()_control_for_indirect_branch_speculation.patch
 x86speculation_Enable_prctl_mode_for_spectre_v2_user.patch
 x86speculation_Add_seccomp_Spectre_v2_user_space_protection_mode.patch
 x86speculation_Provide_IBPB_always_command_line_options.patch
+kvm-mmu-fix-race-in-emulated-page-table-writes.patch
+kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch
+kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch
+kvm-x86-fix-scan-ioapic-use-before-initialization.patch
+xtensa-enable-coprocessors-that-are-being-flushed.patch
+xtensa-fix-coprocessor-context-offset-definitions.patch
+xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch
+btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch
+btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch
+pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch
+arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch
+x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch
+x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch
+perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch
+perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch
+fs-fix-lost-error-code-in-dio_complete.patch
+alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch
+alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch
+alsa-control-fix-race-between-adding-and-removing-a-user-element.patch
+alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch
+alsa-hda-realtek-support-alc300.patch
+alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch
diff --git a/queue-4.14/x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch b/queue-4.14/x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch
new file mode 100644 (file)
index 0000000..c57f63b
--- /dev/null
@@ -0,0 +1,84 @@
+From 68239654acafe6aad5a3c1dc7237e60accfebc03 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 20 Nov 2018 11:26:35 +0100
+Subject: x86/fpu: Disable bottom halves while loading FPU registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 68239654acafe6aad5a3c1dc7237e60accfebc03 upstream.
+
+The sequence
+
+  fpu->initialized = 1;                /* step A */
+  preempt_disable();           /* step B */
+  fpu__restore(fpu);
+  preempt_enable();
+
+in __fpu__restore_sig() is racy in regard to a context switch.
+
+For 32bit frames, __fpu__restore_sig() prepares the FPU state within
+fpu->state. To ensure that a context switch (switch_fpu_prepare() in
+particular) does not modify fpu->state it uses fpu__drop() which sets
+fpu->initialized to 0.
+
+After fpu->initialized is cleared, the CPU's FPU state is not saved
+to fpu->state during a context switch. The new state is loaded via
+fpu__restore(). It gets loaded into fpu->state from userland and
+verified to be sane. fpu->initialized is then set to 1 in order to avoid
+fpu__initialize() doing anything (overwrite the new state) which is part
+of fpu__restore().
+
+A context switch between step A and B above would save CPU's current FPU
+registers to fpu->state and overwrite the newly prepared state. This
+looks like a tiny race window but the Kernel Test Robot reported this
+back in 2016 while we had lazy FPU support. Borislav Petkov made the
+link between that report and another patch that has been posted. Since
+the removal of the lazy FPU support, this race goes unnoticed because
+the warning has been removed.
+
+Disable bottom halves around the restore sequence to avoid the race. BH
+need to be disabled because BH is allowed to run (even with preemption
+disabled) and might invoke kernel_fpu_begin() by doing IPsec.
+
+ [ bp: massage commit message a bit. ]
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Cc: kvm ML <kvm@vger.kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: stable@vger.kernel.org
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20181120102635.ddv3fvavxajjlfqk@linutronix.de
+Link: https://lkml.kernel.org/r/20160226074940.GA28911@pd.tnic
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/signal.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __use
+                       sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+               }
++              local_bh_disable();
+               fpu->initialized = 1;
+-              preempt_disable();
+               fpu__restore(fpu);
+-              preempt_enable();
++              local_bh_enable();
+               return err;
+       } else {
diff --git a/queue-4.14/x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch b/queue-4.14/x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch
new file mode 100644 (file)
index 0000000..3676c96
--- /dev/null
@@ -0,0 +1,104 @@
+From 60c8144afc287ef09ce8c1230c6aa972659ba1bb Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 27 Nov 2018 14:41:37 +0100
+Subject: x86/MCE/AMD: Fix the thresholding machinery initialization order
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 60c8144afc287ef09ce8c1230c6aa972659ba1bb upstream.
+
+Currently, the code sets up the thresholding interrupt vector and only
+then goes about initializing the thresholding banks. That order is
+wrong: a thresholding interrupt arriving early would cause a NULL
+pointer dereference when accessing the not-yet-initialized banks and
+prevent the machine from booting.
+
+Therefore, set the thresholding interrupt vector only *after* having
+initialized the banks successfully.
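+
+The resulting probe order, condensed from the hunks below
+(thresholding_irq_en is the renamed flag that prepare_threshold_block()
+sets once the threshold LVT offset has been programmed):
+
+  static __init int threshold_init_device(void)
+  {
+          unsigned lcpu = 0;
+
+          /* initialize the per-CPU thresholding banks first ... */
+          for_each_online_cpu(lcpu) {
+                  int err = mce_threshold_create_device(lcpu);
+
+                  if (err)
+                          return err;
+          }
+
+          /* ... and only then point the interrupt vector at the handler */
+          if (thresholding_irq_en)
+                  mce_threshold_vector = amd_threshold_interrupt;
+
+          return 0;
+  }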
+
+Fixes: 18807ddb7f88 ("x86/mce/AMD: Reset Threshold Limit after logging error")
+Reported-by: Rafał Miłecki <rafal@milecki.pl>
+Reported-by: John Clemens <clemej@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Tested-by: Rafał Miłecki <rafal@milecki.pl>
+Tested-by: John Clemens <john@deater.net>
+Cc: Aravind Gopalakrishnan <aravindksg.lkml@gmail.com>
+Cc: linux-edac@vger.kernel.org
+Cc: stable@vger.kernel.org
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: x86@kernel.org
+Cc: Yazen Ghannam <Yazen.Ghannam@amd.com>
+Link: https://lkml.kernel.org/r/20181127101700.2964-1-zajec5@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=201291
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce_amd.c |   19 ++++++-------------
+ 1 file changed, 6 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -56,7 +56,7 @@
+ /* Threshold LVT offset is at MSR0xC0000410[15:12] */
+ #define SMCA_THR_LVT_OFF      0xF000
+-static bool thresholding_en;
++static bool thresholding_irq_en;
+ static const char * const th_names[] = {
+       "load_store",
+@@ -533,9 +533,8 @@ prepare_threshold_block(unsigned int ban
+ set_offset:
+       offset = setup_APIC_mce_threshold(offset, new);
+-
+-      if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
+-              mce_threshold_vector = amd_threshold_interrupt;
++      if (offset == new)
++              thresholding_irq_en = true;
+ done:
+       mce_threshold_block_init(&b, offset);
+@@ -1356,9 +1355,6 @@ int mce_threshold_remove_device(unsigned
+ {
+       unsigned int bank;
+-      if (!thresholding_en)
+-              return 0;
+-
+       for (bank = 0; bank < mca_cfg.banks; ++bank) {
+               if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+                       continue;
+@@ -1376,9 +1372,6 @@ int mce_threshold_create_device(unsigned
+       struct threshold_bank **bp;
+       int err = 0;
+-      if (!thresholding_en)
+-              return 0;
+-
+       bp = per_cpu(threshold_banks, cpu);
+       if (bp)
+               return 0;
+@@ -1407,9 +1400,6 @@ static __init int threshold_init_device(
+ {
+       unsigned lcpu = 0;
+-      if (mce_threshold_vector == amd_threshold_interrupt)
+-              thresholding_en = true;
+-
+       /* to hit CPUs online before the notifier is up */
+       for_each_online_cpu(lcpu) {
+               int err = mce_threshold_create_device(lcpu);
+@@ -1418,6 +1408,9 @@ static __init int threshold_init_device(
+                       return err;
+       }
++      if (thresholding_irq_en)
++              mce_threshold_vector = amd_threshold_interrupt;
++
+       return 0;
+ }
+ /*
diff --git a/queue-4.14/xtensa-enable-coprocessors-that-are-being-flushed.patch b/queue-4.14/xtensa-enable-coprocessors-that-are-being-flushed.patch
new file mode 100644 (file)
index 0000000..705184c
--- /dev/null
@@ -0,0 +1,70 @@
+From 2958b66694e018c552be0b60521fec27e8d12988 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 13:29:41 -0800
+Subject: xtensa: enable coprocessors that are being flushed
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 2958b66694e018c552be0b60521fec27e8d12988 upstream.
+
+coprocessor_flush_all may be called from the context of a thread that
+is different from the thread being flushed. In that case the contents
+of the cpenable special register may not match ti->cpenable of the
+target thread, resulting in an unhandled coprocessor exception in
+kernel context.
+
+Set the cpenable special register to the target thread's ti->cpenable
+for the duration of the flush and restore it afterwards.
+
+This fixes the following crash caused by coprocessor register inspection
+in native gdb:
+
+  (gdb) p/x $w0
+  Illegal instruction in kernel: sig: 9 [#1] PREEMPT
+  Call Trace:
+    ___might_sleep+0x184/0x1a4
+    __might_sleep+0x41/0xac
+    exit_signals+0x14/0x218
+    do_exit+0xc9/0x8b8
+    die+0x99/0xa0
+    do_illegal_instruction+0x18/0x6c
+    common_exception+0x77/0x77
+    coprocessor_flush+0x16/0x3c
+    arch_ptrace+0x46c/0x674
+    sys_ptrace+0x2ce/0x3b4
+    system_call+0x54/0x80
+    common_exception+0x77/0x77
+  note: gdb[100] exited with preempt_count 1
+  Killed
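+
+The fix follows a plain save/switch/restore pattern around the flush
+loop; a trimmed sketch (the full change is in the hunk below,
+RSR_CPENABLE/WSR_CPENABLE are the existing accessors for the CPENABLE
+special register):
+
+  preempt_disable();
+  RSR_CPENABLE(old_cpenable);  /* remember the caller's CPENABLE */
+  cpenable = ti->cpenable;
+  WSR_CPENABLE(cpenable);      /* enable the target thread's coprocessors */
+  /* ... coprocessor_flush() each enabled coprocessor owned by ti ... */
+  WSR_CPENABLE(old_cpenable);  /* put the caller's CPENABLE back */
+  preempt_enable();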
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/process.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -88,18 +88,21 @@ void coprocessor_release_all(struct thre
+ void coprocessor_flush_all(struct thread_info *ti)
+ {
+-      unsigned long cpenable;
++      unsigned long cpenable, old_cpenable;
+       int i;
+       preempt_disable();
++      RSR_CPENABLE(old_cpenable);
+       cpenable = ti->cpenable;
++      WSR_CPENABLE(cpenable);
+       for (i = 0; i < XCHAL_CP_MAX; i++) {
+               if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+                       coprocessor_flush(ti, i);
+               cpenable >>= 1;
+       }
++      WSR_CPENABLE(old_cpenable);
+       preempt_enable();
+ }
diff --git a/queue-4.14/xtensa-fix-coprocessor-context-offset-definitions.patch b/queue-4.14/xtensa-fix-coprocessor-context-offset-definitions.patch
new file mode 100644 (file)
index 0000000..0c318fd
--- /dev/null
@@ -0,0 +1,49 @@
+From 03bc996af0cc71c7f30c384d8ce7260172423b34 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 15:18:26 -0800
+Subject: xtensa: fix coprocessor context offset definitions
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 03bc996af0cc71c7f30c384d8ce7260172423b34 upstream.
+
+Coprocessor context offsets are used by the assembly code that moves
+coprocessor context between the individual fields of the
+thread_info::xtregs_cp structure and coprocessor registers.
+This fixes coprocessor context clobbering on flushing and reloading
+during normal user code execution and user process debugging in the
+presence of more than one coprocessor in the core configuration.
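+
+Previously every THREAD_XTREGS_CPn constant expanded to the same
+offset, the start of xtregs_cp, so all coprocessors shared the cp0 save
+area. For a single slot the change amounts to the following (copied
+from the hunk below):
+
+  /* before: aliases the start of xtregs_cp */
+  DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+  /* after: each coprocessor gets its own field */
+  DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));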
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/asm-offsets.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/xtensa/kernel/asm-offsets.c
++++ b/arch/xtensa/kernel/asm-offsets.c
+@@ -91,14 +91,14 @@ int main(void)
+       DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+       DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+ #if XTENSA_HAVE_COPROCESSORS
+-      DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+-      DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
++      DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
++      DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
++      DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
++      DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
++      DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
++      DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
++      DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
++      DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
+ #endif
+       DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+       DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/queue-4.14/xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch b/queue-4.14/xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch
new file mode 100644 (file)
index 0000000..a3303f1
--- /dev/null
@@ -0,0 +1,106 @@
+From 38a35a78c5e270cbe53c4fef6b0d3c2da90dd849 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 18:06:01 -0800
+Subject: xtensa: fix coprocessor part of ptrace_{get,set}xregs
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 38a35a78c5e270cbe53c4fef6b0d3c2da90dd849 upstream.
+
+The layout of coprocessor registers in elf_xtregs_t and
+xtregs_coprocessor_t may differ due to alignment, so a single bulk copy
+between the two structures does not always yield correct values for all
+registers.
+
+Use a table of the offsets and sizes of the individual coprocessor
+register groups to copy the coprocessor context in ptrace_getxregs()
+and ptrace_setxregs().
+
+This fixes native gdb reading incorrect coprocessor register values
+from the user process on an xtensa core with multiple coprocessors and
+registers with high alignment requirements.
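+
+A condensed view of the getxregs side of the change (the setxregs side
+mirrors it; cp_offsets[] is the offset/size table added by the hunk
+below):
+
+  for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+          ret |= __copy_to_user((char __user *)xtregs +
+                                cp_offsets[i].elf_xtregs_offset,
+                                (const char *)ti + cp_offsets[i].ti_offset,
+                                cp_offsets[i].sz);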
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/ptrace.c |   42 ++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 38 insertions(+), 4 deletions(-)
+
+--- a/arch/xtensa/kernel/ptrace.c
++++ b/arch/xtensa/kernel/ptrace.c
+@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_st
+ }
++#if XTENSA_HAVE_COPROCESSORS
++#define CP_OFFSETS(cp) \
++      { \
++              .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
++              .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
++              .sz = sizeof(xtregs_ ## cp ## _t), \
++      }
++
++static const struct {
++      size_t elf_xtregs_offset;
++      size_t ti_offset;
++      size_t sz;
++} cp_offsets[] = {
++      CP_OFFSETS(cp0),
++      CP_OFFSETS(cp1),
++      CP_OFFSETS(cp2),
++      CP_OFFSETS(cp3),
++      CP_OFFSETS(cp4),
++      CP_OFFSETS(cp5),
++      CP_OFFSETS(cp6),
++      CP_OFFSETS(cp7),
++};
++#endif
++
+ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+ {
+       struct pt_regs *regs = task_pt_regs(child);
+       struct thread_info *ti = task_thread_info(child);
+       elf_xtregs_t __user *xtregs = uregs;
+       int ret = 0;
++      int i __maybe_unused;
+       if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+               return -EIO;
+@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_s
+ #if XTENSA_HAVE_COPROCESSORS
+       /* Flush all coprocessor registers to memory. */
+       coprocessor_flush_all(ti);
+-      ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+-                            sizeof(xtregs_coprocessor_t));
++
++      for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
++              ret |= __copy_to_user((char __user *)xtregs +
++                                    cp_offsets[i].elf_xtregs_offset,
++                                    (const char *)ti +
++                                    cp_offsets[i].ti_offset,
++                                    cp_offsets[i].sz);
+ #endif
+       ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+                             sizeof(xtregs->opt));
+@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_s
+       struct pt_regs *regs = task_pt_regs(child);
+       elf_xtregs_t *xtregs = uregs;
+       int ret = 0;
++      int i __maybe_unused;
+       if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+               return -EFAULT;
+@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_s
+       coprocessor_flush_all(ti);
+       coprocessor_release_all(ti);
+-      ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+-                              sizeof(xtregs_coprocessor_t));
++      for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
++              ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
++                                      (const char __user *)xtregs +
++                                      cp_offsets[i].elf_xtregs_offset,
++                                      cp_offsets[i].sz);
+ #endif
+       ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+                               sizeof(xtregs->opt));