--- /dev/null
+From 7194eda1ba0872d917faf3b322540b4f57f11ba5 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 15:44:00 +0100
+Subject: ALSA: ac97: Fix incorrect bit shift at AC97-SPSA control write
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7194eda1ba0872d917faf3b322540b4f57f11ba5 upstream.
+
+The function snd_ac97_put_spsa() gets the bit shift value from the
+associated private_value, but it extracts too much; the current code
+extracts an 8-bit value from bits 8-15, but that field is a combination
+of two nibbles (bits 8-11 and bits 12-15) for the left and right shifts.
+Due to the incorrect bit extraction, the actual shift may go beyond the
+32-bit value, as spotted recently by a UBSAN check:
+ UBSAN: Undefined behaviour in sound/pci/ac97/ac97_codec.c:836:7
+ shift exponent 68 is too large for 32-bit type 'int'
+
+This patch fixes the shift value extraction by masking the value
+properly with 0x0f instead of 0xff.
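+
+For reference, a minimal sketch of the private_value layout this
+control uses (the lshift/rshift names are illustrative only, derived
+from the code below):
+
+  reg    = private_value & 0xff;          /* AC'97 register       */
+  lshift = (private_value >> 8) & 0x0f;   /* left-channel shift   */
+  rshift = (private_value >> 12) & 0x0f;  /* right-channel shift  */
+  mask   = (private_value >> 16) & 0xff;  /* value mask           */
+
+Masking the shift with 0xff instead pulls in the right-shift nibble as
+well, which is how a shift exponent like 68 (0x44, i.e. both nibbles)
+ended up being used.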
+
+Reported-and-tested-by: Meelis Roos <mroos@linux.ee>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/ac97/ac97_codec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_
+ {
+ struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
+ int reg = kcontrol->private_value & 0xff;
+- int shift = (kcontrol->private_value >> 8) & 0xff;
++ int shift = (kcontrol->private_value >> 8) & 0x0f;
+ int mask = (kcontrol->private_value >> 16) & 0xff;
+ // int invert = (kcontrol->private_value >> 24) & 0xff;
+ unsigned short value, old, new;
--- /dev/null
+From e1a7bfe3807974e66f971f2589d4e0197ec0fced Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 22 Nov 2018 14:36:17 +0100
+Subject: ALSA: control: Fix race between adding and removing a user element
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit e1a7bfe3807974e66f971f2589d4e0197ec0fced upstream.
+
+The procedure for adding a user control element leaves a window open
+for a race against the concurrent removal of a user element. This was
+caught by syzkaller, hitting a KASAN use-after-free error.
+
+This patch addresses the bug by wrapping the whole procedure to add a
+user control element with the card->controls_rwsem, instead of only
+around the increment of card->user_ctl_count.
+
+This required a slight code refactoring, too. The function
+snd_ctl_add() is split into two parts: a core function that adds the
+control element and a wrapper that calls it. The former is called from
+the function for adding a user control element inside the controls_rwsem.
+
+One change to be noted is that snd_ctl_notify() for adding a control
+element now gets called inside the controls_rwsem as well, whereas it
+was previously called outside the rwsem. But this should be OK, as
+snd_ctl_notify() takes another (finer) rwlock instead of the rwsem, and
+snd_ctl_notify() is already called inside the rwsem in another code path.
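+
+For reference, a minimal sketch of the resulting flow in
+snd_ctl_elem_add() (illustrative only, see the hunks below):
+
+  down_write(&card->controls_rwsem);
+  err = __snd_ctl_add(card, kctl);   /* lookup, insert and notify under the rwsem */
+  if (err < 0) {
+          snd_ctl_free_one(kctl);
+          goto unlock;
+  }
+  ...
+  card->user_ctl_count++;
+ unlock:
+  up_write(&card->controls_rwsem);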
+
+Reported-by: syzbot+dc09047bce3820621ba2@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/control.c | 80 ++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 45 insertions(+), 35 deletions(-)
+
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -348,6 +348,40 @@ static int snd_ctl_find_hole(struct snd_
+ return 0;
+ }
+
++/* add a new kcontrol object; call with card->controls_rwsem locked */
++static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
++{
++ struct snd_ctl_elem_id id;
++ unsigned int idx;
++ unsigned int count;
++
++ id = kcontrol->id;
++ if (id.index > UINT_MAX - kcontrol->count)
++ return -EINVAL;
++
++ if (snd_ctl_find_id(card, &id)) {
++ dev_err(card->dev,
++ "control %i:%i:%i:%s:%i is already present\n",
++ id.iface, id.device, id.subdevice, id.name, id.index);
++ return -EBUSY;
++ }
++
++ if (snd_ctl_find_hole(card, kcontrol->count) < 0)
++ return -ENOMEM;
++
++ list_add_tail(&kcontrol->list, &card->controls);
++ card->controls_count += kcontrol->count;
++ kcontrol->id.numid = card->last_numid + 1;
++ card->last_numid += kcontrol->count;
++
++ id = kcontrol->id;
++ count = kcontrol->count;
++ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
++
++ return 0;
++}
++
+ /**
+ * snd_ctl_add - add the control instance to the card
+ * @card: the card instance
+@@ -364,45 +398,18 @@ static int snd_ctl_find_hole(struct snd_
+ */
+ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+ {
+- struct snd_ctl_elem_id id;
+- unsigned int idx;
+- unsigned int count;
+ int err = -EINVAL;
+
+ if (! kcontrol)
+ return err;
+ if (snd_BUG_ON(!card || !kcontrol->info))
+ goto error;
+- id = kcontrol->id;
+- if (id.index > UINT_MAX - kcontrol->count)
+- goto error;
+
+ down_write(&card->controls_rwsem);
+- if (snd_ctl_find_id(card, &id)) {
+- up_write(&card->controls_rwsem);
+- dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
+- id.iface,
+- id.device,
+- id.subdevice,
+- id.name,
+- id.index);
+- err = -EBUSY;
+- goto error;
+- }
+- if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
+- up_write(&card->controls_rwsem);
+- err = -ENOMEM;
+- goto error;
+- }
+- list_add_tail(&kcontrol->list, &card->controls);
+- card->controls_count += kcontrol->count;
+- kcontrol->id.numid = card->last_numid + 1;
+- card->last_numid += kcontrol->count;
+- id = kcontrol->id;
+- count = kcontrol->count;
++ err = __snd_ctl_add(card, kcontrol);
+ up_write(&card->controls_rwsem);
+- for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
++ if (err < 0)
++ goto error;
+ return 0;
+
+ error:
+@@ -1361,9 +1368,12 @@ static int snd_ctl_elem_add(struct snd_c
+ kctl->tlv.c = snd_ctl_elem_user_tlv;
+
+ /* This function manage to free the instance on failure. */
+- err = snd_ctl_add(card, kctl);
+- if (err < 0)
+- return err;
++ down_write(&card->controls_rwsem);
++ err = __snd_ctl_add(card, kctl);
++ if (err < 0) {
++ snd_ctl_free_one(kctl);
++ goto unlock;
++ }
+ offset = snd_ctl_get_ioff(kctl, &info->id);
+ snd_ctl_build_ioff(&info->id, kctl, offset);
+ /*
+@@ -1374,10 +1384,10 @@ static int snd_ctl_elem_add(struct snd_c
+ * which locks the element.
+ */
+
+- down_write(&card->controls_rwsem);
+ card->user_ctl_count++;
+- up_write(&card->controls_rwsem);
+
++ unlock:
++ up_write(&card->controls_rwsem);
+ return 0;
+ }
+
--- /dev/null
+From 39070a98d668db8fbaa2a6a6752f732cbcbb14b1 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 22 Nov 2018 12:38:12 +0100
+Subject: ALSA: hda: Add ASRock N68C-S UCC to the power_save blacklist
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 39070a98d668db8fbaa2a6a6752f732cbcbb14b1 upstream.
+
+Power-saving causes plops on audio start/stop on the built-in audio
+of the nForce 430 based ASRock N68C-S UCC motherboard; add this model to
+the power_save blacklist.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1525104
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2256,6 +2256,8 @@ static struct snd_pci_quirk power_save_b
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
++ SND_PCI_QUIRK(0x1849, 0x0397, "Asrock N68C-S UCC", 0),
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
--- /dev/null
+From e8ed64b08eddc05043e556832616a478bbe4bb00 Mon Sep 17 00:00:00 2001
+From: Girija Kumar Kasinadhuni <gkumar@neverware.com>
+Date: Mon, 26 Nov 2018 13:40:46 -0500
+Subject: ALSA: hda/realtek - Add auto-mute quirk for HP Spectre x360 laptop
+
+From: Girija Kumar Kasinadhuni <gkumar@neverware.com>
+
+commit e8ed64b08eddc05043e556832616a478bbe4bb00 upstream.
+
+This device makes a loud buzzing sound when a headphone is inserted while
+playing audio at full volume through the speaker.
+
+Fixes: bbf8ff6b1d2a ("ALSA: hda/realtek - Fixup for HP x360 laptops with B&O speakers")
+Signed-off-by: Girija Kumar Kasinadhuni <gkumar@neverware.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5509,6 +5509,7 @@ enum {
+ ALC295_FIXUP_HP_X360,
+ ALC221_FIXUP_HP_HEADSET_MIC,
+ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
++ ALC295_FIXUP_HP_AUTO_MUTE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5673,6 +5674,8 @@ static const struct hda_fixup alc269_fix
+ [ALC269_FIXUP_HP_MUTE_LED_MIC3] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_hp_mute_led_mic3,
++ .chained = true,
++ .chain_id = ALC295_FIXUP_HP_AUTO_MUTE
+ },
+ [ALC269_FIXUP_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -6380,6 +6383,10 @@ static const struct hda_fixup alc269_fix
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_invalidate_dacs,
+ },
++ [ALC295_FIXUP_HP_AUTO_MUTE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_auto_mute_via_amp,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
--- /dev/null
+From 8cd65271f8e545ddeed10ecc2e417936bdff168e Mon Sep 17 00:00:00 2001
+From: Anisse Astier <anisse@astier.eu>
+Date: Fri, 23 Nov 2018 17:59:11 +0100
+Subject: ALSA: hda/realtek - fix headset mic detection for MSI MS-B171
+
+From: Anisse Astier <anisse@astier.eu>
+
+commit 8cd65271f8e545ddeed10ecc2e417936bdff168e upstream.
+
+MSI Cubi N 8GL (MS-B171) needs the same fixup as its older model, the
+MS-B120, in order for the headset mic to be properly detected.
+
+They both use a single 3-way jack for both mic and headset with an
+ALC283 codec, with the same pins used.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Anisse Astier <anisse@astier.eu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6535,6 +6535,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
--- /dev/null
+From c4cfcf6f4297c9256b53790bacbbbd6901fef468 Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Mon, 26 Nov 2018 14:17:16 +0800
+Subject: ALSA: hda/realtek - fix the pop noise on headphone for lenovo laptops
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit c4cfcf6f4297c9256b53790bacbbbd6901fef468 upstream.
+
+We have several Lenovo laptops with the ALC285 codec. When playing
+sound via the headphone output, we can hear click/pop noise in the
+headphone; if we let the headphone share the DAC of NID 0x2 with the
+speaker, the noise disappears.
+
+The Lenovo laptops here include the P52, P72, X1 Yoga (2nd gen) and
+X1 Carbon.
+
+I have tried setting preferred_dacs and override_conn, but neither of
+them worked. Thanks to Kailang, who suggested invalidating NID 0x3
+through override_wcaps.
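+
+For reference, a minimal sketch of the fixup this adds (see the hunks
+below); dropping the widget caps of NID 0x3 makes the headphone end up
+sharing the 0x2 DAC with the speaker:
+
+  static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
+                                           const struct hda_fixup *fix, int action)
+  {
+          if (action != HDA_FIXUP_ACT_PRE_PROBE)
+                  return;
+          snd_hda_override_wcaps(codec, 0x03, 0);
+  }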
+
+BugLink: https://bugs.launchpad.net/bugs/1805079
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5361,6 +5361,16 @@ static void alc274_fixup_bind_dacs(struc
+ spec->gen.preferred_dacs = preferred_pairs;
+ }
+
++/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
++static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action != HDA_FIXUP_ACT_PRE_PROBE)
++ return;
++
++ snd_hda_override_wcaps(codec, 0x03, 0);
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+
+@@ -5498,6 +5508,7 @@ enum {
+ ALC255_FIXUP_DELL_HEADSET_MIC,
+ ALC295_FIXUP_HP_X360,
+ ALC221_FIXUP_HP_HEADSET_MIC,
++ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6365,6 +6376,10 @@ static const struct hda_fixup alc269_fix
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
++ [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_invalidate_dacs,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7038,6 +7053,11 @@ static const struct snd_hda_pin_quirk al
+ {0x12, 0x90a60130},
+ {0x19, 0x03a11020},
+ {0x21, 0x0321101f}),
++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
++ {0x12, 0x90a60130},
++ {0x14, 0x90170110},
++ {0x19, 0x04a11040},
++ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60120},
+ {0x14, 0x90170110},
--- /dev/null
+From 1078bef0cd9291355a20369b21cd823026ab8eaa Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Thu, 8 Nov 2018 16:36:15 +0800
+Subject: ALSA: hda/realtek - Support ALC300
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 1078bef0cd9291355a20369b21cd823026ab8eaa upstream.
+
+This patch will enable ALC300.
+
+[ It's almost equivalent to other ALC269-compatible codecs, and
+ apparently has no loopback mixer -- tiwai ]
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -388,6 +388,7 @@ static void alc_fill_eapd_coef(struct hd
+ case 0x10ec0285:
+ case 0x10ec0298:
+ case 0x10ec0289:
++ case 0x10ec0300:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ break;
+ case 0x10ec0275:
+@@ -2830,6 +2831,7 @@ enum {
+ ALC269_TYPE_ALC215,
+ ALC269_TYPE_ALC225,
+ ALC269_TYPE_ALC294,
++ ALC269_TYPE_ALC300,
+ ALC269_TYPE_ALC700,
+ };
+
+@@ -2864,6 +2866,7 @@ static int alc269_parse_auto_config(stru
+ case ALC269_TYPE_ALC215:
+ case ALC269_TYPE_ALC225:
+ case ALC269_TYPE_ALC294:
++ case ALC269_TYPE_ALC300:
+ case ALC269_TYPE_ALC700:
+ ssids = alc269_ssids;
+ break;
+@@ -7295,6 +7298,10 @@ static int patch_alc269(struct hda_codec
+ spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+ break;
++ case 0x10ec0300:
++ spec->codec_variant = ALC269_TYPE_ALC300;
++ spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
++ break;
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
+@@ -8404,6 +8411,7 @@ static const struct hda_device_id snd_hd
+ HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
++ HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+ HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
+ HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
+ HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
--- /dev/null
+From 9a20332ab373b1f8f947e0a9c923652b32dab031 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 18:18:30 +0100
+Subject: ALSA: sparc: Fix invalid snd_free_pages() at error path
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 9a20332ab373b1f8f947e0a9c923652b32dab031 upstream.
+
+Some spurious calls of snd_free_pages() have been overlooked and
+remain in the error paths of sparc cs4231 driver code. Since
+runtime->dma_area is managed by the PCM core helper, we shouldn't
+release it manually.
+
+Drop the superfluous calls.
+
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/sparc/cs4231.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/sound/sparc/cs4231.c
++++ b/sound/sparc/cs4231.c
+@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(stru
+ runtime->hw = snd_cs4231_playback;
+
+ err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
+- if (err < 0) {
+- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
++ if (err < 0)
+ return err;
+- }
+ chip->playback_substream = substream;
+ chip->p_periods_sent = 0;
+ snd_pcm_set_sync(substream);
+@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struc
+ runtime->hw = snd_cs4231_capture;
+
+ err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
+- if (err < 0) {
+- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
++ if (err < 0)
+ return err;
+- }
+ chip->capture_substream = substream;
+ chip->c_periods_sent = 0;
+ snd_pcm_set_sync(substream);
--- /dev/null
+From 7b69154171b407844c273ab4c10b5f0ddcd6aa29 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 23 Nov 2018 18:16:33 +0100
+Subject: ALSA: wss: Fix invalid snd_free_pages() at error path
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7b69154171b407844c273ab4c10b5f0ddcd6aa29 upstream.
+
+Some spurious calls of snd_free_pages() have been overlooked and
+remain in the error paths of wss driver code. Since runtime->dma_area
+is managed by the PCM core helper, we shouldn't release it manually.
+
+Drop the superfluous calls.
+
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/isa/wss/wss_lib.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/sound/isa/wss/wss_lib.c
++++ b/sound/isa/wss/wss_lib.c
+@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct
+ if (err < 0) {
+ if (chip->release_dma)
+ chip->release_dma(chip, chip->dma_private_data, chip->dma1);
+- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ return err;
+ }
+ chip->playback_substream = substream;
+@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct s
+ if (err < 0) {
+ if (chip->release_dma)
+ chip->release_dma(chip, chip->dma_private_data, chip->dma2);
+- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ return err;
+ }
+ chip->capture_substream = substream;
--- /dev/null
+From c1d91f86a1b4c9c05854d59c6a0abd5d0f75b849 Mon Sep 17 00:00:00 2001
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Date: Tue, 13 Nov 2018 11:25:35 +0100
+Subject: arm64: dts: rockchip: Fix PCIe reset polarity for rk3399-puma-haikou.
+
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+
+commit c1d91f86a1b4c9c05854d59c6a0abd5d0f75b849 upstream.
+
+This patch fixes the wrong polarity setting for the PCIe host driver's
+pre-reset pin for rk3399-puma-haikou. Without this patch, link training
+will most likely fail.
+
+Fixes: 60fd9f72ce8a ("arm64: dts: rockchip: add Haikou baseboard with RK3399-Q7 SoM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+@@ -153,7 +153,7 @@
+ };
+
+ &pcie0 {
+- ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>;
++ ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn_cpm>;
--- /dev/null
+From f8397d69daef06d358430d3054662fb597e37c00 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 6 Nov 2018 16:40:20 +0200
+Subject: btrfs: Always try all copies when reading extent buffers
+
+From: Nikolay Borisov <nborisov@suse.com>
+
+commit f8397d69daef06d358430d3054662fb597e37c00 upstream.
+
+When a metadata read is served, the endio routine btree_readpage_end_io_hook
+is called, which eventually runs the tree-checker. If the tree-checker fails
+to validate the read eb, it sets the EXTENT_BUFFER_CORRUPT flag. This
+leads to btree_read_extent_buffer_pages wrongly assuming that all
+available copies of this extent buffer are wrong and failing prematurely.
+Fix this by modifying btree_read_extent_buffer_pages to read all copies
+of the data.
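+
+For reference, a sketch of the resulting retry loop (simplified; the
+transid/fsid checks are elided):
+
+  while (1) {
+          clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);  /* reset before each attempt */
+          ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE, mirror_num);
+          if (!ret)
+                  break;
+          /* otherwise pick the next mirror and try again */
+  }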
+
+This failure was exhibited in xfstests btrfs/124, which would
+spuriously fail its balance operations. The reason was that when balance
+was run following re-introduction of the missing raid1 disk,
+__btrfs_map_block would map the read request to stripe 0, which
+corresponded to devid 2 (the disk being removed in the test):
+
+ item 2 key (FIRST_CHUNK_TREE CHUNK_ITEM 3553624064) itemoff 15975 itemsize 112
+ length 1073741824 owner 2 stripe_len 65536 type DATA|RAID1
+ io_align 65536 io_width 65536 sector_size 4096
+ num_stripes 2 sub_stripes 1
+ stripe 0 devid 2 offset 2156920832
+ dev_uuid 8466c350-ed0c-4c3b-b17d-6379b445d5c8
+ stripe 1 devid 1 offset 3553624064
+ dev_uuid 1265d8db-5596-477e-af03-df08eb38d2ca
+
+This caused read requests for a checksum item to be routed to the
+stale disk, which triggered the aforementioned logic involving the
+EXTENT_BUFFER_CORRUPT flag. This then triggered cascading failures of
+the balance operation.
+
+Fixes: a826d6dcb32d ("Btrfs: check items for correctness as we search")
+CC: stable@vger.kernel.org # 4.4+
+Suggested-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -477,9 +477,9 @@ static int btree_read_extent_buffer_page
+ int mirror_num = 0;
+ int failed_mirror = 0;
+
+- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
+ io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
+ while (1) {
++ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
+ ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
+ mirror_num);
+ if (!ret) {
+@@ -493,15 +493,6 @@ static int btree_read_extent_buffer_page
+ break;
+ }
+
+- /*
+- * This buffer's crc is fine, but its contents are corrupted, so
+- * there is no reason to read the other copies, they won't be
+- * any less wrong.
+- */
+- if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
+- ret == -EUCLEAN)
+- break;
+-
+ num_copies = btrfs_num_copies(fs_info,
+ eb->start, eb->len);
+ if (num_copies == 1)
--- /dev/null
+From f505754fd6599230371cb01b9332754ddc104be1 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 14 Nov 2018 11:35:24 +0000
+Subject: Btrfs: ensure path name is null terminated at btrfs_control_ioctl
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit f505754fd6599230371cb01b9332754ddc104be1 upstream.
+
+We were using the path name received from user space without checking that
+it is null terminated. While btrfs-progs is well behaved and does proper
+validation and null termination, someone could call the ioctl and pass
+a non-null-terminated path, leading to buffer overrun problems in the
+kernel. The ioctl is protected by CAP_SYS_ADMIN.
+
+So just set the last byte of the path to a null character, similar to what
+we do in other ioctls (add/remove/resize device, snapshot creation, etc).
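+
+For reference, the pattern applied here (see the hunk below); name[] is
+BTRFS_PATH_NAME_MAX + 1 bytes, so the last byte is forced to '\0':
+
+  vol = memdup_user((void __user *)arg, sizeof(*vol));
+  if (IS_ERR(vol))
+          return PTR_ERR(vol);
+  vol->name[BTRFS_PATH_NAME_MAX] = '\0';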
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/super.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2235,6 +2235,7 @@ static long btrfs_control_ioctl(struct f
+ vol = memdup_user((void __user *)arg, sizeof(*vol));
+ if (IS_ERR(vol))
+ return PTR_ERR(vol);
++ vol->name[BTRFS_PATH_NAME_MAX] = '\0';
+
+ switch (cmd) {
+ case BTRFS_IOC_SCAN_DEV:
--- /dev/null
+From 552f0329c75b3e1d7f9bb8c9e421d37403f192cd Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 19 Nov 2018 16:20:34 +0000
+Subject: Btrfs: fix race between enabling quotas and subvolume creation
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 552f0329c75b3e1d7f9bb8c9e421d37403f192cd upstream.
+
+We have a race between enabling quotas and subvolume creation that causes
+subvolume creation to fail with -EINVAL; the following diagram shows
+how it happens:
+
+ CPU 0 CPU 1
+
+ btrfs_ioctl()
+ btrfs_ioctl_quota_ctl()
+ btrfs_quota_enable()
+ mutex_lock(fs_info->qgroup_ioctl_lock)
+
+ btrfs_ioctl()
+ create_subvol()
+ btrfs_qgroup_inherit()
+ -> save fs_info->quota_root
+ into quota_root
+ -> stores a NULL value
+ -> tries to lock the mutex
+ qgroup_ioctl_lock
+ -> blocks waiting for
+ the task at CPU0
+
+ -> sets BTRFS_FS_QUOTA_ENABLED in fs_info
+ -> sets quota_root in fs_info->quota_root
+ (non-NULL value)
+
+ mutex_unlock(fs_info->qgroup_ioctl_lock)
+
+ -> checks quota enabled
+ flag is set
+ -> returns -EINVAL because
+ fs_info->quota_root was
+ NULL before it acquired
+ the mutex
+ qgroup_ioctl_lock
+ -> ioctl returns -EINVAL
+
+Returning -EINVAL to user space will be confusing if all the arguments
+passed to the subvolume creation ioctl were valid.
+
+Fix it by grabbing the value from fs_info->quota_root after acquiring
+the mutex.
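+
+For reference, a sketch of the corrected ordering in
+btrfs_qgroup_inherit() (illustrative only):
+
+  mutex_lock(&fs_info->qgroup_ioctl_lock);
+  if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+          goto out;
+  quota_root = fs_info->quota_root;   /* read only while the mutex is held */
+  if (!quota_root) {
+          ret = -EINVAL;
+          goto out;
+  }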
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/qgroup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2244,7 +2244,7 @@ int btrfs_qgroup_inherit(struct btrfs_tr
+ int i;
+ u64 *i_qgroups;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+- struct btrfs_root *quota_root = fs_info->quota_root;
++ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *srcgroup;
+ struct btrfs_qgroup *dstgroup;
+ u32 level_size = 0;
+@@ -2254,6 +2254,7 @@ int btrfs_qgroup_inherit(struct btrfs_tr
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ goto out;
+
++ quota_root = fs_info->quota_root;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
--- /dev/null
+From aab15e8ec25765cf7968c72cbec7583acf99d8a4 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 12 Nov 2018 10:23:58 +0000
+Subject: Btrfs: fix rare chances for data loss when doing a fast fsync
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit aab15e8ec25765cf7968c72cbec7583acf99d8a4 upstream.
+
+After the simplification of the fast fsync patch done recently by commit
+b5e6c3e170b7 ("btrfs: always wait on ordered extents at fsync time") and
+commit e7175a692765 ("btrfs: remove the wait ordered logic in the
+log_one_extent path"), we got a very short time window where we can get
+extents logged without writeback completing first or extents logged
+without logging the respective data checksums. Both issues can only happen
+when doing a non-full (fast) fsync.
+
+As soon as we enter btrfs_sync_file() we trigger writeback, then lock the
+inode and then wait for the writeback to complete before starting to log
+the inode. However before we acquire the inode's lock and after we started
+writeback, it's possible that more writes happened and dirtied more pages.
+If that happened and those pages get writeback triggered while we are
+logging the inode (for example, the VM subsystem triggering it due to
+memory pressure, or another concurrent fsync), we end up seeing the
+respective extent maps in the inode's list of modified extents and will
+log matching file extent items without waiting for the respective
+ordered extents to complete, meaning that either of the following will
+happen:
+
+1) We log an extent after its writeback finishes but before its checksums
+ are added to the csum tree, leading to -EIO errors when attempting to
+ read the extent after a log replay.
+
+2) We log an extent before its writeback finishes.
+ Therefore after the log replay we will have a file extent item pointing
+ to an unwritten extent (and without the respective data checksums as
+ well).
+
+This could not happen before the fast fsync patch simplification, because
+for any extent we found in the list of modified extents, we would wait for
+its respective ordered extent to finish writeback or collect its checksums
+for logging if it did not complete yet.
+
+Fix this by triggering writeback again after acquiring the inode's lock
+and before waiting for ordered extents to complete.
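+
+For reference, a sketch of the resulting flow in btrfs_sync_file()
+(illustrative only):
+
+  ret = start_ordered_ops(inode, start, end);   /* kick writeback */
+  ...
+  inode_lock(inode);
+  ret = start_ordered_ops(inode, start, end);   /* catch pages dirtied before the lock */
+  ...
+  /* wait for ordered extents, then log the inode */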
+
+Fixes: e7175a692765 ("btrfs: remove the wait ordered logic in the log_one_extent path")
+Fixes: b5e6c3e170b7 ("btrfs: always wait on ordered extents at fsync time")
+CC: stable@vger.kernel.org # 4.19+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/file.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2089,6 +2089,30 @@ int btrfs_sync_file(struct file *file, l
+ atomic_inc(&root->log_batch);
+
+ /*
++ * Before we acquired the inode's lock, someone may have dirtied more
++ * pages in the target range. We need to make sure that writeback for
++ * any such pages does not start while we are logging the inode, because
++ * if it does, any of the following might happen when we are not doing a
++ * full inode sync:
++ *
++ * 1) We log an extent after its writeback finishes but before its
++ * checksums are added to the csum tree, leading to -EIO errors
++ * when attempting to read the extent after a log replay.
++ *
++ * 2) We can end up logging an extent before its writeback finishes.
++ * Therefore after the log replay we will have a file extent item
++ * pointing to an unwritten extent (and no data checksums as well).
++ *
++ * So trigger writeback for any eventual new dirty pages and then we
++ * wait for all ordered extents to complete below.
++ */
++ ret = start_ordered_ops(inode, start, end);
++ if (ret) {
++ inode_unlock(inode);
++ goto out;
++ }
++
++ /*
+ * We have to do this here to avoid the priority inversion of waiting on
+ * IO of a lower priority task while holding a transaciton open.
+ */
--- /dev/null
+From 42a657f57628402c73237547f0134e083e2f6764 Mon Sep 17 00:00:00 2001
+From: Pan Bian <bianpan2016@163.com>
+Date: Fri, 23 Nov 2018 18:10:15 +0800
+Subject: btrfs: relocation: set trans to be NULL after ending transaction
+
+From: Pan Bian <bianpan2016@163.com>
+
+commit 42a657f57628402c73237547f0134e083e2f6764 upstream.
+
+The function relocate_block_group calls btrfs_end_transaction to release
+trans when update_backref_cache returns 1, and then continues the loop
+body. If btrfs_block_rsv_refill fails this time, it will jump out the
+loop and the freed trans will be accessed. This may result in a
+use-after-free bug. The patch assigns NULL to trans after trans is
+released so that it will not be accessed.
+
+Fixes: 0647bf564f1 ("Btrfs: improve forever loop when doing balance relocation")
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/relocation.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3963,6 +3963,7 @@ static noinline_for_stack int relocate_b
+ restart:
+ if (update_backref_cache(trans, &rc->backref_cache)) {
+ btrfs_end_transaction(trans);
++ trans = NULL;
+ continue;
+ }
+
--- /dev/null
+From 41e817bca3acd3980efe5dd7d28af0e6f4ab9247 Mon Sep 17 00:00:00 2001
+From: Maximilian Heyne <mheyne@amazon.de>
+Date: Fri, 30 Nov 2018 08:35:14 -0700
+Subject: fs: fix lost error code in dio_complete
+
+From: Maximilian Heyne <mheyne@amazon.de>
+
+commit 41e817bca3acd3980efe5dd7d28af0e6f4ab9247 upstream.
+
+commit e259221763a40403d5bb232209998e8c45804ab8 ("fs: simplify the
+generic_write_sync prototype") reworked callers of generic_write_sync(),
+and ended up dropping the error return for the directio path. Prior to
+that commit, in dio_complete(), an error would be bubbled up the stack,
+but after that commit, errors passed on to dio_complete were eaten up.
+
+This was reported on the list earlier, and a fix was proposed in
+https://lore.kernel.org/lkml/20160921141539.GA17898@infradead.org/, but
+was never followed up on. We recently hit this bug in our testing where
+fencing io errors, which were previously erroring out with EIO, were
+being returned as success operations after this commit.
+
+The fix proposed on the list earlier was a little short -- it would have
+still called generic_write_sync() in case `ret` already contained an
+error. This fix ensures generic_write_sync() is only called when there's
+no pending error in the write. Additionally, transferred is replaced
+with ret to bring this code in line with other callers.
+
+Fixes: e259221763a4 ("fs: simplify the generic_write_sync prototype")
+Reported-by: Ravi Nankani <rnankani@amazon.com>
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+CC: Torsten Mehlan <tomeh@amazon.de>
+CC: Uwe Dannowski <uwed@amazon.de>
+CC: Amit Shah <aams@amazon.de>
+CC: David Woodhouse <dwmw@amazon.co.uk>
+CC: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/direct-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *
+ */
+ dio->iocb->ki_pos += transferred;
+
+- if (dio->op == REQ_OP_WRITE)
+- ret = generic_write_sync(dio->iocb, transferred);
++ if (ret > 0 && dio->op == REQ_OP_WRITE)
++ ret = generic_write_sync(dio->iocb, ret);
+ dio->iocb->ki_complete(dio->iocb, ret, 0);
+ }
+
--- /dev/null
+From 38ab012f109caf10f471db1adf284e620dd8d701 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <kernellwp@gmail.com>
+Date: Tue, 20 Nov 2018 09:39:30 +0800
+Subject: KVM: LAPIC: Fix pv ipis use-before-initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <kernellwp@gmail.com>
+
+commit 38ab012f109caf10f471db1adf284e620dd8d701 upstream.
+
+Reported by syzkaller:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000014
+ PGD 800000040410c067 P4D 800000040410c067 PUD 40410d067 PMD 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 3 PID: 2567 Comm: poc Tainted: G OE 4.19.0-rc5 #16
+ RIP: 0010:kvm_pv_send_ipi+0x94/0x350 [kvm]
+ Call Trace:
+ kvm_emulate_hypercall+0x3cc/0x700 [kvm]
+ handle_vmcall+0xe/0x10 [kvm_intel]
+ vmx_handle_exit+0xc1/0x11b0 [kvm_intel]
+ vcpu_enter_guest+0x9fb/0x1910 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x35c/0x610 [kvm]
+ kvm_vcpu_ioctl+0x3e9/0x6d0 [kvm]
+ do_vfs_ioctl+0xa5/0x690
+ ksys_ioctl+0x6d/0x80
+ __x64_sys_ioctl+0x1a/0x20
+ do_syscall_64+0x83/0x6e0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The reason is that the apic map has not yet been initialized; the testcase
+triggers the pv_send_ipi interface via vmcall, which results in
+kvm->arch.apic_map being dereferenced. This patch fixes it by checking
+whether or not the apic map is NULL and bailing out immediately if that is
+the case.
+
+Fixes: 4180bf1b65 (KVM: X86: Implement "send IPI" hypercall)
+Reported-by: Wei Wu <ww9210@gmail.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Wei Wu <ww9210@gmail.com>
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -571,6 +571,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, uns
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
+
++ if (unlikely(!map)) {
++ count = -EOPNOTSUPP;
++ goto out;
++ }
++
+ if (min > map->max_apic_id)
+ goto out;
+ /* Bits above cluster_size are masked in the caller. */
--- /dev/null
+From 0e0fee5c539b61fdd098332e0e2cc375d9073706 Mon Sep 17 00:00:00 2001
+From: Junaid Shahid <junaids@google.com>
+Date: Wed, 31 Oct 2018 14:53:57 -0700
+Subject: kvm: mmu: Fix race in emulated page table writes
+
+From: Junaid Shahid <junaids@google.com>
+
+commit 0e0fee5c539b61fdd098332e0e2cc375d9073706 upstream.
+
+When a guest page table is updated via an emulated write,
+kvm_mmu_pte_write() is called to update the shadow PTE using the just
+written guest PTE value. But if two emulated guest PTE writes happened
+concurrently, it is possible that the guest PTE and the shadow PTE end
+up being out of sync. Emulated writes do not mark the shadow page as
+unsync-ed, so this inconsistency will not be resolved even by a guest TLB
+flush (unless the page was marked as unsync-ed at some other point).
+
+This is fixed by re-reading the current value of the guest PTE after the
+MMU lock has been acquired instead of just using the value that was
+written prior to calling kvm_mmu_pte_write().
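+
+For reference, a sketch of the corrected ordering (see the hunks below):
+
+  spin_lock(&vcpu->kvm->mmu_lock);
+  /* re-read the guest PTE only after the MMU lock is held, so a
+   * concurrent emulated write cannot leave the shadow PTE stale */
+  gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);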
+
+Signed-off-by: Junaid Shahid <junaids@google.com>
+Reviewed-by: Wanpeng Li <wanpengli@tencent.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 27 +++++++++------------------
+ 1 file changed, 9 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5013,9 +5013,9 @@ static bool need_remote_flush(u64 old, u
+ }
+
+ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+- const u8 *new, int *bytes)
++ int *bytes)
+ {
+- u64 gentry;
++ u64 gentry = 0;
+ int r;
+
+ /*
+@@ -5027,22 +5027,12 @@ static u64 mmu_pte_write_fetch_gpte(stru
+ /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+ *gpa &= ~(gpa_t)7;
+ *bytes = 8;
+- r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
+- if (r)
+- gentry = 0;
+- new = (const u8 *)&gentry;
+ }
+
+- switch (*bytes) {
+- case 4:
+- gentry = *(const u32 *)new;
+- break;
+- case 8:
+- gentry = *(const u64 *)new;
+- break;
+- default:
+- gentry = 0;
+- break;
++ if (*bytes == 4 || *bytes == 8) {
++ r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
++ if (r)
++ gentry = 0;
+ }
+
+ return gentry;
+@@ -5146,8 +5136,6 @@ static void kvm_mmu_pte_write(struct kvm
+
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+
+- gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
+-
+ /*
+ * No need to care whether allocation memory is successful
+ * or not since pte prefetch is skiped if it does not have
+@@ -5156,6 +5144,9 @@ static void kvm_mmu_pte_write(struct kvm
+ mmu_topup_memory_caches(vcpu);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
++
++ gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
++
+ ++vcpu->kvm->stat.mmu_pte_write;
+ kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+
--- /dev/null
+From 326e742533bf0a23f0127d8ea62fb558ba665f08 Mon Sep 17 00:00:00 2001
+From: Leonid Shatz <leonid.shatz@oracle.com>
+Date: Tue, 6 Nov 2018 12:14:25 +0200
+Subject: KVM: nVMX/nSVM: Fix bug which sets vcpu->arch.tsc_offset to L1 tsc_offset
+
+From: Leonid Shatz <leonid.shatz@oracle.com>
+
+commit 326e742533bf0a23f0127d8ea62fb558ba665f08 upstream.
+
+Since commit e79f245ddec1 ("X86/KVM: Properly update 'tsc_offset' to
+represent the running guest"), the meaning of vcpu->arch.tsc_offset was
+changed to always reflect the tsc_offset value set on the active VMCS,
+regardless of whether the vCPU is currently running L1 or L2.
+
+However, the above-mentioned commit failed to also change
+kvm_vcpu_write_tsc_offset() to set vcpu->arch.tsc_offset correctly.
+This is because vmx_write_tsc_offset() could set the tsc_offset value
+in the active VMCS to the given offset parameter *plus vmcs12->tsc_offset*,
+whereas kvm_vcpu_write_tsc_offset() just sets vcpu->arch.tsc_offset
+to the given offset parameter, without taking into account the possible
+addition of vmcs12->tsc_offset. (The same is true for the SVM case.)
+
+Fix this issue by changing kvm_x86_ops->write_tsc_offset() to return the
+tsc_offset actually set in the active VMCS, and by modifying
+kvm_vcpu_write_tsc_offset() to store the returned value in
+vcpu->arch.tsc_offset.
+In addition, rename the write_tsc_offset() callback to write_l1_tsc_offset()
+to make it clear that it is meant to set the L1 TSC offset.
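+
+For reference, a sketch of the new contract (illustrative only):
+
+  /* vendor code returns the offset actually written to the active VMCS/VMCB */
+  vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
+
+  /* VMX side, when L2 is running and TSC offsetting is enabled: */
+  active_offset = offset + vmcs12->tsc_offset;
+  vmcs_write64(TSC_OFFSET, active_offset);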
+
+Fixes: e79f245ddec1 ("X86/KVM: Properly update 'tsc_offset' to represent the running guest")
+Reviewed-by: Liran Alon <liran.alon@oracle.com>
+Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
+Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
+Signed-off-by: Leonid Shatz <leonid.shatz@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 3 ++-
+ arch/x86/kvm/svm.c | 5 +++--
+ arch/x86/kvm/vmx.c | 21 +++++++++------------
+ arch/x86/kvm/x86.c | 6 +++---
+ 4 files changed, 17 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1046,7 +1046,8 @@ struct kvm_x86_ops {
+ bool (*has_wbinvd_exit)(void);
+
+ u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
+- void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
++ /* Returns actual tsc_offset set in active VMCS */
++ u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
+ void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1444,7 +1444,7 @@ static u64 svm_read_l1_tsc_offset(struct
+ return vcpu->arch.tsc_offset;
+ }
+
+-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
++static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 g_tsc_offset = 0;
+@@ -1462,6 +1462,7 @@ static void svm_write_tsc_offset(struct
+ svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+
+ mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
++ return svm->vmcb->control.tsc_offset;
+ }
+
+ static void avic_init_vmcb(struct vcpu_svm *svm)
+@@ -7155,7 +7156,7 @@ static struct kvm_x86_ops svm_x86_ops __
+ .has_wbinvd_exit = svm_has_wbinvd_exit,
+
+ .read_l1_tsc_offset = svm_read_l1_tsc_offset,
+- .write_tsc_offset = svm_write_tsc_offset,
++ .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+
+ .set_tdp_cr3 = set_tdp_cr3,
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3433,11 +3433,9 @@ static u64 vmx_read_l1_tsc_offset(struct
+ return vcpu->arch.tsc_offset;
+ }
+
+-/*
+- * writes 'offset' into guest's timestamp counter offset register
+- */
+-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
++static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ {
++ u64 active_offset = offset;
+ if (is_guest_mode(vcpu)) {
+ /*
+ * We're here if L1 chose not to trap WRMSR to TSC. According
+@@ -3445,17 +3443,16 @@ static void vmx_write_tsc_offset(struct
+ * set for L2 remains unchanged, and still needs to be added
+ * to the newly set TSC to get L2's TSC.
+ */
+- struct vmcs12 *vmcs12;
+- /* recalculate vmcs02.TSC_OFFSET: */
+- vmcs12 = get_vmcs12(vcpu);
+- vmcs_write64(TSC_OFFSET, offset +
+- (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
+- vmcs12->tsc_offset : 0));
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
++ active_offset += vmcs12->tsc_offset;
+ } else {
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ vmcs_read64(TSC_OFFSET), offset);
+- vmcs_write64(TSC_OFFSET, offset);
+ }
++
++ vmcs_write64(TSC_OFFSET, active_offset);
++ return active_offset;
+ }
+
+ /*
+@@ -14203,7 +14200,7 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
+- .write_tsc_offset = vmx_write_tsc_offset,
++ .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+
+ .set_tdp_cr3 = vmx_set_cr3,
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1582,8 +1582,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
+ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ {
+- kvm_x86_ops->write_tsc_offset(vcpu, offset);
+- vcpu->arch.tsc_offset = offset;
++ vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
+ }
+
+ static inline bool kvm_check_tsc_unstable(void)
+@@ -1711,7 +1710,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
+ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+ s64 adjustment)
+ {
+- kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
++ u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
++ kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
+ }
+
+ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
--- /dev/null
+From fd65d3142f734bc4376053c8d75670041903134d Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Tue, 22 May 2018 09:54:20 -0700
+Subject: kvm: svm: Ensure an IBPB on all affected CPUs when freeing a vmcb
+
+From: Jim Mattson <jmattson@google.com>
+
+commit fd65d3142f734bc4376053c8d75670041903134d upstream.
+
+Previously, we only called indirect_branch_prediction_barrier on the
+logical CPU that freed a vmcb. This function should be called on all
+logical CPUs that last loaded the vmcb in question.
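+
+For reference, the approach taken (see the hunk below): clear the
+per-CPU current_vmcb pointers so that the next svm_vcpu_load() on each
+of those CPUs sees a VMCB change and issues the barrier there:
+
+  for_each_online_cpu(i)
+          cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);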
+
+Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
+Reported-by: Neel Natu <neelnatu@google.com>
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2187,21 +2187,31 @@ out:
+ return ERR_PTR(err);
+ }
+
++static void svm_clear_current_vmcb(struct vmcb *vmcb)
++{
++ int i;
++
++ for_each_online_cpu(i)
++ cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
++}
++
+ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
++ /*
++ * The vmcb page can be recycled, causing a false negative in
++ * svm_vcpu_load(). So, ensure that no logical CPU has this
++ * vmcb page recorded as its current vmcb.
++ */
++ svm_clear_current_vmcb(svm->vmcb);
++
+ __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
+ __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
+ __free_page(virt_to_page(svm->nested.hsave));
+ __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, svm);
+- /*
+- * The vmcb page can be recycled, causing a false negative in
+- * svm_vcpu_load(). So do a full IBPB now.
+- */
+- indirect_branch_prediction_barrier();
+ }
+
+ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
--- /dev/null
+From a87c99e61236ba8ca962ce97a19fab5ebd588d35 Mon Sep 17 00:00:00 2001
+From: Luiz Capitulino <lcapitulino@redhat.com>
+Date: Fri, 23 Nov 2018 12:02:14 -0500
+Subject: KVM: VMX: re-add ple_gap module parameter
+
+From: Luiz Capitulino <lcapitulino@redhat.com>
+
+commit a87c99e61236ba8ca962ce97a19fab5ebd588d35 upstream.
+
+Apparently, the ple_gap parameter was accidentally removed
+by commit c8e88717cfc6b36bedea22368d97667446318291. Add it
+back.
+
+Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: c8e88717cfc6b36bedea22368d97667446318291
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -170,6 +170,7 @@ module_param_named(preemption_timer, ena
+ * refer SDM volume 3b section 21.6.13 & 22.1.3.
+ */
+ static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
++module_param(ple_gap, uint, 0444);
+
+ static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
+ module_param(ple_window, uint, 0444);
--- /dev/null
+From bcbfbd8ec21096027f1ee13ce6c185e8175166f6 Mon Sep 17 00:00:00 2001
+From: Liran Alon <liran.alon@oracle.com>
+Date: Thu, 8 Nov 2018 00:43:06 +0200
+Subject: KVM: x86: Fix kernel info-leak in KVM_HC_CLOCK_PAIRING hypercall
+
+From: Liran Alon <liran.alon@oracle.com>
+
+commit bcbfbd8ec21096027f1ee13ce6c185e8175166f6 upstream.
+
+kvm_pv_clock_pairing() allocates the local variable
+"struct kvm_clock_pairing clock_pairing" on the stack and initializes
+all of its fields besides the padding (clock_pairing.pad[]).
+
+Because the clock_pairing variable is written completely (including the
+padding) to guest memory, failure to initialize the struct padding
+results in a kernel info-leak.
+
+Fix the issue by making sure to also init the padding with zeroes.
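+
+For reference, the pattern (see the hunk below): every byte of the
+struct copied to guest memory must be initialized, including padding:
+
+  clock_pairing.flags = 0;
+  memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
+  ...
+  kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
+                  sizeof(struct kvm_clock_pairing));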
+
+Fixes: 55dd00a73a51 ("KVM: x86: add KVM_HC_CLOCK_PAIRING hypercall")
+Reported-by: syzbot+a8ef68d71211ba264f56@syzkaller.appspotmail.com
+Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
+Signed-off-by: Liran Alon <liran.alon@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6788,6 +6788,7 @@ static int kvm_pv_clock_pairing(struct k
+ clock_pairing.nsec = ts.tv_nsec;
+ clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
+ clock_pairing.flags = 0;
++ memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
+
+ ret = 0;
+ if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
--- /dev/null
+From e97f852fd4561e77721bb9a4e0ea9d98305b1e93 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Tue, 20 Nov 2018 16:34:18 +0800
+Subject: KVM: X86: Fix scan ioapic use-before-initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+commit e97f852fd4561e77721bb9a4e0ea9d98305b1e93 upstream.
+
+Reported by syzkaller:
+
+ BUG: unable to handle kernel NULL pointer dereference at 00000000000001c8
+ PGD 80000003ec4da067 P4D 80000003ec4da067 PUD 3f7bfa067 PMD 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 7 PID: 5059 Comm: debug Tainted: G OE 4.19.0-rc5 #16
+ RIP: 0010:__lock_acquire+0x1a6/0x1990
+ Call Trace:
+ lock_acquire+0xdb/0x210
+ _raw_spin_lock+0x38/0x70
+ kvm_ioapic_scan_entry+0x3e/0x110 [kvm]
+ vcpu_enter_guest+0x167e/0x1910 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x35c/0x610 [kvm]
+ kvm_vcpu_ioctl+0x3e9/0x6d0 [kvm]
+ do_vfs_ioctl+0xa5/0x690
+ ksys_ioctl+0x6d/0x80
+ __x64_sys_ioctl+0x1a/0x20
+ do_syscall_64+0x83/0x6e0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The reason is that the testcase writes the Hyper-V SynIC HV_X64_MSR_SINT6 msr
+and triggers the scan-ioapic logic to load synic vectors into the EOI exit
+bitmap. However, the irqchip is not initialized by this simple testcase, so
+the ioapic/apic objects should not be accessed.
+This can be triggered by the following program:
+
+ #define _GNU_SOURCE
+
+ #include <endian.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/syscall.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff};
+
+ int main(void)
+ {
+ syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+ long res = 0;
+ memcpy((void*)0x20000040, "/dev/kvm", 9);
+ res = syscall(__NR_openat, 0xffffffffffffff9c, 0x20000040, 0, 0);
+ if (res != -1)
+ r[0] = res;
+ res = syscall(__NR_ioctl, r[0], 0xae01, 0);
+ if (res != -1)
+ r[1] = res;
+ res = syscall(__NR_ioctl, r[1], 0xae41, 0);
+ if (res != -1)
+ r[2] = res;
+ memcpy(
+ (void*)0x20000080,
+ "\x01\x00\x00\x00\x00\x5b\x61\xbb\x96\x00\x00\x40\x00\x00\x00\x00\x01\x00"
+ "\x08\x00\x00\x00\x00\x00\x0b\x77\xd1\x78\x4d\xd8\x3a\xed\xb1\x5c\x2e\x43"
+ "\xaa\x43\x39\xd6\xff\xf5\xf0\xa8\x98\xf2\x3e\x37\x29\x89\xde\x88\xc6\x33"
+ "\xfc\x2a\xdb\xb7\xe1\x4c\xac\x28\x61\x7b\x9c\xa9\xbc\x0d\xa0\x63\xfe\xfe"
+ "\xe8\x75\xde\xdd\x19\x38\xdc\x34\xf5\xec\x05\xfd\xeb\x5d\xed\x2e\xaf\x22"
+ "\xfa\xab\xb7\xe4\x42\x67\xd0\xaf\x06\x1c\x6a\x35\x67\x10\x55\xcb",
+ 106);
+ syscall(__NR_ioctl, r[2], 0x4008ae89, 0x20000080);
+ syscall(__NR_ioctl, r[2], 0xae80, 0);
+ return 0;
+ }
+
+This patch fixes it by bailing out of the scan-ioapic path if the ioapic is
+not initialized in the kernel.
+
+Reported-by: Wei Wu <ww9210@gmail.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Wei Wu <ww9210@gmail.com>
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7314,7 +7314,8 @@ static void vcpu_scan_ioapic(struct kvm_
+ else {
+ if (vcpu->arch.apicv_active)
+ kvm_x86_ops->sync_pir_to_irr(vcpu);
+- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
++ if (ioapic_in_kernel(vcpu->kvm))
++ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+ }
+
+ if (is_guest_mode(vcpu))
--- /dev/null
+From 15cb127e3c8f6232096d5dba6a5b4046bc292d70 Mon Sep 17 00:00:00 2001
+From: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+Date: Fri, 23 Nov 2018 18:00:21 +0100
+Subject: PCI: dwc: Fix MSI-X EP framework address calculation bug
+
+From: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+
+commit 15cb127e3c8f6232096d5dba6a5b4046bc292d70 upstream.
+
+Fix an error caused by a spurious 3-bit right shift in the offset
+address calculation of the MSI-X table in dw_pcie_ep_raise_msix_irq().
+
+The initial testing code set the offset address of the MSI-X table to
+zero by default, so that even with the 3-bit right shift the computed
+result would still be zero and valid; therefore this bug went
+unnoticed.
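+
+For reference, a sketch of the corrected table-offset handling (see the
+hunk below):
+
+  tbl_offset = dw_pcie_readl_dbi(pci, reg);
+  bir = tbl_offset & PCI_MSIX_TABLE_BIR;   /* low 3 bits: BAR indicator */
+  tbl_offset &= PCI_MSIX_TABLE_OFFSET;     /* already a byte offset, no shift needed */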
+
+Fixes: beb4641a787d ("PCI: dwc: Add MSI-X callbacks handler")
+Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+[lorenzo.pieralisi@arm.com: updated commit log]
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_
+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+- tbl_offset >>= 3;
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+ bar_addr_upper = 0;
--- /dev/null
+From f1f90e254e46e0a14220e4090041f68256fbe297 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 26 Nov 2018 10:37:13 -0600
+Subject: PCI: Fix incorrect value returned from pcie_get_speed_cap()
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit f1f90e254e46e0a14220e4090041f68256fbe297 upstream.
+
+The macros PCI_EXP_LNKCAP_SLS_*GB are values, not bit masks. We must mask
+the register and compare it against them.
+
+This fixes errors like this:
+
+ amdgpu: [powerplay] failed to send message 261 ret is 0
+
+when a PCIe-v3 card is plugged into a PCIe-v1 slot, because the slot is
+being incorrectly reported as PCIe-v3 capable.
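+
+For example (a self-contained sketch; the encodings below are copied from
+include/uapi/linux/pci_regs.h, the slot value is hypothetical), a 2.5 GT/s
+slot advertises a Supported Link Speeds field of 0x1, and because
+PCI_EXP_LNKCAP_SLS_8_0GB is the *value* 0x3 rather than a single bit, the
+old bitmask test matches it:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PCI_EXP_LNKCAP_SLS        0x0000000f
+  #define PCI_EXP_LNKCAP_SLS_2_5GB  0x00000001
+  #define PCI_EXP_LNKCAP_SLS_8_0GB  0x00000003
+
+  int main(void)
+  {
+          uint32_t lnkcap = 0x1;   /* hypothetical PCIe-v1 (2.5 GT/s) slot */
+
+          /* old test: 0x1 & 0x3 is non-zero, so the slot looks 8 GT/s capable */
+          printf("bitmask test reports 8 GT/s: %d\n",
+                 !!(lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB));
+
+          /* fixed test: mask the field and compare it against the value */
+          printf("value test reports 2.5 GT/s: %d\n",
+                 (lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB);
+          return 0;
+  }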
+
+6cf57be0f78e, which appeared in v4.17, added pcie_get_speed_cap() with the
+incorrect test of PCI_EXP_LNKCAP_SLS as a bitmask. 5d9a63304032, which
+appeared in v4.19, changed amdgpu to use pcie_get_speed_cap(), so the
+amdgpu bug reports below are regressions in v4.19.
+
+Fixes: 6cf57be0f78e ("PCI: Add pcie_get_speed_cap() to find max supported link speed")
+Fixes: 5d9a63304032 ("drm/amdgpu: use pcie functions for link width and speed")
+Link: https://bugs.freedesktop.org/show_bug.cgi?id=108704
+Link: https://bugs.freedesktop.org/show_bug.cgi?id=108778
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+[bhelgaas: update comment, remove use of PCI_EXP_LNKCAP_SLS_8_0GB and
+PCI_EXP_LNKCAP_SLS_16_0GB since those should be covered by PCI_EXP_LNKCAP2,
+remove test of PCI_EXP_LNKCAP for zero, since that register is required]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # v4.17+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c | 24 +++++++++++-------------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5473,9 +5473,13 @@ enum pci_bus_speed pcie_get_speed_cap(st
+ u32 lnkcap2, lnkcap;
+
+ /*
+- * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
+- * Speeds Vector in Link Capabilities 2 when supported, falling
+- * back to Max Link Speed in Link Capabilities otherwise.
++ * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
++ * implementation note there recommends using the Supported Link
++ * Speeds Vector in Link Capabilities 2 when supported.
++ *
++ * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
++ * should use the Supported Link Speeds field in Link Capabilities,
++ * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
+ */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (lnkcap2) { /* PCIe r3.0-compliant */
+@@ -5491,16 +5495,10 @@ enum pci_bus_speed pcie_get_speed_cap(st
+ }
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+- if (lnkcap) {
+- if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
+- return PCIE_SPEED_16_0GT;
+- else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
+- return PCIE_SPEED_8_0GT;
+- else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+- return PCIE_SPEED_5_0GT;
+- else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+- return PCIE_SPEED_2_5GT;
+- }
++ if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
++ return PCIE_SPEED_5_0GT;
++ else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
++ return PCIE_SPEED_2_5GT;
+
+ return PCI_SPEED_UNKNOWN;
+ }
--- /dev/null
+From c6fd6fe9dea44732cdcd970f1130b8cc50ad685a Mon Sep 17 00:00:00 2001
+From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+Date: Wed, 7 Nov 2018 05:16:49 +0000
+Subject: PCI: layerscape: Fix wrong invocation of outbound window disable accessor
+
+From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+
+commit c6fd6fe9dea44732cdcd970f1130b8cc50ad685a upstream.
+
+The order of parameters is not correct when invoking the outbound
+window disable routine. Fix it.
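+
+A self-contained sketch of the pitfall (the types below are stand-ins, and
+the prototype is paraphrased under the assumption that the accessor takes
+the window index before the region type): because an enum argument
+converts silently to int, the swapped call compiles without any warning:
+
+  enum dw_pcie_region_type { DW_PCIE_REGION_INBOUND, DW_PCIE_REGION_OUTBOUND };
+  struct dw_pcie { int dummy; };
+
+  /* paraphrased prototype: index first, region type second */
+  static void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+                                  enum dw_pcie_region_type type)
+  {
+          (void)pci; (void)index; (void)type;
+  }
+
+  int main(void)
+  {
+          struct dw_pcie pci = { 0 };
+          int i = 1;
+
+          dw_pcie_disable_atu(&pci, DW_PCIE_REGION_OUTBOUND, i); /* wrong order */
+          dw_pcie_disable_atu(&pci, i, DW_PCIE_REGION_OUTBOUND); /* fixed order */
+          return 0;
+  }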
+
+Fixes: 4a2745d760fa ("PCI: layerscape: Disable outbound windows configured by bootloader")
+Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+[lorenzo.pieralisi@arm.com: commit log]
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/dwc/pci-layerscape.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/controller/dwc/pci-layerscape.c
++++ b/drivers/pci/controller/dwc/pci-layerscape.c
+@@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atu
+ int i;
+
+ for (i = 0; i < PCIE_IATU_NUM; i++)
+- dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
++ dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
+ }
+
+ static int ls1021_pcie_link_up(struct dw_pcie *pci)
--- /dev/null
+From 67266c1080ad56c31af72b9c18355fde8ccc124a Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 21 Nov 2018 11:16:11 +0100
+Subject: perf/x86/intel: Add generic branch tracing check to intel_pmu_has_bts()
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 67266c1080ad56c31af72b9c18355fde8ccc124a upstream.
+
+Currently we check for branch tracing only by looking for the
+PERF_COUNT_HW_BRANCH_INSTRUCTIONS event of the PERF_TYPE_HARDWARE
+type. But the same event can also be defined with the PERF_TYPE_RAW
+type.
+
+Change the intel_pmu_has_bts() code to check the event's final
+hw config value, so that both HW types are covered.
+
+Add unlikely() to the intel_pmu_has_bts() condition calls, because
+it was used that way in the original code in intel_bts_constraints().
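+
+As a hypothetical user-space illustration (the raw encoding 0x00c4 for
+branch instructions is an assumption taken from Intel's generic event map;
+error handling is omitted), both of the events below end up with the same
+final hw config and a sample period of 1, so both should be treated as BTS:
+
+  #include <linux/perf_event.h>
+  #include <string.h>
+  #include <stdio.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  static int open_event(__u32 type, __u64 config)
+  {
+          struct perf_event_attr attr;
+
+          memset(&attr, 0, sizeof(attr));
+          attr.size = sizeof(attr);
+          attr.type = type;
+          attr.config = config;
+          attr.sample_period = 1;  /* period 1, no freq: BTS candidate */
+          attr.exclude_kernel = 1; /* BTS is only allowed for user mode */
+
+          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
+  }
+
+  int main(void)
+  {
+          /* generic hardware event: already matched by the old check */
+          int hw = open_event(PERF_TYPE_HARDWARE,
+                              PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+          /* raw encoding of the same event: only caught by checking the
+           * final hw config value */
+          int raw = open_event(PERF_TYPE_RAW, 0x00c4);
+
+          printf("hw fd=%d raw fd=%d\n", hw, raw);
+          return 0;
+  }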
+
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: http://lkml.kernel.org/r/20181121101612.16272-2-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c | 17 +++--------------
+ arch/x86/events/perf_event.h | 13 +++++++++----
+ 2 files changed, 12 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2358,16 +2358,7 @@ done:
+ static struct event_constraint *
+ intel_bts_constraints(struct perf_event *event)
+ {
+- struct hw_perf_event *hwc = &event->hw;
+- unsigned int hw_event, bts_event;
+-
+- if (event->attr.freq)
+- return NULL;
+-
+- hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+- bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+-
+- if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
++ if (unlikely(intel_pmu_has_bts(event)))
+ return &bts_constraint;
+
+ return NULL;
+@@ -2989,10 +2980,8 @@ static unsigned long intel_pmu_large_peb
+ static int intel_pmu_bts_config(struct perf_event *event)
+ {
+ struct perf_event_attr *attr = &event->attr;
+- struct hw_perf_event *hwc = &event->hw;
+
+- if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+- !attr->freq && hwc->sample_period == 1) {
++ if (unlikely(intel_pmu_has_bts(event))) {
+ /* BTS is not supported by this architecture. */
+ if (!x86_pmu.bts_active)
+ return -EOPNOTSUPP;
+@@ -3054,7 +3043,7 @@ static int intel_pmu_hw_config(struct pe
+ /*
+ * BTS is set up earlier in this path, so don't account twice
+ */
+- if (!intel_pmu_has_bts(event)) {
++ if (!unlikely(intel_pmu_has_bts(event))) {
+ /* disallow lbr if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -857,11 +857,16 @@ static inline int amd_pmu_init(void)
+
+ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ {
+- if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+- !event->attr.freq && event->hw.sample_period == 1)
+- return true;
++ struct hw_perf_event *hwc = &event->hw;
++ unsigned int hw_event, bts_event;
+
+- return false;
++ if (event->attr.freq)
++ return false;
++
++ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
++ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
++
++ return hw_event == bts_event && hwc->sample_period == 1;
+ }
+
+ int intel_pmu_save_and_restart(struct perf_event *event);
--- /dev/null
+From 472de49fdc53365c880ab81ae2b5cfdd83db0b06 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 21 Nov 2018 11:16:12 +0100
+Subject: perf/x86/intel: Disallow precise_ip on BTS events
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 472de49fdc53365c880ab81ae2b5cfdd83db0b06 upstream.
+
+Vince reported a crash in the BTS flush code when touching the callchain
+data, which was supposed to be initialized as an 'early' callchain,
+but intel_pmu_drain_bts_buffer() does not do that:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+ ...
+ Call Trace:
+ <IRQ>
+ intel_pmu_drain_bts_buffer+0x151/0x220
+ ? intel_get_event_constraints+0x219/0x360
+ ? perf_assign_events+0xe2/0x2a0
+ ? select_idle_sibling+0x22/0x3a0
+ ? __update_load_avg_se+0x1ec/0x270
+ ? enqueue_task_fair+0x377/0xdd0
+ ? cpumask_next_and+0x19/0x20
+ ? load_balance+0x134/0x950
+ ? check_preempt_curr+0x7a/0x90
+ ? ttwu_do_wakeup+0x19/0x140
+ x86_pmu_stop+0x3b/0x90
+ x86_pmu_del+0x57/0x160
+ event_sched_out.isra.106+0x81/0x170
+ group_sched_out.part.108+0x51/0xc0
+ __perf_event_disable+0x7f/0x160
+ event_function+0x8c/0xd0
+ remote_function+0x3c/0x50
+ flush_smp_call_function_queue+0x35/0xe0
+ smp_call_function_single_interrupt+0x3a/0xd0
+ call_function_single_interrupt+0xf/0x20
+ </IRQ>
+
+It was triggered by a fuzzer but can be easily reproduced by:
+
+ # perf record -e cpu/branch-instructions/pu -g -c 1
+
+Peter suggested not to allow branch tracing for precise events:
+
+ > Now arguably, this is really stupid behaviour. Who in his right mind
+ > wants callchain output on BTS entries. And even if they do, BTS +
+ > precise_ip is nonsensical.
+ >
+ > So in my mind disallowing precise_ip on BTS would be the simplest fix.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Reported-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 6cbc304f2f36 ("perf/x86/intel: Fix unwind errors from PEBS entries (mk-II)")
+Link: http://lkml.kernel.org/r/20181121101612.16272-3-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2990,6 +2990,10 @@ static int intel_pmu_bts_config(struct p
+ if (!attr->exclude_kernel)
+ return -EOPNOTSUPP;
+
++ /* BTS is not allowed for precise events. */
++ if (attr->precise_ip)
++ return -EOPNOTSUPP;
++
+ /* disallow bts if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
--- /dev/null
+From ed6101bbf6266ee83e620b19faa7c6ad56bb41ab Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 21 Nov 2018 11:16:10 +0100
+Subject: perf/x86/intel: Move branch tracing setup to the Intel-specific source file
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit ed6101bbf6266ee83e620b19faa7c6ad56bb41ab upstream.
+
+Move the branch tracing setup into a separate intel_pmu_bts_config()
+function in the Intel core object, because it is Intel-specific.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: http://lkml.kernel.org/r/20181121101612.16272-1-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/core.c | 20 --------------------
+ arch/x86/events/intel/core.c | 41 ++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event
+ if (config == -1LL)
+ return -EINVAL;
+
+- /*
+- * Branch tracing:
+- */
+- if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+- !attr->freq && hwc->sample_period == 1) {
+- /* BTS is not supported by this architecture. */
+- if (!x86_pmu.bts_active)
+- return -EOPNOTSUPP;
+-
+- /* BTS is currently only allowed for user-mode. */
+- if (!attr->exclude_kernel)
+- return -EOPNOTSUPP;
+-
+- /* disallow bts if conflicting events are present */
+- if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+- return -EBUSY;
+-
+- event->destroy = hw_perf_lbr_event_destroy;
+- }
+-
+ hwc->config |= config;
+
+ return 0;
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2986,6 +2986,41 @@ static unsigned long intel_pmu_large_peb
+ return flags;
+ }
+
++static int intel_pmu_bts_config(struct perf_event *event)
++{
++ struct perf_event_attr *attr = &event->attr;
++ struct hw_perf_event *hwc = &event->hw;
++
++ if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
++ !attr->freq && hwc->sample_period == 1) {
++ /* BTS is not supported by this architecture. */
++ if (!x86_pmu.bts_active)
++ return -EOPNOTSUPP;
++
++ /* BTS is currently only allowed for user-mode. */
++ if (!attr->exclude_kernel)
++ return -EOPNOTSUPP;
++
++ /* disallow bts if conflicting events are present */
++ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
++ return -EBUSY;
++
++ event->destroy = hw_perf_lbr_event_destroy;
++ }
++
++ return 0;
++}
++
++static int core_pmu_hw_config(struct perf_event *event)
++{
++ int ret = x86_pmu_hw_config(event);
++
++ if (ret)
++ return ret;
++
++ return intel_pmu_bts_config(event);
++}
++
+ static int intel_pmu_hw_config(struct perf_event *event)
+ {
+ int ret = x86_pmu_hw_config(event);
+@@ -2993,6 +3028,10 @@ static int intel_pmu_hw_config(struct pe
+ if (ret)
+ return ret;
+
++ ret = intel_pmu_bts_config(event);
++ if (ret)
++ return ret;
++
+ if (event->attr.precise_ip) {
+ if (!event->attr.freq) {
+ event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+@@ -3478,7 +3517,7 @@ static __initconst const struct x86_pmu
+ .enable_all = core_pmu_enable_all,
+ .enable = core_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+- .hw_config = x86_pmu_hw_config,
++ .hw_config = core_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
x86speculation_Enable_prctl_mode_for_spectre_v2_user.patch
x86speculation_Add_seccomp_Spectre_v2_user_space_protection_mode.patch
x86speculation_Provide_IBPB_always_command_line_options.patch
+userfaultfd-shmem-hugetlbfs-only-allow-to-register-vm_maywrite-vmas.patch
+kvm-mmu-fix-race-in-emulated-page-table-writes.patch
+kvm-svm-ensure-an-ibpb-on-all-affected-cpus-when-freeing-a-vmcb.patch
+kvm-nvmx-nsvm-fix-bug-which-sets-vcpu-arch.tsc_offset-to-l1-tsc_offset.patch
+kvm-x86-fix-kernel-info-leak-in-kvm_hc_clock_pairing-hypercall.patch
+kvm-lapic-fix-pv-ipis-use-before-initialization.patch
+kvm-x86-fix-scan-ioapic-use-before-initialization.patch
+kvm-vmx-re-add-ple_gap-module-parameter.patch
+xtensa-enable-coprocessors-that-are-being-flushed.patch
+xtensa-fix-coprocessor-context-offset-definitions.patch
+xtensa-fix-coprocessor-part-of-ptrace_-get-set-xregs.patch
+udf-allow-mounting-volumes-with-incorrect-identification-strings.patch
+btrfs-always-try-all-copies-when-reading-extent-buffers.patch
+btrfs-ensure-path-name-is-null-terminated-at-btrfs_control_ioctl.patch
+btrfs-fix-rare-chances-for-data-loss-when-doing-a-fast-fsync.patch
+btrfs-fix-race-between-enabling-quotas-and-subvolume-creation.patch
+btrfs-relocation-set-trans-to-be-null-after-ending-transaction.patch
+pci-layerscape-fix-wrong-invocation-of-outbound-window-disable-accessor.patch
+pci-dwc-fix-msi-x-ep-framework-address-calculation-bug.patch
+pci-fix-incorrect-value-returned-from-pcie_get_speed_cap.patch
+arm64-dts-rockchip-fix-pcie-reset-polarity-for-rk3399-puma-haikou.patch
+x86-mce-amd-fix-the-thresholding-machinery-initialization-order.patch
+x86-fpu-disable-bottom-halves-while-loading-fpu-registers.patch
+perf-x86-intel-move-branch-tracing-setup-to-the-intel-specific-source-file.patch
+perf-x86-intel-add-generic-branch-tracing-check-to-intel_pmu_has_bts.patch
+perf-x86-intel-disallow-precise_ip-on-bts-events.patch
+fs-fix-lost-error-code-in-dio_complete.patch
+alsa-wss-fix-invalid-snd_free_pages-at-error-path.patch
+alsa-ac97-fix-incorrect-bit-shift-at-ac97-spsa-control-write.patch
+alsa-control-fix-race-between-adding-and-removing-a-user-element.patch
+alsa-sparc-fix-invalid-snd_free_pages-at-error-path.patch
+alsa-hda-add-asrock-n68c-s-ucc-the-power_save-blacklist.patch
+alsa-hda-realtek-support-alc300.patch
+alsa-hda-realtek-fix-headset-mic-detection-for-msi-ms-b171.patch
+alsa-hda-realtek-fix-the-pop-noise-on-headphone-for-lenovo-laptops.patch
+alsa-hda-realtek-add-auto-mute-quirk-for-hp-spectre-x360-laptop.patch
--- /dev/null
+From b54e41f5efcb4316b2f30b30c2535cc194270373 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 16 Nov 2018 13:43:17 +0100
+Subject: udf: Allow mounting volumes with incorrect identification strings
+
+From: Jan Kara <jack@suse.cz>
+
+commit b54e41f5efcb4316b2f30b30c2535cc194270373 upstream.
+
+Commit c26f6c615788 ("udf: Fix conversion of 'dstring' fields to UTF8")
+started to be more strict when checking whether converted strings are
+properly formatted. Sudip reports that there are DVDs where the volume
+identification string is actually too long - UDF reports:
+
+[ 632.309320] UDF-fs: incorrect dstring lengths (32/32)
+
+during mount and fails the mount. This is a mostly harmless failure, as we
+don't need the volume identification (and even less the volume set
+identification) for anything. So just truncate the volume identification
+string if it is too long, and replace it with 'InvalidName' if we cannot
+convert it for other reasons. This keeps slightly incorrect media
+mountable.
+
+CC: stable@vger.kernel.org
+Fixes: c26f6c615788 ("udf: Fix conversion of 'dstring' fields to UTF8")
+Reported-and-tested-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/udf/super.c | 16 ++++++++++------
+ fs/udf/unicode.c | 14 +++++++++++---
+ 2 files changed, 21 insertions(+), 9 deletions(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -831,16 +831,20 @@ static int udf_load_pvoldesc(struct supe
+
+
+ ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
+- if (ret < 0)
+- goto out_bh;
+-
+- strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
++ if (ret < 0) {
++ strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
++ pr_warn("incorrect volume identification, setting to "
++ "'InvalidName'\n");
++ } else {
++ strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
++ }
+ udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
+
+ ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
+- if (ret < 0)
++ if (ret < 0) {
++ ret = 0;
+ goto out_bh;
+-
++ }
+ outstr[ret] = 0;
+ udf_debug("volSetIdent[] = '%s'\n", outstr);
+
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -351,6 +351,11 @@ try_again:
+ return u_len;
+ }
+
++/*
++ * Convert CS0 dstring to output charset. Warning: This function may truncate
++ * input string if it is too long as it is used for informational strings only
++ * and it is better to truncate the string than to refuse mounting a media.
++ */
+ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
+ const uint8_t *ocu_i, int i_len)
+ {
+@@ -359,9 +364,12 @@ int udf_dstrCS0toChar(struct super_block
+ if (i_len > 0) {
+ s_len = ocu_i[i_len - 1];
+ if (s_len >= i_len) {
+- pr_err("incorrect dstring lengths (%d/%d)\n",
+- s_len, i_len);
+- return -EINVAL;
++ pr_warn("incorrect dstring lengths (%d/%d),"
++ " truncating\n", s_len, i_len);
++ s_len = i_len - 1;
++ /* 2-byte encoding? Need to round properly... */
++ if (ocu_i[0] == 16)
++ s_len -= (s_len - 1) & 2;
+ }
+ }
+
--- /dev/null
+From 29ec90660d68bbdd69507c1c8b4e33aa299278b1 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Fri, 30 Nov 2018 14:09:32 -0800
+Subject: userfaultfd: shmem/hugetlbfs: only allow to register VM_MAYWRITE vmas
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 29ec90660d68bbdd69507c1c8b4e33aa299278b1 upstream.
+
+After the VMA to register the uffd onto is found, check that it has
+VM_MAYWRITE set before allowing registration. This way we inherit all
+common code checks before allowing UFFDIO_COPY to fill file holes in
+shmem and hugetlbfs.
+
+The userfaultfd memory model is not applicable to read-only files unless
+the mapping is MAP_PRIVATE.
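+
+A minimal user-space sketch of the resulting behaviour (assuming the usual
+uffd setup; the /dev/shm path is arbitrary and error handling is omitted):
+a MAP_SHARED mapping of a file opened read-only has VM_MAYWRITE cleared,
+so with this patch UFFDIO_REGISTER fails with EPERM, while a MAP_PRIVATE
+mapping of the same file still registers:
+
+  #include <fcntl.h>
+  #include <linux/userfaultfd.h>
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+  #include <sys/mman.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  static int try_register(int uffd, void *addr, size_t len)
+  {
+          struct uffdio_register reg = {
+                  .range = { .start = (unsigned long)addr, .len = len },
+                  .mode  = UFFDIO_REGISTER_MODE_MISSING,
+          };
+          return ioctl(uffd, UFFDIO_REGISTER, &reg);
+  }
+
+  int main(void)
+  {
+          size_t len = 4096;
+          int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+          struct uffdio_api api = { .api = UFFD_API };
+
+          ioctl(uffd, UFFDIO_API, &api);
+
+          int rw = open("/dev/shm/uffd-demo", O_RDWR | O_CREAT, 0644);
+          ftruncate(rw, len);
+          int ro = open("/dev/shm/uffd-demo", O_RDONLY);
+
+          void *shared = mmap(NULL, len, PROT_READ, MAP_SHARED, ro, 0);
+          void *priv   = mmap(NULL, len, PROT_READ, MAP_PRIVATE, ro, 0);
+
+          printf("MAP_SHARED  register: %d (expect -1/EPERM)\n",
+                 try_register(uffd, shared, len));
+          printf("MAP_PRIVATE register: %d (expect 0)\n",
+                 try_register(uffd, priv, len));
+          return 0;
+  }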
+
+Link: http://lkml.kernel.org/r/20181126173452.26955-4-aarcange@redhat.com
+Fixes: ff62a3421044 ("hugetlb: implement memfd sealing")
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
+Reviewed-by: Hugh Dickins <hughd@google.com>
+Reported-by: Jann Horn <jannh@google.com>
+Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
+Cc: <stable@vger.kernel.org>
+Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/userfaultfd.c | 15 +++++++++++++++
+ mm/userfaultfd.c | 15 ++++++---------
+ 2 files changed, 21 insertions(+), 9 deletions(-)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1361,6 +1361,19 @@ static int userfaultfd_register(struct u
+ ret = -EINVAL;
+ if (!vma_can_userfault(cur))
+ goto out_unlock;
++
++ /*
++ * UFFDIO_COPY will fill file holes even without
++ * PROT_WRITE. This check enforces that if this is a
++ * MAP_SHARED, the process has write permission to the backing
++ * file. If VM_MAYWRITE is set it also enforces that on a
++ * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
++ * F_WRITE_SEAL can be taken until the vma is destroyed.
++ */
++ ret = -EPERM;
++ if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
++ goto out_unlock;
++
+ /*
+ * If this vma contains ending address, and huge pages
+ * check alignment.
+@@ -1406,6 +1419,7 @@ static int userfaultfd_register(struct u
+ BUG_ON(!vma_can_userfault(vma));
+ BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
+ vma->vm_userfaultfd_ctx.ctx != ctx);
++ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
+ /*
+ * Nothing to do: this vma is already registered into this
+@@ -1552,6 +1566,7 @@ static int userfaultfd_unregister(struct
+ cond_resched();
+
+ BUG_ON(!vma_can_userfault(vma));
++ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
+ /*
+ * Nothing to do: this vma is already registered into this
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -205,8 +205,9 @@ retry:
+ if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
+ goto out_unlock;
+ /*
+- * Only allow __mcopy_atomic_hugetlb on userfaultfd
+- * registered ranges.
++ * Check the vma is registered in uffd, this is
++ * required to enforce the VM_MAYWRITE check done at
++ * uffd registration time.
+ */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ goto out_unlock;
+@@ -449,13 +450,9 @@ retry:
+ if (!dst_vma)
+ goto out_unlock;
+ /*
+- * Be strict and only allow __mcopy_atomic on userfaultfd
+- * registered ranges to prevent userland errors going
+- * unnoticed. As far as the VM consistency is concerned, it
+- * would be perfectly safe to remove this check, but there's
+- * no useful usage for __mcopy_atomic ouside of userfaultfd
+- * registered ranges. This is after all why these are ioctls
+- * belonging to the userfaultfd and not syscalls.
++ * Check the vma is registered in uffd, this is required to
++ * enforce the VM_MAYWRITE check done at uffd registration
++ * time.
+ */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ goto out_unlock;
--- /dev/null
+From 68239654acafe6aad5a3c1dc7237e60accfebc03 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 20 Nov 2018 11:26:35 +0100
+Subject: x86/fpu: Disable bottom halves while loading FPU registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 68239654acafe6aad5a3c1dc7237e60accfebc03 upstream.
+
+The sequence
+
+ fpu->initialized = 1; /* step A */
+ preempt_disable(); /* step B */
+ fpu__restore(fpu);
+ preempt_enable();
+
+in __fpu__restore_sig() is racy in regard to a context switch.
+
+For 32bit frames, __fpu__restore_sig() prepares the FPU state within
+fpu->state. To ensure that a context switch (switch_fpu_prepare() in
+particular) does not modify fpu->state it uses fpu__drop() which sets
+fpu->initialized to 0.
+
+After fpu->initialized is cleared, the CPU's FPU state is not saved
+to fpu->state during a context switch. The new state is copied into
+fpu->state from userland and sanity-checked, and is then loaded via
+fpu__restore(). fpu->initialized is set to 1 beforehand so that
+fpu__initialize(), which is called as part of fpu__restore(), does not
+overwrite the new state.
+
+A context switch between step A and step B above would save the CPU's
+current FPU registers to fpu->state and overwrite the newly prepared
+state. This looks like a tiny race window, but the Kernel Test Robot
+reported it back in 2016 while we still had lazy FPU support. Borislav
+Petkov made the link between that report and another patch that had been
+posted. Since the removal of lazy FPU support, this race has gone
+unnoticed because the warning has been removed.
+
+Disable bottom halves around the restore sequence to avoid the race.
+Bottom halves need to be disabled because they are allowed to run (even
+with preemption disabled) and might invoke kernel_fpu_begin() while doing
+IPsec.
+
+ [ bp: massage commit message a bit. ]
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Cc: kvm ML <kvm@vger.kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: stable@vger.kernel.org
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20181120102635.ddv3fvavxajjlfqk@linutronix.de
+Link: https://lkml.kernel.org/r/20160226074940.GA28911@pd.tnic
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/fpu/signal.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __use
+ sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+ }
+
++ local_bh_disable();
+ fpu->initialized = 1;
+- preempt_disable();
+ fpu__restore(fpu);
+- preempt_enable();
++ local_bh_enable();
+
+ return err;
+ } else {
--- /dev/null
+From 60c8144afc287ef09ce8c1230c6aa972659ba1bb Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 27 Nov 2018 14:41:37 +0100
+Subject: x86/MCE/AMD: Fix the thresholding machinery initialization order
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 60c8144afc287ef09ce8c1230c6aa972659ba1bb upstream.
+
+Currently, the code sets up the thresholding interrupt vector and only
+then goes about initializing the thresholding banks, which is wrong,
+because an early thresholding interrupt would cause a NULL pointer
+dereference when accessing those banks and prevent the machine from
+booting.
+
+Therefore, set the thresholding interrupt vector only *after* having
+initialized the banks successfully.
+
+Fixes: 18807ddb7f88 ("x86/mce/AMD: Reset Threshold Limit after logging error")
+Reported-by: Rafał Miłecki <rafal@milecki.pl>
+Reported-by: John Clemens <clemej@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Tested-by: Rafał Miłecki <rafal@milecki.pl>
+Tested-by: John Clemens <john@deater.net>
+Cc: Aravind Gopalakrishnan <aravindksg.lkml@gmail.com>
+Cc: linux-edac@vger.kernel.org
+Cc: stable@vger.kernel.org
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: x86@kernel.org
+Cc: Yazen Ghannam <Yazen.Ghannam@amd.com>
+Link: https://lkml.kernel.org/r/20181127101700.2964-1-zajec5@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=201291
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce_amd.c | 19 ++++++-------------
+ 1 file changed, 6 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -56,7 +56,7 @@
+ /* Threshold LVT offset is at MSR0xC0000410[15:12] */
+ #define SMCA_THR_LVT_OFF 0xF000
+
+-static bool thresholding_en;
++static bool thresholding_irq_en;
+
+ static const char * const th_names[] = {
+ "load_store",
+@@ -534,9 +534,8 @@ prepare_threshold_block(unsigned int ban
+
+ set_offset:
+ offset = setup_APIC_mce_threshold(offset, new);
+-
+- if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
+- mce_threshold_vector = amd_threshold_interrupt;
++ if (offset == new)
++ thresholding_irq_en = true;
+
+ done:
+ mce_threshold_block_init(&b, offset);
+@@ -1357,9 +1356,6 @@ int mce_threshold_remove_device(unsigned
+ {
+ unsigned int bank;
+
+- if (!thresholding_en)
+- return 0;
+-
+ for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ continue;
+@@ -1377,9 +1373,6 @@ int mce_threshold_create_device(unsigned
+ struct threshold_bank **bp;
+ int err = 0;
+
+- if (!thresholding_en)
+- return 0;
+-
+ bp = per_cpu(threshold_banks, cpu);
+ if (bp)
+ return 0;
+@@ -1408,9 +1401,6 @@ static __init int threshold_init_device(
+ {
+ unsigned lcpu = 0;
+
+- if (mce_threshold_vector == amd_threshold_interrupt)
+- thresholding_en = true;
+-
+ /* to hit CPUs online before the notifier is up */
+ for_each_online_cpu(lcpu) {
+ int err = mce_threshold_create_device(lcpu);
+@@ -1419,6 +1409,9 @@ static __init int threshold_init_device(
+ return err;
+ }
+
++ if (thresholding_irq_en)
++ mce_threshold_vector = amd_threshold_interrupt;
++
+ return 0;
+ }
+ /*
--- /dev/null
+From 2958b66694e018c552be0b60521fec27e8d12988 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 13:29:41 -0800
+Subject: xtensa: enable coprocessors that are being flushed
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 2958b66694e018c552be0b60521fec27e8d12988 upstream.
+
+coprocessor_flush_all may be called from the context of a thread that is
+different from the thread being flushed. In that case the contents of the
+cpenable special register may not match ti->cpenable of the target
+thread, resulting in an unhandled coprocessor exception in the kernel
+context.
+Set the cpenable special register to the ti->cpenable of the target
+thread for the duration of the flush and restore it afterwards.
+This fixes the following crash caused by coprocessor register inspection
+in native gdb:
+
+ (gdb) p/x $w0
+ Illegal instruction in kernel: sig: 9 [#1] PREEMPT
+ Call Trace:
+ ___might_sleep+0x184/0x1a4
+ __might_sleep+0x41/0xac
+ exit_signals+0x14/0x218
+ do_exit+0xc9/0x8b8
+ die+0x99/0xa0
+ do_illegal_instruction+0x18/0x6c
+ common_exception+0x77/0x77
+ coprocessor_flush+0x16/0x3c
+ arch_ptrace+0x46c/0x674
+ sys_ptrace+0x2ce/0x3b4
+ system_call+0x54/0x80
+ common_exception+0x77/0x77
+ note: gdb[100] exited with preempt_count 1
+ Killed
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/process.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -94,18 +94,21 @@ void coprocessor_release_all(struct thre
+
+ void coprocessor_flush_all(struct thread_info *ti)
+ {
+- unsigned long cpenable;
++ unsigned long cpenable, old_cpenable;
+ int i;
+
+ preempt_disable();
+
++ RSR_CPENABLE(old_cpenable);
+ cpenable = ti->cpenable;
++ WSR_CPENABLE(cpenable);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ coprocessor_flush(ti, i);
+ cpenable >>= 1;
+ }
++ WSR_CPENABLE(old_cpenable);
+
+ preempt_enable();
+ }
--- /dev/null
+From 03bc996af0cc71c7f30c384d8ce7260172423b34 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 15:18:26 -0800
+Subject: xtensa: fix coprocessor context offset definitions
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 03bc996af0cc71c7f30c384d8ce7260172423b34 upstream.
+
+Coprocessor context offsets are used by the assembly code that moves
+coprocessor context between the individual fields of the
+thread_info::xtregs_cp structure and the coprocessor registers. All of
+the THREAD_XTREGS_CPn offsets were defined as the offset of xtregs_cp
+itself rather than of its individual cp0..cp7 members, so every
+coprocessor's context was saved to and restored from the same location.
+This fixes coprocessor context clobbering on flushing and reloading
+during normal user code execution and user process debugging in the
+presence of more than one coprocessor in the core configuration.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/asm-offsets.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/xtensa/kernel/asm-offsets.c
++++ b/arch/xtensa/kernel/asm-offsets.c
+@@ -94,14 +94,14 @@ int main(void)
+ DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+ DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+ #if XTENSA_HAVE_COPROCESSORS
+- DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+- DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
++ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
++ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
++ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
++ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
++ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
++ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
++ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
++ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
+ #endif
+ DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+ DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
--- /dev/null
+From 38a35a78c5e270cbe53c4fef6b0d3c2da90dd849 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 26 Nov 2018 18:06:01 -0800
+Subject: xtensa: fix coprocessor part of ptrace_{get,set}xregs
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 38a35a78c5e270cbe53c4fef6b0d3c2da90dd849 upstream.
+
+The layout of the coprocessor registers in elf_xtregs_t and
+xtregs_coprocessor_t may differ due to alignment. Thus it is not always
+possible to copy data between the xtregs_coprocessor_t structure and the
+elf_xtregs_t as a whole and get correct values for all registers.
+Use a table of offsets and sizes of the individual coprocessor register
+groups to do the coprocessor context copying in ptrace_getxregs and
+ptrace_setxregs.
+This fixes native gdb reading incorrect coprocessor register values from
+the user process on an xtensa core with multiple coprocessors and
+registers with high alignment requirements.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/ptrace.c | 42 ++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 38 insertions(+), 4 deletions(-)
+
+--- a/arch/xtensa/kernel/ptrace.c
++++ b/arch/xtensa/kernel/ptrace.c
+@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_st
+ }
+
+
++#if XTENSA_HAVE_COPROCESSORS
++#define CP_OFFSETS(cp) \
++ { \
++ .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
++ .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
++ .sz = sizeof(xtregs_ ## cp ## _t), \
++ }
++
++static const struct {
++ size_t elf_xtregs_offset;
++ size_t ti_offset;
++ size_t sz;
++} cp_offsets[] = {
++ CP_OFFSETS(cp0),
++ CP_OFFSETS(cp1),
++ CP_OFFSETS(cp2),
++ CP_OFFSETS(cp3),
++ CP_OFFSETS(cp4),
++ CP_OFFSETS(cp5),
++ CP_OFFSETS(cp6),
++ CP_OFFSETS(cp7),
++};
++#endif
++
+ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+ {
+ struct pt_regs *regs = task_pt_regs(child);
+ struct thread_info *ti = task_thread_info(child);
+ elf_xtregs_t __user *xtregs = uregs;
+ int ret = 0;
++ int i __maybe_unused;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+ return -EIO;
+@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_s
+ #if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessor registers to memory. */
+ coprocessor_flush_all(ti);
+- ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+- sizeof(xtregs_coprocessor_t));
++
++ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
++ ret |= __copy_to_user((char __user *)xtregs +
++ cp_offsets[i].elf_xtregs_offset,
++ (const char *)ti +
++ cp_offsets[i].ti_offset,
++ cp_offsets[i].sz);
+ #endif
+ ret |= __copy_to_user(&xtregs->opt, ®s->xtregs_opt,
+ sizeof(xtregs->opt));
+@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_s
+ struct pt_regs *regs = task_pt_regs(child);
+ elf_xtregs_t *xtregs = uregs;
+ int ret = 0;
++ int i __maybe_unused;
+
+ if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+ return -EFAULT;
+@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_s
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+
+- ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+- sizeof(xtregs_coprocessor_t));
++ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
++ ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
++ (const char __user *)xtregs +
++ cp_offsets[i].elf_xtregs_offset,
++ cp_offsets[i].sz);
+ #endif
+ ret |= __copy_from_user(®s->xtregs_opt, &xtregs->opt,
+ sizeof(xtregs->opt));