5.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 6 Feb 2020 16:49:22 +0000 (17:49 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 6 Feb 2020 16:49:22 +0000 (17:49 +0100)
added patches:
alsa-hda-add-clevo-w65_67sb-the-power_save-blacklist.patch
alsa-hda-add-jasperlake-pci-id-and-codec-vid.patch
alsa-hda-apply-aligned-mmio-access-only-conditionally.patch
arm64-acpi-fix-daif-manipulation-with-pnmi.patch
kvm-arm-arm64-correct-aarch32-spsr-on-exception-entry.patch
kvm-arm-arm64-correct-cpsr-on-exception-entry.patch
kvm-arm64-correct-pstate-on-exception-entry.patch
kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch
mips-boot-fix-typo-in-vmlinux.lzma.its-target.patch
mips-fix-indentation-of-the-relocs-message.patch
mips-syscalls-fix-indentation-of-the-sysnr-message.patch
powerpc-32s-fix-bad_kuap_fault.patch
powerpc-32s-fix-cpu-wake-up-from-sleep-mode.patch
powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch
powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch
powerpc-ptdump-fix-w-x-verification.patch
powerpc-xmon-don-t-access-asdr-in-vms.patch
s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch

19 files changed:
queue-5.4/alsa-hda-add-clevo-w65_67sb-the-power_save-blacklist.patch [new file with mode: 0644]
queue-5.4/alsa-hda-add-jasperlake-pci-id-and-codec-vid.patch [new file with mode: 0644]
queue-5.4/alsa-hda-apply-aligned-mmio-access-only-conditionally.patch [new file with mode: 0644]
queue-5.4/arm64-acpi-fix-daif-manipulation-with-pnmi.patch [new file with mode: 0644]
queue-5.4/kvm-arm-arm64-correct-aarch32-spsr-on-exception-entry.patch [new file with mode: 0644]
queue-5.4/kvm-arm-arm64-correct-cpsr-on-exception-entry.patch [new file with mode: 0644]
queue-5.4/kvm-arm64-correct-pstate-on-exception-entry.patch [new file with mode: 0644]
queue-5.4/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch [new file with mode: 0644]
queue-5.4/mips-boot-fix-typo-in-vmlinux.lzma.its-target.patch [new file with mode: 0644]
queue-5.4/mips-fix-indentation-of-the-relocs-message.patch [new file with mode: 0644]
queue-5.4/mips-syscalls-fix-indentation-of-the-sysnr-message.patch [new file with mode: 0644]
queue-5.4/powerpc-32s-fix-bad_kuap_fault.patch [new file with mode: 0644]
queue-5.4/powerpc-32s-fix-cpu-wake-up-from-sleep-mode.patch [new file with mode: 0644]
queue-5.4/powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch [new file with mode: 0644]
queue-5.4/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch [new file with mode: 0644]
queue-5.4/powerpc-ptdump-fix-w-x-verification.patch [new file with mode: 0644]
queue-5.4/powerpc-xmon-don-t-access-asdr-in-vms.patch [new file with mode: 0644]
queue-5.4/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/alsa-hda-add-clevo-w65_67sb-the-power_save-blacklist.patch b/queue-5.4/alsa-hda-add-clevo-w65_67sb-the-power_save-blacklist.patch
new file mode 100644 (file)
index 0000000..762cb16
--- /dev/null
@@ -0,0 +1,37 @@
+From d8feb6080bb0c9f4d799a423d9453048fdd06990 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Sat, 25 Jan 2020 19:10:21 +0100
+Subject: ALSA: hda: Add Clevo W65_67SB to the power_save blacklist
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit d8feb6080bb0c9f4d799a423d9453048fdd06990 upstream.
+
+Using HDA power-saving on the Clevo W65_67SB causes the first 0.5
+seconds of audio to be missing every time audio starts playing.
+
+This commit adds the Clevo W65_67SB to the power_save blacklist to
+avoid this issue.
+
+Cc: stable@vger.kernel.org
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1525104
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20200125181021.70446-1-hdegoede@redhat.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2156,6 +2156,8 @@ static struct snd_pci_quirk power_save_b
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
+       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
++      SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
++      /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1028, 0x0497, "Dell Precision T3600", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       /* Note the P55A-UD3 and Z87-D3HP share the subsys id for the HDA dev */
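For context, a power_save_blacklist entry like the one added above is
matched at probe time against the card's PCI subsystem IDs. Below is a
minimal sketch of that lookup, assuming the usual snd_pci_quirk helpers;
it illustrates the mechanism only and is not code from this patch, and
the power_save variable stands in for the driver's module-parameter
handling:

    /* Sketch: how an hda_intel-style driver consults the blacklist.
     * A table hit forces the power_save default to 0, so the controller
     * never powers down and the start of playback is not clipped.
     */
    const struct snd_pci_quirk *q;

    q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
    if (q) {
            dev_info(chip->card->dev,
                     "device %04x:%04x is on the power_save blacklist\n",
                     q->subvendor, q->subdevice);
            power_save = 0; /* hypothetical flag: keep controller powered */
    }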
diff --git a/queue-5.4/alsa-hda-add-jasperlake-pci-id-and-codec-vid.patch b/queue-5.4/alsa-hda-add-jasperlake-pci-id-and-codec-vid.patch
new file mode 100644 (file)
index 0000000..4bf35fe
--- /dev/null
@@ -0,0 +1,45 @@
+From 78be2228c15dd45865b102b29d72e721f0ace9b1 Mon Sep 17 00:00:00 2001
+From: Yong Zhi <yong.zhi@intel.com>
+Date: Fri, 31 Jan 2020 14:40:03 -0600
+Subject: ALSA: hda: Add JasperLake PCI ID and codec vid
+
+From: Yong Zhi <yong.zhi@intel.com>
+
+commit 78be2228c15dd45865b102b29d72e721f0ace9b1 upstream.
+
+Add HD Audio Device PCI ID and codec vendor_id for the Intel JasperLake
+REV2/A0 silicon.
+
+Signed-off-by: Yong Zhi <yong.zhi@intel.com>
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200131204003.10153-1-pierre-louis.bossart@linux.intel.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c  |    2 ++
+ sound/pci/hda/patch_hdmi.c |    1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2417,6 +2417,8 @@ static const struct pci_device_id azx_id
+       /* Jasperlake */
+       { PCI_DEVICE(0x8086, 0x38c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++      { PCI_DEVICE(0x8086, 0x4dc8),
++        .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake */
+       { PCI_DEVICE(0x8086, 0xa0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4153,6 +4153,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake
+ HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI",        patch_i915_glk_hdmi),
+ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",   patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
++HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI",        patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",        patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",       patch_i915_byt_hdmi),
+ HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",  patch_i915_byt_hdmi),
diff --git a/queue-5.4/alsa-hda-apply-aligned-mmio-access-only-conditionally.patch b/queue-5.4/alsa-hda-apply-aligned-mmio-access-only-conditionally.patch
new file mode 100644 (file)
index 0000000..0ee42ac
--- /dev/null
@@ -0,0 +1,174 @@
+From 4d024fe8f806e20e577cc934204c5784c7063293 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 20 Jan 2020 11:41:27 +0100
+Subject: ALSA: hda: Apply aligned MMIO access only conditionally
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 4d024fe8f806e20e577cc934204c5784c7063293 upstream.
+
+It turned out that the recent simplification of HD-audio bus access
+helpers caused a regression on the virtual HD-audio device on QEMU
+with ARM platforms.  The driver got a CORB/RIRB timeout and couldn't
+probe any codecs.
+
+The essential difference that caused the problem was the aligned MMIO
+access enforced by that simplification.  Since the snd-hda-tegra driver
+is enabled on ARM, it enables CONFIG_SND_HDA_ALIGNED_MMIO, which makes
+all HD-audio drivers use aligned MMIO accesses.  While this is
+mandatory for snd-hda-tegra, it seems that snd-hda-intel on ARM gets
+broken by this access pattern.
+
+For addressing the regression, this patch introduces a new flag,
+aligned_mmio, to hdac_bus object, and applies the aligned MMIO only
+when this flag is set.  This change affects only platforms with
+CONFIG_SND_HDA_ALIGNED_MMIO set, i.e. mostly only for ARM platforms.
+
+Unfortunately the patch became a bit bigger than it should be, just
+because the former calls didn't take the hdac_bus object as an
+argument, hence we had to extend the call patterns.
+
+Fixes: 19abfefd4c76 ("ALSA: hda: Direct MMIO accesses")
+BugLink: https://bugzilla.opensuse.org/show_bug.cgi?id=1161152
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200120104127.28985-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/hdaudio.h   |   77 +++++++++++++++++++++++++++++++---------------
+ sound/pci/hda/hda_tegra.c |    1 
+ 2 files changed, 54 insertions(+), 24 deletions(-)
+
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -8,6 +8,7 @@
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/timecounter.h>
+ #include <sound/core.h>
+@@ -330,6 +331,7 @@ struct hdac_bus {
+       bool chip_init:1;               /* h/w initialized */
+       /* behavior flags */
++      bool aligned_mmio:1;            /* aligned MMIO access */
+       bool sync_write:1;              /* sync after verb write */
+       bool use_posbuf:1;              /* use position buffer */
+       bool snoop:1;                   /* enable snooping */
+@@ -405,34 +407,61 @@ void snd_hdac_bus_free_stream_pages(stru
+ unsigned int snd_hdac_aligned_read(void __iomem *addr, unsigned int mask);
+ void snd_hdac_aligned_write(unsigned int val, void __iomem *addr,
+                           unsigned int mask);
+-#define snd_hdac_reg_writeb(v, addr)  snd_hdac_aligned_write(v, addr, 0xff)
+-#define snd_hdac_reg_writew(v, addr)  snd_hdac_aligned_write(v, addr, 0xffff)
+-#define snd_hdac_reg_readb(addr)      snd_hdac_aligned_read(addr, 0xff)
+-#define snd_hdac_reg_readw(addr)      snd_hdac_aligned_read(addr, 0xffff)
+-#else /* CONFIG_SND_HDA_ALIGNED_MMIO */
+-#define snd_hdac_reg_writeb(val, addr)        writeb(val, addr)
+-#define snd_hdac_reg_writew(val, addr)        writew(val, addr)
+-#define snd_hdac_reg_readb(addr)      readb(addr)
+-#define snd_hdac_reg_readw(addr)      readw(addr)
+-#endif /* CONFIG_SND_HDA_ALIGNED_MMIO */
+-#define snd_hdac_reg_writel(val, addr)        writel(val, addr)
+-#define snd_hdac_reg_readl(addr)      readl(addr)
++#define snd_hdac_aligned_mmio(bus)    (bus)->aligned_mmio
++#else
++#define snd_hdac_aligned_mmio(bus)    false
++#define snd_hdac_aligned_read(addr, mask)     0
++#define snd_hdac_aligned_write(val, addr, mask) do {} while (0)
++#endif
++
++static inline void snd_hdac_reg_writeb(struct hdac_bus *bus, void __iomem *addr,
++                                     u8 val)
++{
++      if (snd_hdac_aligned_mmio(bus))
++              snd_hdac_aligned_write(val, addr, 0xff);
++      else
++              writeb(val, addr);
++}
++
++static inline void snd_hdac_reg_writew(struct hdac_bus *bus, void __iomem *addr,
++                                     u16 val)
++{
++      if (snd_hdac_aligned_mmio(bus))
++              snd_hdac_aligned_write(val, addr, 0xffff);
++      else
++              writew(val, addr);
++}
++
++static inline u8 snd_hdac_reg_readb(struct hdac_bus *bus, void __iomem *addr)
++{
++      return snd_hdac_aligned_mmio(bus) ?
++              snd_hdac_aligned_read(addr, 0xff) : readb(addr);
++}
++
++static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
++{
++      return snd_hdac_aligned_mmio(bus) ?
++              snd_hdac_aligned_read(addr, 0xffff) : readw(addr);
++}
++
++#define snd_hdac_reg_writel(bus, addr, val)   writel(val, addr)
++#define snd_hdac_reg_readl(bus, addr) readl(addr)
+ /*
+  * macros for easy use
+  */
+ #define _snd_hdac_chip_writeb(chip, reg, value) \
+-      snd_hdac_reg_writeb(value, (chip)->remap_addr + (reg))
++      snd_hdac_reg_writeb(chip, (chip)->remap_addr + (reg), value)
+ #define _snd_hdac_chip_readb(chip, reg) \
+-      snd_hdac_reg_readb((chip)->remap_addr + (reg))
++      snd_hdac_reg_readb(chip, (chip)->remap_addr + (reg))
+ #define _snd_hdac_chip_writew(chip, reg, value) \
+-      snd_hdac_reg_writew(value, (chip)->remap_addr + (reg))
++      snd_hdac_reg_writew(chip, (chip)->remap_addr + (reg), value)
+ #define _snd_hdac_chip_readw(chip, reg) \
+-      snd_hdac_reg_readw((chip)->remap_addr + (reg))
++      snd_hdac_reg_readw(chip, (chip)->remap_addr + (reg))
+ #define _snd_hdac_chip_writel(chip, reg, value) \
+-      snd_hdac_reg_writel(value, (chip)->remap_addr + (reg))
++      snd_hdac_reg_writel(chip, (chip)->remap_addr + (reg), value)
+ #define _snd_hdac_chip_readl(chip, reg) \
+-      snd_hdac_reg_readl((chip)->remap_addr + (reg))
++      snd_hdac_reg_readl(chip, (chip)->remap_addr + (reg))
+ /* read/write a register, pass without AZX_REG_ prefix */
+ #define snd_hdac_chip_writel(chip, reg, value) \
+@@ -540,17 +569,17 @@ int snd_hdac_get_stream_stripe_ctl(struc
+  */
+ /* read/write a register, pass without AZX_REG_ prefix */
+ #define snd_hdac_stream_writel(dev, reg, value) \
+-      snd_hdac_reg_writel(value, (dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_writel((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
+ #define snd_hdac_stream_writew(dev, reg, value) \
+-      snd_hdac_reg_writew(value, (dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_writew((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
+ #define snd_hdac_stream_writeb(dev, reg, value) \
+-      snd_hdac_reg_writeb(value, (dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_writeb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
+ #define snd_hdac_stream_readl(dev, reg) \
+-      snd_hdac_reg_readl((dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_readl((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+ #define snd_hdac_stream_readw(dev, reg) \
+-      snd_hdac_reg_readw((dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+ #define snd_hdac_stream_readb(dev, reg) \
+-      snd_hdac_reg_readb((dev)->sd_addr + AZX_REG_ ## reg)
++      snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+ /* update a register, pass without AZX_REG_ prefix */
+ #define snd_hdac_stream_updatel(dev, reg, mask, val) \
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -398,6 +398,7 @@ static int hda_tegra_create(struct snd_c
+               return err;
+       chip->bus.needs_damn_long_delay = 1;
++      chip->bus.core.aligned_mmio = 1;
+       err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+       if (err < 0) {
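The snd_hdac_aligned_read()/snd_hdac_aligned_write() helpers that the
new inline wrappers dispatch to are defined elsewhere in the tree; here
is a minimal sketch of the read side, simplified from the real helper,
to illustrate what "aligned MMIO" means in this patch:

    /* Emulate a byte/word MMIO read using only 32-bit aligned accesses:
     * round the address down to a 4-byte boundary, readl() the whole
     * word, then shift and mask out the requested sub-word.
     */
    static unsigned int aligned_read_sketch(void __iomem *addr,
                                            unsigned int mask)
    {
            void __iomem *aligned =
                    (void __iomem *)((unsigned long)addr & ~3UL);
            unsigned int shift = ((unsigned long)addr & 3) << 3;

            return (readl(aligned) >> shift) & mask;
    }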
diff --git a/queue-5.4/arm64-acpi-fix-daif-manipulation-with-pnmi.patch b/queue-5.4/arm64-acpi-fix-daif-manipulation-with-pnmi.patch
new file mode 100644 (file)
index 0000000..04a0e6f
--- /dev/null
@@ -0,0 +1,89 @@
+From e533dbe9dcb199bb637a2c465f3a6e70564994fe Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 22 Jan 2020 12:45:46 +0000
+Subject: arm64: acpi: fix DAIF manipulation with pNMI
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit e533dbe9dcb199bb637a2c465f3a6e70564994fe upstream.
+
+Since commit:
+
+  d44f1b8dd7e66d80 ("arm64: KVM/mm: Move SEA handling behind a single 'claim' interface")
+
+... the top-level APEI SEA handler has the shape:
+
+1. current_flags = arch_local_save_flags()
+2. local_daif_restore(DAIF_ERRCTX)
+3. <GHES handler>
+4. local_daif_restore(current_flags)
+
+However, since commit:
+
+  4a503217ce37e1f4 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
+
+... when pseudo-NMIs (pNMIs) are in use, arch_local_save_flags() will save
+the PMR value rather than the DAIF flags.
+
+The combination of these two commits means that the APEI SEA handler will
+erroneously attempt to restore the PMR value into DAIF. Fix this by
+factoring local_daif_save_flags() out of local_daif_save(), so that we
+can consistently save DAIF in step #1, regardless of whether pNMIs are in
+use.
+
+Both commits were introduced concurrently in v5.0.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 4a503217ce37e1f4 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
+Fixes: d44f1b8dd7e66d80 ("arm64: KVM/mm: Move SEA handling behind a single 'claim' interface")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/daifflags.h |   11 ++++++++++-
+ arch/arm64/kernel/acpi.c           |    2 +-
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -36,7 +36,7 @@ static inline void local_daif_mask(void)
+       trace_hardirqs_off();
+ }
+-static inline unsigned long local_daif_save(void)
++static inline unsigned long local_daif_save_flags(void)
+ {
+       unsigned long flags;
+@@ -48,6 +48,15 @@ static inline unsigned long local_daif_s
+                       flags |= PSR_I_BIT;
+       }
++      return flags;
++}
++
++static inline unsigned long local_daif_save(void)
++{
++      unsigned long flags;
++
++      flags = local_daif_save_flags();
++
+       local_daif_mask();
+       return flags;
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs)
+       if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+               return err;
+-      current_flags = arch_local_save_flags();
++      current_flags = local_daif_save_flags();
+       /*
+        * SEA can interrupt SError, mask it and describe this as an NMI so
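With the fix applied, the top-level handler keeps the same four-step
shape quoted in the commit message, but step 1 now always produces a
DAIF value. A simplified sketch of the resulting flow, not the literal
kernel code:

    unsigned long current_flags;

    current_flags = local_daif_save_flags(); /* reads DAIF even with pNMI */
    local_daif_restore(DAIF_ERRCTX);         /* enter the error context */
    /* ... GHES handling ... */
    local_daif_restore(current_flags);       /* restores a DAIF value,
                                                never a stray PMR value */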
diff --git a/queue-5.4/kvm-arm-arm64-correct-aarch32-spsr-on-exception-entry.patch b/queue-5.4/kvm-arm-arm64-correct-aarch32-spsr-on-exception-entry.patch
new file mode 100644 (file)
index 0000000..fdc36f3
--- /dev/null
@@ -0,0 +1,125 @@
+From 1cfbb484de158e378e8971ac40f3082e53ecca55 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 8 Jan 2020 13:43:24 +0000
+Subject: KVM: arm/arm64: Correct AArch32 SPSR on exception entry
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 1cfbb484de158e378e8971ac40f3082e53ecca55 upstream.
+
+Confusingly, there are three SPSR layouts that a kernel may need to deal
+with:
+
+(1) An AArch64 SPSR_ELx view of an AArch64 pstate
+(2) An AArch64 SPSR_ELx view of an AArch32 pstate
+(3) An AArch32 SPSR_* view of an AArch32 pstate
+
+When the KVM AArch32 support code deals with SPSR_{EL2,HYP}, it's either
+dealing with #2 or #3 consistently. On arm64 the PSR_AA32_* definitions
+match the AArch64 SPSR_ELx view, and on arm the PSR_AA32_* definitions
+match the AArch32 SPSR_* view.
+
+However, when we inject an exception into an AArch32 guest, we have to
+synthesize the AArch32 SPSR_* that the guest will see. Thus, an AArch64
+host needs to synthesize layout #3 from layout #2.
+
+This patch adds a new host_spsr_to_spsr32() helper for this, and makes
+use of it in the KVM AArch32 support code. For arm64 we need to shuffle
+the DIT bit around, and remove the SS bit, while for arm we can use the
+value as-is.
+
+I've open-coded the bit manipulation for now to avoid having to rework
+the existing PSR_* definitions into PSR64_AA32_* and PSR32_AA32_*
+definitions. I hope to perform a more thorough refactoring in future so
+that we can handle pstate view manipulation more consistently across the
+kernel tree.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200108134324.46500-4-mark.rutland@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_emulate.h   |    5 +++++
+ arch/arm64/include/asm/kvm_emulate.h |   32 ++++++++++++++++++++++++++++++++
+ virt/kvm/arm/aarch32.c               |    6 +++---
+ 3 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -53,6 +53,11 @@ static inline void vcpu_write_spsr(struc
+       *__vcpu_spsr(vcpu) = v;
+ }
++static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
++{
++      return spsr;
++}
++
+ static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+                                        u8 reg_num)
+ {
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -204,6 +204,38 @@ static inline void vcpu_write_spsr(struc
+               vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+ }
++/*
++ * The layout of SPSR for an AArch32 state is different when observed from an
++ * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
++ * view given an AArch64 view.
++ *
++ * In ARM DDI 0487E.a see:
++ *
++ * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
++ * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
++ * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
++ *
++ * Which show the following differences:
++ *
++ * | Bit | AA64 | AA32 | Notes                       |
++ * +-----+------+------+-----------------------------|
++ * | 24  | DIT  | J    | J is RES0 in ARMv8          |
++ * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
++ *
++ * ... and all other bits are (currently) common.
++ */
++static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
++{
++      const unsigned long overlap = BIT(24) | BIT(21);
++      unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);
++
++      spsr &= ~overlap;
++
++      spsr |= dit << 21;
++
++      return spsr;
++}
++
+ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+ {
+       u32 mode;
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -129,15 +129,15 @@ static unsigned long get_except32_cpsr(s
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+-      unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+-      bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
++      unsigned long spsr = *vcpu_cpsr(vcpu);
++      bool is_thumb = (spsr & PSR_AA32_T_BIT);
+       u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+       u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+       *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
+       /* Note: These now point to the banked copies */
+-      vcpu_write_spsr(vcpu, new_spsr_value);
++      vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
+       *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+       /* Branch to exception vector */
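A quick worked example of host_spsr_to_spsr32() against the bit table
above, using an illustrative SPSR value that is not taken from the
patch itself:

    /* AArch64 SPSR_ELx view: DIT at bit 24, SS at bit 21 */
    unsigned long spsr = BIT(24) | BIT(21);   /* 0x01200000 */

    spsr = host_spsr_to_spsr32(spsr);
    /* AArch32 view: SS dropped, DIT moved to bit 21 -> 0x00200000 */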
diff --git a/queue-5.4/kvm-arm-arm64-correct-cpsr-on-exception-entry.patch b/queue-5.4/kvm-arm-arm64-correct-cpsr-on-exception-entry.patch
new file mode 100644 (file)
index 0000000..f6f72a0
--- /dev/null
@@ -0,0 +1,217 @@
+From 3c2483f15499b877ccb53250d88addb8c91da147 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 8 Jan 2020 13:43:23 +0000
+Subject: KVM: arm/arm64: Correct CPSR on exception entry
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 3c2483f15499b877ccb53250d88addb8c91da147 upstream.
+
+When KVM injects an exception into a guest, it generates the CPSR value
+from scratch, configuring CPSR.{M,A,I,T,E}, and setting all other
+bits to zero.
+
+This isn't correct, as the architecture specifies that some CPSR bits
+are (conditionally) cleared or set upon an exception, and others are
+unchanged from the original context.
+
+This patch adds logic to match the architectural behaviour. To make this
+simple to follow/audit/extend, documentation references are provided,
+and bits are configured in order of their layout in SPSR_EL2. This
+layout can be seen in the diagram on ARM DDI 0487E.a page C5-426.
+
+Note that this code is used by both arm and arm64, and is intended to
+function with the SPSR_EL2 and SPSR_HYP layouts.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200108134324.46500-3-mark.rutland@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_emulate.h |   12 ++++
+ arch/arm64/include/asm/ptrace.h    |    1 
+ virt/kvm/arm/aarch32.c             |  111 +++++++++++++++++++++++++++++++++----
+ 3 files changed, 114 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -14,13 +14,25 @@
+ #include <asm/cputype.h>
+ /* arm64 compatibility macros */
++#define PSR_AA32_MODE_FIQ     FIQ_MODE
++#define PSR_AA32_MODE_SVC     SVC_MODE
+ #define PSR_AA32_MODE_ABT     ABT_MODE
+ #define PSR_AA32_MODE_UND     UND_MODE
+ #define PSR_AA32_T_BIT                PSR_T_BIT
++#define PSR_AA32_F_BIT                PSR_F_BIT
+ #define PSR_AA32_I_BIT                PSR_I_BIT
+ #define PSR_AA32_A_BIT                PSR_A_BIT
+ #define PSR_AA32_E_BIT                PSR_E_BIT
+ #define PSR_AA32_IT_MASK      PSR_IT_MASK
++#define PSR_AA32_GE_MASK      0x000f0000
++#define PSR_AA32_DIT_BIT      0x00200000
++#define PSR_AA32_PAN_BIT      0x00400000
++#define PSR_AA32_SSBS_BIT     0x00800000
++#define PSR_AA32_Q_BIT                PSR_Q_BIT
++#define PSR_AA32_V_BIT                PSR_V_BIT
++#define PSR_AA32_C_BIT                PSR_C_BIT
++#define PSR_AA32_Z_BIT                PSR_Z_BIT
++#define PSR_AA32_N_BIT                PSR_N_BIT
+ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -62,6 +62,7 @@
+ #define PSR_AA32_I_BIT                0x00000080
+ #define PSR_AA32_A_BIT                0x00000100
+ #define PSR_AA32_E_BIT                0x00000200
++#define PSR_AA32_PAN_BIT      0x00400000
+ #define PSR_AA32_SSBS_BIT     0x00800000
+ #define PSR_AA32_DIT_BIT      0x01000000
+ #define PSR_AA32_Q_BIT                0x08000000
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -10,6 +10,7 @@
+  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+  */
++#include <linux/bits.h>
+ #include <linux/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
+@@ -28,22 +29,112 @@ static const u8 return_offsets[8][2] = {
+       [7] = { 4, 4 },         /* FIQ, unused */
+ };
++/*
++ * When an exception is taken, most CPSR fields are left unchanged in the
++ * handler. However, some are explicitly overridden (e.g. M[4:0]).
++ *
++ * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
++ * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
++ * obsoleted by the ARMv7 virtualization extensions and is RES0.
++ *
++ * For the SPSR layout seen from AArch32, see:
++ * - ARM DDI 0406C.d, page B1-1148
++ * - ARM DDI 0487E.a, page G8-6264
++ *
++ * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
++ * - ARM DDI 0487E.a, page C5-426
++ *
++ * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
++ * MSB to LSB.
++ */
++static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
++{
++      u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
++      unsigned long old, new;
++
++      old = *vcpu_cpsr(vcpu);
++      new = 0;
++
++      new |= (old & PSR_AA32_N_BIT);
++      new |= (old & PSR_AA32_Z_BIT);
++      new |= (old & PSR_AA32_C_BIT);
++      new |= (old & PSR_AA32_V_BIT);
++      new |= (old & PSR_AA32_Q_BIT);
++
++      // CPSR.IT[7:0] are set to zero upon any exception
++      // See ARM DDI 0487E.a, section G1.12.3
++      // See ARM DDI 0406C.d, section B1.8.3
++
++      new |= (old & PSR_AA32_DIT_BIT);
++
++      // CPSR.SSBS is set to SCTLR.DSSBS upon any exception
++      // See ARM DDI 0487E.a, page G8-6244
++      if (sctlr & BIT(31))
++              new |= PSR_AA32_SSBS_BIT;
++
++      // CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
++      // SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
++      // See ARM DDI 0487E.a, page G8-6246
++      new |= (old & PSR_AA32_PAN_BIT);
++      if (!(sctlr & BIT(23)))
++              new |= PSR_AA32_PAN_BIT;
++
++      // SS does not exist in AArch32, so ignore
++
++      // CPSR.IL is set to zero upon any exception
++      // See ARM DDI 0487E.a, page G1-5527
++
++      new |= (old & PSR_AA32_GE_MASK);
++
++      // CPSR.IT[7:0] are set to zero upon any exception
++      // See prior comment above
++
++      // CPSR.E is set to SCTLR.EE upon any exception
++      // See ARM DDI 0487E.a, page G8-6245
++      // See ARM DDI 0406C.d, page B4-1701
++      if (sctlr & BIT(25))
++              new |= PSR_AA32_E_BIT;
++
++      // CPSR.A is unchanged upon an exception to Undefined, Supervisor
++      // CPSR.A is set upon an exception to other modes
++      // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
++      // See ARM DDI 0406C.d, page B1-1182
++      new |= (old & PSR_AA32_A_BIT);
++      if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
++              new |= PSR_AA32_A_BIT;
++
++      // CPSR.I is set upon any exception
++      // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
++      // See ARM DDI 0406C.d, page B1-1182
++      new |= PSR_AA32_I_BIT;
++
++      // CPSR.F is set upon an exception to FIQ
++      // CPSR.F is unchanged upon an exception to other modes
++      // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
++      // See ARM DDI 0406C.d, page B1-1182
++      new |= (old & PSR_AA32_F_BIT);
++      if (mode == PSR_AA32_MODE_FIQ)
++              new |= PSR_AA32_F_BIT;
++
++      // CPSR.T is set to SCTLR.TE upon any exception
++      // See ARM DDI 0487E.a, page G8-5514
++      // See ARM DDI 0406C.d, page B1-1181
++      if (sctlr & BIT(30))
++              new |= PSR_AA32_T_BIT;
++
++      new |= mode;
++
++      return new;
++}
++
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+-      unsigned long cpsr;
+       unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+       bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
+       u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+       u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+-      cpsr = mode | PSR_AA32_I_BIT;
+-
+-      if (sctlr & (1 << 30))
+-              cpsr |= PSR_AA32_T_BIT;
+-      if (sctlr & (1 << 25))
+-              cpsr |= PSR_AA32_E_BIT;
+-
+-      *vcpu_cpsr(vcpu) = cpsr;
++      *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
+       /* Note: These now point to the banked copies */
+       vcpu_write_spsr(vcpu, new_spsr_value);
+@@ -84,7 +175,7 @@ static void inject_abt32(struct kvm_vcpu
+               fsr = &vcpu_cp15(vcpu, c5_DFSR);
+       }
+-      prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
++      prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);
+       *far = addr;
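A worked example of get_except32_cpsr() with hypothetical values: a
guest in User mode with only CPSR.C set takes a data abort, and
SCTLR.{TE,EE,DSSBS} are all 0 with SCTLR.SPAN = 1:

    /*
     * old  = 0x20000010          C set, mode = User (0b10000)
     * mode = PSR_AA32_MODE_ABT   (0x17)
     *
     * new  = C                   condition flags carried over
     *      | PSR_AA32_A_BIT      target is neither UND nor SVC -> A set
     *      | PSR_AA32_I_BIT      I is set on any exception
     *      | PSR_AA32_MODE_ABT
     *      = 0x20000197
     */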
diff --git a/queue-5.4/kvm-arm64-correct-pstate-on-exception-entry.patch b/queue-5.4/kvm-arm64-correct-pstate-on-exception-entry.patch
new file mode 100644 (file)
index 0000000..c95eea5
--- /dev/null
@@ -0,0 +1,144 @@
+From a425372e733177eb0779748956bc16c85167af48 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 8 Jan 2020 13:43:22 +0000
+Subject: KVM: arm64: Correct PSTATE on exception entry
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit a425372e733177eb0779748956bc16c85167af48 upstream.
+
+When KVM injects an exception into a guest, it generates the PSTATE
+value from scratch, configuring PSTATE.{M[4:0],DAIF}, and setting all
+other bits to zero.
+
+This isn't correct, as the architecture specifies that some PSTATE bits
+are (conditionally) cleared or set upon an exception, and others are
+unchanged from the original context.
+
+This patch adds logic to match the architectural behaviour. To make this
+simple to follow/audit/extend, documentation references are provided,
+and bits are configured in order of their layout in SPSR_EL2. This
+layout can be seen in the diagram on ARM DDI 0487E.a page C5-429.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200108134324.46500-2-mark.rutland@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/uapi/asm/ptrace.h |    1 
+ arch/arm64/kvm/inject_fault.c        |   70 ++++++++++++++++++++++++++++++++---
+ 2 files changed, 66 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -49,6 +49,7 @@
+ #define PSR_SSBS_BIT  0x00001000
+ #define PSR_PAN_BIT   0x00400000
+ #define PSR_UAO_BIT   0x00800000
++#define PSR_DIT_BIT   0x01000000
+ #define PSR_V_BIT     0x10000000
+ #define PSR_C_BIT     0x20000000
+ #define PSR_Z_BIT     0x40000000
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -14,9 +14,6 @@
+ #include <asm/kvm_emulate.h>
+ #include <asm/esr.h>
+-#define PSTATE_FAULT_BITS_64  (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
+-                               PSR_I_BIT | PSR_D_BIT)
+-
+ #define CURRENT_EL_SP_EL0_VECTOR      0x0
+ #define CURRENT_EL_SP_ELx_VECTOR      0x200
+ #define LOWER_EL_AArch64_VECTOR               0x400
+@@ -50,6 +47,69 @@ static u64 get_except_vector(struct kvm_
+       return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+ }
++/*
++ * When an exception is taken, most PSTATE fields are left unchanged in the
++ * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
++ * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
++ * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
++ *
++ * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
++ * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
++ *
++ * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
++ * MSB to LSB.
++ */
++static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
++{
++      unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
++      unsigned long old, new;
++
++      old = *vcpu_cpsr(vcpu);
++      new = 0;
++
++      new |= (old & PSR_N_BIT);
++      new |= (old & PSR_Z_BIT);
++      new |= (old & PSR_C_BIT);
++      new |= (old & PSR_V_BIT);
++
++      // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
++
++      new |= (old & PSR_DIT_BIT);
++
++      // PSTATE.UAO is set to zero upon any exception to AArch64
++      // See ARM DDI 0487E.a, page D5-2579.
++
++      // PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
++      // SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
++      // See ARM DDI 0487E.a, page D5-2578.
++      new |= (old & PSR_PAN_BIT);
++      if (!(sctlr & SCTLR_EL1_SPAN))
++              new |= PSR_PAN_BIT;
++
++      // PSTATE.SS is set to zero upon any exception to AArch64
++      // See ARM DDI 0487E.a, page D2-2452.
++
++      // PSTATE.IL is set to zero upon any exception to AArch64
++      // See ARM DDI 0487E.a, page D1-2306.
++
++      // PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
++      // See ARM DDI 0487E.a, page D13-3258
++      if (sctlr & SCTLR_ELx_DSSBS)
++              new |= PSR_SSBS_BIT;
++
++      // PSTATE.BTYPE is set to zero upon any exception to AArch64
++      // See ARM DDI 0487E.a, pages D1-2293 to D1-2294.
++
++      new |= PSR_D_BIT;
++      new |= PSR_A_BIT;
++      new |= PSR_I_BIT;
++      new |= PSR_F_BIT;
++
++      new |= PSR_MODE_EL1h;
++
++      return new;
++}
++
+ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
+ {
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+@@ -59,7 +119,7 @@ static void inject_abt64(struct kvm_vcpu
+       vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+-      *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++      *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
+       vcpu_write_spsr(vcpu, cpsr);
+       vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
+@@ -94,7 +154,7 @@ static void inject_undef64(struct kvm_vc
+       vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+-      *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++      *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
+       vcpu_write_spsr(vcpu, cpsr);
+       /*
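A corresponding worked example for get_except64_pstate(), again with
hypothetical values: a 64-bit guest at EL0 with only Z and C set, and
SCTLR_EL1.SPAN = 1, SCTLR_EL1.DSSBS = 0:

    /*
     * old = 0x60000000           Z|C, everything else clear
     *
     * new = Z | C                NZCV carried over
     *     | PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT
     *     | PSR_MODE_EL1h        (0b00101)
     *     = 0x600003c5
     */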
diff --git a/queue-5.4/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch b/queue-5.4/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch
new file mode 100644 (file)
index 0000000..dbf1fb2
--- /dev/null
@@ -0,0 +1,125 @@
+From b6ae256afd32f96bec0117175b329d0dd617655e Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@arm.com>
+Date: Thu, 12 Dec 2019 20:50:55 +0100
+Subject: KVM: arm64: Only sign-extend MMIO up to register width
+
+From: Christoffer Dall <christoffer.dall@arm.com>
+
+commit b6ae256afd32f96bec0117175b329d0dd617655e upstream.
+
+On AArch64 you can do a sign-extended load to either a 32-bit or 64-bit
+register, and we should only sign-extend the value up to the width of
+the register specified in the operation (by using the 32-bit Wn or
+64-bit Xn register specifier).
+
+As it turns out, the architecture provides this decoding information in
+the SF ("Sixty-Four" -- how cute...) bit.
+
+Let's take advantage of this with the usual 32-bit/64-bit header file
+dance and do the right thing on AArch64 hosts.
+
+Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191212195055.5541-1-christoffer.dall@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_emulate.h   |    5 +++++
+ arch/arm/include/asm/kvm_mmio.h      |    2 ++
+ arch/arm64/include/asm/kvm_emulate.h |    5 +++++
+ arch/arm64/include/asm/kvm_mmio.h    |    6 ++----
+ virt/kvm/arm/mmio.c                  |    6 ++++++
+ 5 files changed, 20 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -194,6 +194,11 @@ static inline bool kvm_vcpu_dabt_issext(
+       return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+ }
++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
++{
++      return false;
++}
++
+ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+ {
+       return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+--- a/arch/arm/include/asm/kvm_mmio.h
++++ b/arch/arm/include/asm/kvm_mmio.h
+@@ -14,6 +14,8 @@
+ struct kvm_decode {
+       unsigned long rt;
+       bool sign_extend;
++      /* Not used on 32-bit arm */
++      bool sixty_four;
+ };
+ void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -295,6 +295,11 @@ static inline bool kvm_vcpu_dabt_issext(
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+ }
++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
++{
++      return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
++}
++
+ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ {
+       return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+--- a/arch/arm64/include/asm/kvm_mmio.h
++++ b/arch/arm64/include/asm/kvm_mmio.h
+@@ -10,13 +10,11 @@
+ #include <linux/kvm_host.h>
+ #include <asm/kvm_arm.h>
+-/*
+- * This is annoying. The mmio code requires this, even if we don't
+- * need any decoding. To be fixed.
+- */
+ struct kvm_decode {
+       unsigned long rt;
+       bool sign_extend;
++      /* Width of the register accessed by the faulting instruction is 64 bits */
++      bool sixty_four;
+ };
+ void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -105,6 +105,9 @@ int kvm_handle_mmio_return(struct kvm_vc
+                       data = (data ^ mask) - mask;
+               }
++              if (!vcpu->arch.mmio_decode.sixty_four)
++                      data = data & 0xffffffff;
++
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+                              &data);
+               data = vcpu_data_host_to_guest(vcpu, data, len);
+@@ -125,6 +128,7 @@ static int decode_hsr(struct kvm_vcpu *v
+       unsigned long rt;
+       int access_size;
+       bool sign_extend;
++      bool sixty_four;
+       if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+               /* page table accesses IO mem: tell guest to fix its TTBR */
+@@ -138,11 +142,13 @@ static int decode_hsr(struct kvm_vcpu *v
+       *is_write = kvm_vcpu_dabt_iswrite(vcpu);
+       sign_extend = kvm_vcpu_dabt_issext(vcpu);
++      sixty_four = kvm_vcpu_dabt_issf(vcpu);
+       rt = kvm_vcpu_dabt_get_rd(vcpu);
+       *len = access_size;
+       vcpu->arch.mmio_decode.sign_extend = sign_extend;
+       vcpu->arch.mmio_decode.rt = rt;
++      vcpu->arch.mmio_decode.sixty_four = sixty_four;
+       return 0;
+ }
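To see what the new 'sixty_four' decode bit changes, here is a small
standalone sketch of the read-return path for a 1-byte sign-extended
MMIO load, with hypothetical values; the xor/subtract trick is the same
one used in kvm_handle_mmio_return() above:

    unsigned long data = 0x80;      /* byte read back from MMIO */
    unsigned long mask = 1UL << 7;  /* sign bit of a 1-byte access */

    data = (data ^ mask) - mask;    /* sign extend: 0xffffffffffffff80 */
    if (!sixty_four)                /* destination was Wn, not Xn */
            data = data & 0xffffffff;   /* -> 0x00000000ffffff80 */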
diff --git a/queue-5.4/mips-boot-fix-typo-in-vmlinux.lzma.its-target.patch b/queue-5.4/mips-boot-fix-typo-in-vmlinux.lzma.its-target.patch
new file mode 100644 (file)
index 0000000..978c97d
--- /dev/null
@@ -0,0 +1,41 @@
+From 16202c09577f3d0c533274c0410b7de05fb0d458 Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <alobakin@dlink.ru>
+Date: Fri, 17 Jan 2020 17:02:08 +0300
+Subject: MIPS: boot: fix typo in 'vmlinux.lzma.its' target
+
+From: Alexander Lobakin <alobakin@dlink.ru>
+
+commit 16202c09577f3d0c533274c0410b7de05fb0d458 upstream.
+
+Commit 92b34a976348 ("MIPS: boot: add missing targets for vmlinux.*.its")
+fixed the constant rebuild of *.its files on every make invocation, but
+due to a typo ("lzmo") it had no effect for vmlinux.lzma.its.
+
+Fixes: 92b34a976348 ("MIPS: boot: add missing targets for vmlinux.*.its")
+Cc: <stable@vger.kernel.org> # v4.19+
+Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
+[paulburton@kernel.org: s/invokation/invocation/]
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/boot/Makefile |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/boot/Makefile
++++ b/arch/mips/boot/Makefile
+@@ -123,7 +123,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srct
+ targets += vmlinux.its
+ targets += vmlinux.gz.its
+ targets += vmlinux.bz2.its
+-targets += vmlinux.lzmo.its
++targets += vmlinux.lzma.its
+ targets += vmlinux.lzo.its
+ quiet_cmd_cpp_its_S = ITS     $@
diff --git a/queue-5.4/mips-fix-indentation-of-the-relocs-message.patch b/queue-5.4/mips-fix-indentation-of-the-relocs-message.patch
new file mode 100644 (file)
index 0000000..98cafed
--- /dev/null
@@ -0,0 +1,61 @@
+From a53998802e178451701d59d38e36f551422977ba Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <alobakin@dlink.ru>
+Date: Fri, 17 Jan 2020 17:02:07 +0300
+Subject: MIPS: fix indentation of the 'RELOCS' message
+
+From: Alexander Lobakin <alobakin@dlink.ru>
+
+commit a53998802e178451701d59d38e36f551422977ba upstream.
+
+quiet_cmd_relocs lacks a whitespace, which results in:
+
+  LD      vmlinux
+  SORTEX  vmlinux
+  SYSMAP  System.map
+  RELOCS vmlinux
+  Building modules, stage 2.
+  MODPOST 64 modules
+
+After this patch:
+
+  LD      vmlinux
+  SORTEX  vmlinux
+  SYSMAP  System.map
+  RELOCS  vmlinux
+  Building modules, stage 2.
+  MODPOST 64 modules
+
+The typo has been present in the kernel tree since the introduction of
+relocatable kernel support in commit e818fac595ab ("MIPS: Generate
+relocation table when CONFIG_RELOCATABLE"), but the relocation scripts
+were moved to Makefile.postlink later with commit 44079d3509ae ("MIPS:
+Use Makefile.postlink to insert relocations into vmlinux").
+
+Fixes: 44079d3509ae ("MIPS: Use Makefile.postlink to insert relocations into vmlinux")
+Cc: <stable@vger.kernel.org> # v4.11+
+Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
+[paulburton@kernel.org: Fixup commit references in commit message.]
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Makefile.postlink |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile.postlink
++++ b/arch/mips/Makefile.postlink
+@@ -12,7 +12,7 @@ __archpost:
+ include scripts/Kbuild.include
+ CMD_RELOCS = arch/mips/boot/tools/relocs
+-quiet_cmd_relocs = RELOCS $@
++quiet_cmd_relocs = RELOCS  $@
+       cmd_relocs = $(CMD_RELOCS) $@
+ # `@true` prevents complaint when there is nothing to be done
diff --git a/queue-5.4/mips-syscalls-fix-indentation-of-the-sysnr-message.patch b/queue-5.4/mips-syscalls-fix-indentation-of-the-sysnr-message.patch
new file mode 100644 (file)
index 0000000..e9a7936
--- /dev/null
@@ -0,0 +1,61 @@
+From 4f29ad200f7b40fbcf73cd65f95087535ba78380 Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <alobakin@dlink.ru>
+Date: Fri, 17 Jan 2020 17:02:09 +0300
+Subject: MIPS: syscalls: fix indentation of the 'SYSNR' message
+
+From: Alexander Lobakin <alobakin@dlink.ru>
+
+commit 4f29ad200f7b40fbcf73cd65f95087535ba78380 upstream.
+
+It also lacks a whitespace (copy'n'paste error?) and messes up the
+output:
+
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_n32.h
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_n64.h
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_o32.h
+  SYSNR  arch/mips/include/generated/uapi/asm/unistd_nr_n32.h
+  SYSNR  arch/mips/include/generated/uapi/asm/unistd_nr_n64.h
+  SYSNR  arch/mips/include/generated/uapi/asm/unistd_nr_o32.h
+  WRAP    arch/mips/include/generated/uapi/asm/bpf_perf_event.h
+  WRAP    arch/mips/include/generated/uapi/asm/ipcbuf.h
+
+After:
+
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_n32.h
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_n64.h
+  SYSHDR  arch/mips/include/generated/uapi/asm/unistd_o32.h
+  SYSNR   arch/mips/include/generated/uapi/asm/unistd_nr_n32.h
+  SYSNR   arch/mips/include/generated/uapi/asm/unistd_nr_n64.h
+  SYSNR   arch/mips/include/generated/uapi/asm/unistd_nr_o32.h
+  WRAP    arch/mips/include/generated/uapi/asm/bpf_perf_event.h
+  WRAP    arch/mips/include/generated/uapi/asm/ipcbuf.h
+
+Present since day 0 of syscall table generation support for MIPS.
+
+Fixes: 9bcbf97c6293 ("mips: add system call table generation support")
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/syscalls/Makefile |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/syscalls/Makefile
++++ b/arch/mips/kernel/syscalls/Makefile
+@@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR  $@
+                  '$(syshdr_pfx_$(basetarget))'                \
+                  '$(syshdr_offset_$(basetarget))'
+-quiet_cmd_sysnr = SYSNR  $@
++quiet_cmd_sysnr = SYSNR   $@
+       cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@'                \
+                 '$(sysnr_abis_$(basetarget))'                 \
+                 '$(sysnr_pfx_$(basetarget))'                  \
diff --git a/queue-5.4/powerpc-32s-fix-bad_kuap_fault.patch b/queue-5.4/powerpc-32s-fix-bad_kuap_fault.patch
new file mode 100644 (file)
index 0000000..7e6f0a6
--- /dev/null
@@ -0,0 +1,106 @@
+From 6ec20aa2e510b6297906c45f009aa08b2d97269a Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Fri, 24 Jan 2020 11:54:40 +0000
+Subject: powerpc/32s: Fix bad_kuap_fault()
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 6ec20aa2e510b6297906c45f009aa08b2d97269a upstream.
+
+At the moment, bad_kuap_fault() reports a fault only if a bad access
+to userspace occurred while access to userspace was not granted.
+
+But if a fault occurs for a write outside the allowed userspace
+segment(s) that have been unlocked, bad_kuap_fault() fails to
+detect it and the kernel loops forever in do_page_fault().
+
+Fix it by checking that the accessed address is within the allowed
+range.
+
+Fixes: a68c31fc01ef ("powerpc/32s: Implement Kernel Userspace Access Protection")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/f48244e9485ada0a304ed33ccbb8da271180c80d.1579866752.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/32/kup.h       |    9 +++++++--
+ arch/powerpc/include/asm/book3s/64/kup-radix.h |    3 ++-
+ arch/powerpc/include/asm/kup.h                 |    6 +++++-
+ arch/powerpc/include/asm/nohash/32/kup-8xx.h   |    3 ++-
+ arch/powerpc/mm/fault.c                        |    2 +-
+ 5 files changed, 17 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/32/kup.h
++++ b/arch/powerpc/include/asm/book3s/32/kup.h
+@@ -131,12 +131,17 @@ static inline void prevent_user_access(v
+       kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);        /* set Ks */
+ }
+-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+ {
++      unsigned long begin = regs->kuap & 0xf0000000;
++      unsigned long end = regs->kuap << 28;
++
+       if (!is_write)
+               return false;
+-      return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
++      return WARN(address < begin || address >= end,
++                  "Bug: write fault blocked by segment registers !");
+ }
+ #endif /* CONFIG_PPC_KUAP */
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -95,7 +95,8 @@ static inline void prevent_user_access(v
+       set_kuap(AMR_KUAP_BLOCKED);
+ }
+-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+ {
+       return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+                   (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+--- a/arch/powerpc/include/asm/kup.h
++++ b/arch/powerpc/include/asm/kup.h
+@@ -45,7 +45,11 @@ static inline void allow_user_access(voi
+                                    unsigned long size) { }
+ static inline void prevent_user_access(void __user *to, const void __user *from,
+                                      unsigned long size) { }
+-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
++{
++      return false;
++}
+ #endif /* CONFIG_PPC_KUAP */
+ static inline void allow_read_from_user(const void __user *from, unsigned long size)
+--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
++++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+@@ -45,7 +45,8 @@ static inline void prevent_user_access(v
+       mtspr(SPRN_MD_AP, MD_APG_KUAP);
+ }
+-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+ {
+       return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
+                   "Bug: fault blocked by AP register !");
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_r
+       // Read/write fault in a valid region (the exception table search passed
+       // above), but blocked by KUAP is bad, it can never succeed.
+-      if (bad_kuap_fault(regs, is_write))
++      if (bad_kuap_fault(regs, address, is_write))
+               return true;
+       // What's left? Kernel fault on user in well defined regions (extable
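For illustration, the begin/end arithmetic above unpacks the segment
range that the 32s KUAP code (from the original a68c31fc01ef patch
referenced in the Fixes tag) stores, roughly, in regs->kuap: top nibble
is the first unlocked segment, low nibble the segment after the last
one. With a hypothetical kuap value:

    /*
     * regs->kuap = 0x30000004: user segments 0x3 (inclusive) up to
     * 0x4 (exclusive) were unlocked for writing.
     *
     * begin = 0x30000004 & 0xf0000000 = 0x30000000
     * end   = 0x30000004 << 28        = 0x40000000
     *
     * Write fault at 0x35000000: inside [begin, end) -> not reported.
     * Write fault at 0x50000000: outside the range   -> WARN and true.
     */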
diff --git a/queue-5.4/powerpc-32s-fix-cpu-wake-up-from-sleep-mode.patch b/queue-5.4/powerpc-32s-fix-cpu-wake-up-from-sleep-mode.patch
new file mode 100644 (file)
index 0000000..7746ccf
--- /dev/null
@@ -0,0 +1,60 @@
+From 9933819099c4600b41a042f27a074470a43cf6b9 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Mon, 27 Jan 2020 10:42:04 +0000
+Subject: powerpc/32s: Fix CPU wake-up from sleep mode
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 9933819099c4600b41a042f27a074470a43cf6b9 upstream.
+
+Commit f7354ccac844 ("powerpc/32: Remove CURRENT_THREAD_INFO and
+rename TI_CPU") broke the CPU wake-up from sleep mode (i.e. when
+_TLF_SLEEPING is set) by delaying the tovirt(r2, r2).
+
+This is because r2 is not restored by fast_exception_return. It used
+to work (by chance?) because the CPU wake-up interrupt never comes from
+user, so r2 is expected to point to 'current' on return.
+
+Commit e2fb9f544431 ("powerpc/32: Prepare for Kernel Userspace Access
+Protection") broke it even more by clobbering r0 which is not
+restored by fast_exception_return either.
+
+Use r6 instead of r0. This is possible because r3-r6 are restored by
+fast_exception_return and only r3-r5 are used for exception arguments.
+
+For r2, it could be converted back to a virtual address, but stay on the
+safe side and restore it from the stack instead. It should be live
+in the cache at that moment, so loading from the stack should make
+no difference compared to converting it from phys to virt.
+
+Fixes: f7354ccac844 ("powerpc/32: Remove CURRENT_THREAD_INFO and rename TI_CPU")
+Fixes: e2fb9f544431 ("powerpc/32: Prepare for Kernel Userspace Access Protection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/6d02c3ae6ad77af34392e98117e44c2bf6d13ba1.1580121710.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/entry_32.S |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -179,7 +179,7 @@ transfer_to_handler:
+ 2:    /* if from kernel, check interrupted DOZE/NAP mode and
+          * check for stack overflow
+          */
+-      kuap_save_and_lock r11, r12, r9, r2, r0
++      kuap_save_and_lock r11, r12, r9, r2, r6
+       addi    r2, r12, -THREAD
+       lwz     r9,KSP_LIMIT(r12)
+       cmplw   r1,r9                   /* if r1 <= ksp_limit */
+@@ -284,6 +284,7 @@ reenable_mmu:
+       rlwinm  r9,r9,0,~MSR_EE
+       lwz     r12,_LINK(r11)          /* and return to address in LR */
+       kuap_restore r11, r2, r3, r4, r5
++      lwz     r2, GPR2(r11)
+       b       fast_exception_return
+ #endif
diff --git a/queue-5.4/powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch b/queue-5.4/powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch
new file mode 100644 (file)
index 0000000..7e91198
--- /dev/null
@@ -0,0 +1,162 @@
+From 12e4d53f3f04e81f9e83d6fc10edc7314ab9f6b9 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Mon, 3 Feb 2020 17:36:46 -0800
+Subject: powerpc/mmu_gather: enable RCU_TABLE_FREE even for !SMP case
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 12e4d53f3f04e81f9e83d6fc10edc7314ab9f6b9 upstream.
+
+Patch series "Fixup page directory freeing", v4.
+
+This is a repost of the patch series from Peter, with the arch-specific
+changes other than ppc64 dropped.  The ppc64 changes are added here
+because we are redoing the series on top of the ppc64 changes.  This
+makes it easy to backport
+these changes.  Only the first 2 patches need to be backported to stable.
+
+The thing is, on anything SMP, freeing page directories should observe the
+exact same order as normal page freeing:
+
+ 1) unhook page/directory
+ 2) TLB invalidate
+ 3) free page/directory
+
+Without this, any concurrent page-table walk could end up with a
+use-after-free.  This is especially easy to hit for anything that has
+software page-table walkers (HAVE_FAST_GUP / software TLB fill) or where
+the hardware caches partial page walks (i.e. caches page directories).
+
+Even on UP this might give issues since mmu_gather is preemptible these
+days.  An interrupt or preempted task accessing user pages might stumble
+into the free page if the hardware caches page directories.
+
+This patch series fixes ppc64 and adds generic MMU_GATHER changes to
+support the conversion of other architectures.  I haven't added patches
+for the other architectures because they are yet to be acked.
+
+This patch (of 9):
+
+A followup patch is going to make sure we correctly invalidate the page
+walk cache before we free page table pages.  In order to keep things
+simple, enable RCU_TABLE_FREE even for !SMP so that we don't have to fix
+up the !SMP case differently in the followup patch.
+
+The !SMP case is right now broken for radix translation w.r.t. the page
+walk cache flush.  We can get interrupted in between the page table free
+and the flush, which would imply we have page walk cache entries pointing
+to tables which got freed already.  Michael said "both our platforms that
+run on Power9 force SMP on in Kconfig, so the !SMP case is unlikely to be
+a problem for anyone in practice, unless they've hacked their kernel to
+build it !SMP."
+
+Link: http://lkml.kernel.org/r/20200116064531.483522-2-aneesh.kumar@linux.ibm.com
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/Kconfig                         |    2 +-
+ arch/powerpc/include/asm/book3s/32/pgalloc.h |    8 --------
+ arch/powerpc/include/asm/book3s/64/pgalloc.h |    2 --
+ arch/powerpc/include/asm/nohash/pgalloc.h    |    8 --------
+ arch/powerpc/mm/book3s64/pgtable.c           |    7 -------
+ 5 files changed, 1 insertion(+), 26 deletions(-)
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -221,7 +221,7 @@ config PPC
+       select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
+-      select HAVE_RCU_TABLE_FREE              if SMP
++      select HAVE_RCU_TABLE_FREE
+       select HAVE_RCU_TABLE_NO_INVALIDATE     if HAVE_RCU_TABLE_FREE
+       select HAVE_MMU_GATHER_PAGE_SIZE
+       select HAVE_REGS_AND_STACK_ACCESS_API
+--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
+@@ -49,7 +49,6 @@ static inline void pgtable_free(void *ta
+ #define get_hugepd_cache_index(x)  (x)
+-#ifdef CONFIG_SMP
+ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+                                   void *table, int shift)
+ {
+@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(vo
+       pgtable_free(table, shift);
+ }
+-#else
+-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+-                                  void *table, int shift)
+-{
+-      pgtable_free(table, shift);
+-}
+-#endif
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+                                 unsigned long address)
+--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_l
+ extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
+ extern void pmd_fragment_free(unsigned long *);
+ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+-#ifdef CONFIG_SMP
+ extern void __tlb_remove_table(void *_table);
+-#endif
+ void pte_frag_destroy(void *pte_frag);
+ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
+--- a/arch/powerpc/include/asm/nohash/pgalloc.h
++++ b/arch/powerpc/include/asm/nohash/pgalloc.h
+@@ -46,7 +46,6 @@ static inline void pgtable_free(void *ta
+ #define get_hugepd_cache_index(x)     (x)
+-#ifdef CONFIG_SMP
+ static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+ {
+       unsigned long pgf = (unsigned long)table;
+@@ -64,13 +63,6 @@ static inline void __tlb_remove_table(vo
+       pgtable_free(table, shift);
+ }
+-#else
+-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+-{
+-      pgtable_free(table, shift);
+-}
+-#endif
+-
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+                                 unsigned long address)
+ {
+--- a/arch/powerpc/mm/book3s64/pgtable.c
++++ b/arch/powerpc/mm/book3s64/pgtable.c
+@@ -378,7 +378,6 @@ static inline void pgtable_free(void *ta
+       }
+ }
+-#ifdef CONFIG_SMP
+ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
+ {
+       unsigned long pgf = (unsigned long)table;
+@@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
+       return pgtable_free(table, index);
+ }
+-#else
+-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
+-{
+-      return pgtable_free(table, index);
+-}
+-#endif
+ #ifdef CONFIG_PROC_FS
+ atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
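
The unhook -> TLB invalidate -> free ordering that the commit message above
spells out can be illustrated with a minimal standalone sketch. This is
hypothetical userspace C, not kernel code: fake_tlb_invalidate() and the
struct are stand-ins, and the point is only that the free must not happen
before the invalidate, or a concurrent walker could use freed memory.

#include <stdio.h>
#include <stdlib.h>

struct page_table {
	int entries[4];			/* stand-in for a real page directory */
};

static struct page_table *pgd;		/* the directory being torn down */

static void fake_tlb_invalidate(void)
{
	/*
	 * Stand-in for the TLB / page-walk-cache flush (step 2). Until
	 * this completes, a concurrent walker may still hold a cached
	 * pointer to the old table.
	 */
	printf("2) TLB invalidate\n");
}

int main(void)
{
	struct page_table *dead;

	pgd = calloc(1, sizeof(*pgd));

	/* 1) unhook: no new walker can reach the table any more */
	dead = pgd;
	pgd = NULL;
	printf("1) unhook page/directory\n");

	/* 2) flush out walkers that may have cached a pointer to it */
	fake_tlb_invalidate();

	/* 3) only now is the memory safe to free and reuse */
	free(dead);
	printf("3) free page/directory\n");
	return 0;
}
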
diff --git a/queue-5.4/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch b/queue-5.4/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch
new file mode 100644
index 0000000..29d0221
--- /dev/null
@@ -0,0 +1,38 @@
+From fbee6ba2dca30d302efe6bddb3a886f5e964a257 Mon Sep 17 00:00:00 2001
+From: Pingfan Liu <kernelfans@gmail.com>
+Date: Fri, 10 Jan 2020 12:54:02 +0800
+Subject: powerpc/pseries: Advance pfn if section is not present in lmb_is_removable()
+
+From: Pingfan Liu <kernelfans@gmail.com>
+
+commit fbee6ba2dca30d302efe6bddb3a886f5e964a257 upstream.
+
+In lmb_is_removable(), if a section is not present, it should continue to
+test the rest of the sections in the block, but the current code continues
+without advancing phys_addr and so re-checks the same absent section.
+
+Fixes: 51925fb3c5c9 ("powerpc/pseries: Implement memory hotplug remove in the kernel")
+Cc: stable@vger.kernel.org # v4.1+
+Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/1578632042-12415-1-git-send-email-kernelfans@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -360,8 +360,10 @@ static bool lmb_is_removable(struct drme
+       for (i = 0; i < scns_per_block; i++) {
+               pfn = PFN_DOWN(phys_addr);
+-              if (!pfn_present(pfn))
++              if (!pfn_present(pfn)) {
++                      phys_addr += MIN_MEMORY_BLOCK_SIZE;
+                       continue;
++              }
+               rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+               phys_addr += MIN_MEMORY_BLOCK_SIZE;
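
The bug fixed above is an instance of a generic loop pattern: a continue
that skips an element without advancing the cursor, so the same element is
examined again on the next iteration. A minimal sketch of the corrected
loop, with hypothetical section_present()/section_removable() helpers
standing in for pfn_present() and is_mem_section_removable():

#include <stdbool.h>
#include <stdio.h>

#define SECTION_SIZE 16UL	/* stand-in for MIN_MEMORY_BLOCK_SIZE */
#define NSECTIONS    4		/* stand-in for scns_per_block */

static bool section_present(unsigned long addr)
{
	return addr != 0;	/* pretend only the first section is absent */
}

static bool section_removable(unsigned long addr)
{
	(void)addr;
	return true;
}

int main(void)
{
	unsigned long addr = 0;
	bool rc = true;

	for (int i = 0; i < NSECTIONS; i++) {
		if (!section_present(addr)) {
			addr += SECTION_SIZE;	/* the fix: advance before continuing */
			continue;
		}
		rc &= section_removable(addr);
		addr += SECTION_SIZE;
	}
	printf("removable: %d\n", rc);
	return 0;
}
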
diff --git a/queue-5.4/powerpc-ptdump-fix-w-x-verification.patch b/queue-5.4/powerpc-ptdump-fix-w-x-verification.patch
new file mode 100644
index 0000000..9342f75
--- /dev/null
@@ -0,0 +1,43 @@
+From d80ae83f1f932ab7af47b54d0d3bef4f4dba489f Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Tue, 14 Jan 2020 08:13:09 +0000
+Subject: powerpc/ptdump: Fix W+X verification
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit d80ae83f1f932ab7af47b54d0d3bef4f4dba489f upstream.
+
+Verification cannot rely on simple bit checking because on some
+platforms PAGE_RW is 0; there, checking that a page is not writable
+means checking that PAGE_RO is set instead of checking that PAGE_RW
+is not set.
+
+Use pte helpers instead of checking bits.
+
+Fixes: 453d87f6a8ae ("powerpc/mm: Warn if W+X pages found on boot")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/0d894839fdbb19070f0e1e4140363be4f2bb62fc.1578989540.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/ptdump/ptdump.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/ptdump/ptdump.c
++++ b/arch/powerpc/mm/ptdump/ptdump.c
+@@ -173,10 +173,12 @@ static void dump_addr(struct pg_state *s
+ static void note_prot_wx(struct pg_state *st, unsigned long addr)
+ {
++      pte_t pte = __pte(st->current_flags);
++
+       if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+               return;
+-      if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
++      if (!pte_write(pte) || !pte_exec(pte))
+               return;
+       WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
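
The pitfall the commit message describes is easy to reproduce outside the
kernel: when a platform encodes write permission as the absence of a
read-only bit, a bit-AND test against a PAGE_RW value of 0 can never fire.
A small sketch with hypothetical flag values and helpers (these are not
the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_RO   0x1UL	/* this platform encodes "read-only", not "read-write" */
#define _PAGE_EXEC 0x2UL
#define PAGE_RW    0x0UL	/* hence there is no RW bit at all */

static bool pte_write_flags(unsigned long flags) { return !(flags & _PAGE_RO); }
static bool pte_exec_flags(unsigned long flags)  { return flags & _PAGE_EXEC; }

int main(void)
{
	unsigned long flags = _PAGE_EXEC;	/* writable (no RO bit) and executable */

	/* Naive bit check: flags & 0 is always 0, so W+X is never reported. */
	if ((flags & PAGE_RW) && (flags & _PAGE_EXEC))
		printf("bit check: W+X found\n");

	/* Helper-based check, as the fix above does with pte_write()/pte_exec(). */
	if (pte_write_flags(flags) && pte_exec_flags(flags))
		printf("helper check: W+X found\n");

	return 0;
}
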
diff --git a/queue-5.4/powerpc-xmon-don-t-access-asdr-in-vms.patch b/queue-5.4/powerpc-xmon-don-t-access-asdr-in-vms.patch
new file mode 100644
index 0000000..ef18128
--- /dev/null
@@ -0,0 +1,46 @@
+From c2a20711fc181e7f22ee5c16c28cb9578af84729 Mon Sep 17 00:00:00 2001
+From: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+Date: Mon, 6 Jan 2020 13:50:02 -0600
+Subject: powerpc/xmon: don't access ASDR in VMs
+
+From: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+
+commit c2a20711fc181e7f22ee5c16c28cb9578af84729 upstream.
+
+ASDR is HV-privileged and must only be accessed in HV mode. This
+fixes a Program Check (0x700) when xmon running in a VM dumps SPRs.
+
+Fixes: d1e1b351f50f ("powerpc/xmon: Add ISA v3.0 SPRs to SPR dump")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200107021633.GB29843@us.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/xmon/xmon.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1894,15 +1894,14 @@ static void dump_300_sprs(void)
+       printf("pidr   = %.16lx  tidr  = %.16lx\n",
+               mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+-      printf("asdr   = %.16lx  psscr = %.16lx\n",
+-              mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
+-                                      : mfspr(SPRN_PSSCR_PR));
++      printf("psscr  = %.16lx\n",
++              hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
+       if (!hv)
+               return;
+-      printf("ptcr   = %.16lx\n",
+-              mfspr(SPRN_PTCR));
++      printf("ptcr   = %.16lx  asdr  = %.16lx\n",
++              mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
+ #endif
+ }
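
The shape of the fix above generalizes: read what is safe everywhere first,
then bail out before touching hypervisor-only state. A hedged sketch of
that control flow in plain C, where in_hv_mode() and read_spr() are
hypothetical stand-ins for the MSR_HV test and mfspr():

#include <stdbool.h>
#include <stdio.h>

static bool in_hv_mode(void)
{
	return false;			/* e.g. running as a guest in a VM */
}

static unsigned long read_spr(const char *name)
{
	(void)name;
	return 0xdeadbeefUL;		/* placeholder register value */
}

static void dump_sprs(void)
{
	bool hv = in_hv_mode();

	/* Safe everywhere: use the problem-state alias when not in HV mode. */
	printf("psscr = %016lx\n", read_spr(hv ? "PSSCR" : "PSSCR_PR"));

	if (!hv)
		return;			/* never touch HV-only registers in a VM */

	/* Reached only on bare metal: HV-privileged registers. */
	printf("ptcr  = %016lx  asdr = %016lx\n",
	       read_spr("PTCR"), read_spr("ASDR"));
}

int main(void)
{
	dump_sprs();
	return 0;
}
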
diff --git a/queue-5.4/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch b/queue-5.4/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch
new file mode 100644
index 0000000..dcacc55
--- /dev/null
@@ -0,0 +1,181 @@
+From 5f490a520bcb393389a4d44bec90afcb332eb112 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Thu, 16 Jan 2020 19:59:04 +0100
+Subject: s390/mm: fix dynamic pagetable upgrade for hugetlbfs
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 5f490a520bcb393389a4d44bec90afcb332eb112 upstream.
+
+Commit ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number
+of page table levels") changed the logic of TASK_SIZE and also removed the
+arch_mmap_check() implementation for s390. This combination has a subtle
+effect on how get_unmapped_area() for hugetlbfs pages works. It is now
+possible that a user process establishes a hugetlbfs mapping at an address
+above 4 TB, without triggering a dynamic pagetable upgrade from 3 to 4
+levels.
+
+This is because hugetlbfs mappings will not use mm->get_unmapped_area, but
+rather file->f_op->get_unmapped_area, which currently is the generic
+implementation of hugetlb_get_unmapped_area().  That implementation does
+not know about s390 dynamic pagetable upgrades, but with the new definition
+of TASK_SIZE it will now allow mappings above 4 TB.
+
+Subsequent access to such a mapped address above 4 TB will result in a page
+fault loop, because the CPU cannot translate such a large address with 3
+pagetable levels. The fault handler will try to map in a hugepage at the
+address, but due to the folded pagetable logic it will end up creating
+entries in the 3-level pagetable, possibly overwriting existing mappings,
+and then it all repeats when the access is retried.
+
+Apart from the page fault loop, this can have various nasty effects, e.g.
+kernel panic from one of the BUG_ON() checks in memory management code,
+or even data loss if an existing mapping gets overwritten.
+
+Fix this by implementing HAVE_ARCH_HUGETLB_UNMAPPED_AREA support for s390,
+providing an s390 version for hugetlb_get_unmapped_area() with pagetable
+upgrade support similar to arch_get_unmapped_area(), which will then be
+used instead of the generic version.
+
+Fixes: ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number of page table levels")
+Cc: <stable@vger.kernel.org> # 4.12+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/page.h |    2 
+ arch/s390/mm/hugetlbpage.c   |  100 ++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 101 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -33,6 +33,8 @@
+ #define ARCH_HAS_PREPARE_HUGEPAGE
+ #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++
+ #include <asm/setup.h>
+ #ifndef __ASSEMBLY__
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -2,7 +2,7 @@
+ /*
+  *  IBM System z Huge TLB Page Support for Kernel.
+  *
+- *    Copyright IBM Corp. 2007,2016
++ *    Copyright IBM Corp. 2007,2020
+  *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+  */
+@@ -11,6 +11,9 @@
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
++#include <linux/mman.h>
++#include <linux/sched/mm.h>
++#include <linux/security.h>
+ /*
+  * If the bit selected by single-bit bitmask "a" is set within "x", move
+@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char
+       return 1;
+ }
+ __setup("hugepagesz=", setup_hugepagesz);
++
++static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
++              unsigned long addr, unsigned long len,
++              unsigned long pgoff, unsigned long flags)
++{
++      struct hstate *h = hstate_file(file);
++      struct vm_unmapped_area_info info;
++
++      info.flags = 0;
++      info.length = len;
++      info.low_limit = current->mm->mmap_base;
++      info.high_limit = TASK_SIZE;
++      info.align_mask = PAGE_MASK & ~huge_page_mask(h);
++      info.align_offset = 0;
++      return vm_unmapped_area(&info);
++}
++
++static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
++              unsigned long addr0, unsigned long len,
++              unsigned long pgoff, unsigned long flags)
++{
++      struct hstate *h = hstate_file(file);
++      struct vm_unmapped_area_info info;
++      unsigned long addr;
++
++      info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++      info.length = len;
++      info.low_limit = max(PAGE_SIZE, mmap_min_addr);
++      info.high_limit = current->mm->mmap_base;
++      info.align_mask = PAGE_MASK & ~huge_page_mask(h);
++      info.align_offset = 0;
++      addr = vm_unmapped_area(&info);
++
++      /*
++       * A failed mmap() very likely causes application failure,
++       * so fall back to the bottom-up function here. This scenario
++       * can happen with large stack limits and large mmap()
++       * allocations.
++       */
++      if (addr & ~PAGE_MASK) {
++              VM_BUG_ON(addr != -ENOMEM);
++              info.flags = 0;
++              info.low_limit = TASK_UNMAPPED_BASE;
++              info.high_limit = TASK_SIZE;
++              addr = vm_unmapped_area(&info);
++      }
++
++      return addr;
++}
++
++unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
++              unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++      struct hstate *h = hstate_file(file);
++      struct mm_struct *mm = current->mm;
++      struct vm_area_struct *vma;
++      int rc;
++
++      if (len & ~huge_page_mask(h))
++              return -EINVAL;
++      if (len > TASK_SIZE - mmap_min_addr)
++              return -ENOMEM;
++
++      if (flags & MAP_FIXED) {
++              if (prepare_hugepage_range(file, addr, len))
++                      return -EINVAL;
++              goto check_asce_limit;
++      }
++
++      if (addr) {
++              addr = ALIGN(addr, huge_page_size(h));
++              vma = find_vma(mm, addr);
++              if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
++                  (!vma || addr + len <= vm_start_gap(vma)))
++                      goto check_asce_limit;
++      }
++
++      if (mm->get_unmapped_area == arch_get_unmapped_area)
++              addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
++                              pgoff, flags);
++      else
++              addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
++                              pgoff, flags);
++      if (addr & ~PAGE_MASK)
++              return addr;
++
++check_asce_limit:
++      if (addr + len > current->mm->context.asce_limit &&
++          addr + len <= TASK_SIZE) {
++              rc = crst_table_upgrade(mm, addr + len);
++              if (rc)
++                      return (unsigned long) rc;
++      }
++      return addr;
++}
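
The failure mode the commit message describes can be pictured from user
space. A hedged reproducer sketch: it assumes hugepages have been reserved
(e.g. via vm.nr_hugepages) and simply asks for a hugepage mapping with an
address hint above the 4 TB boundary; on a fixed kernel the mapping either
lands with the pagetable upgrade applied or fails cleanly, instead of
faulting in a loop on first access.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16UL << 20;		/* 16 MiB; assumes the hugepage size divides this */
	void *hint = (void *)(5UL << 40);	/* address hint above 4 TB */

	void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0, len);			/* first access: fault the pages in */
	printf("hugetlb mapping at %p\n", p);
	munmap(p, len);
	return 0;
}
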
diff --git a/queue-5.4/series b/queue-5.4/series
index 4abeaca3a984bfa5bb68956902d87d62449143ef..18bb539e68a8433c28dcf25af0c457bc50f6fb84 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -59,3 +59,21 @@ lib-test_kasan.c-fix-memory-leak-in-kmalloc_oob_krealloc_more.patch
 irqdomain-fix-a-memory-leak-in-irq_domain_push_irq.patch
 x86-cpu-update-cached-hle-state-on-write-to-tsx_ctrl_cpuid_clear.patch
 platform-x86-intel_scu_ipc-fix-interrupt-support.patch
+alsa-hda-apply-aligned-mmio-access-only-conditionally.patch
+alsa-hda-add-clevo-w65_67sb-the-power_save-blacklist.patch
+alsa-hda-add-jasperlake-pci-id-and-codec-vid.patch
+arm64-acpi-fix-daif-manipulation-with-pnmi.patch
+kvm-arm64-correct-pstate-on-exception-entry.patch
+kvm-arm-arm64-correct-cpsr-on-exception-entry.patch
+kvm-arm-arm64-correct-aarch32-spsr-on-exception-entry.patch
+kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch
+mips-syscalls-fix-indentation-of-the-sysnr-message.patch
+mips-fix-indentation-of-the-relocs-message.patch
+mips-boot-fix-typo-in-vmlinux.lzma.its-target.patch
+s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch
+powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch
+powerpc-ptdump-fix-w-x-verification.patch
+powerpc-xmon-don-t-access-asdr-in-vms.patch
+powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch
+powerpc-32s-fix-bad_kuap_fault.patch
+powerpc-32s-fix-cpu-wake-up-from-sleep-mode.patch