--- /dev/null
+From 107352a24900fb458152b92a4e72fbdc83fd5510 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 18 Dec 2018 14:59:09 +0000
+Subject: arm/arm64: KVM: vgic: Force VM halt when changing the active state of GICv3 PPIs/SGIs
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 107352a24900fb458152b92a4e72fbdc83fd5510 upstream.
+
+We currently only halt the guest when a vCPU messes with the active
+state of an SPI. This is perfectly fine for GICv2, but isn't enough
+for GICv3, where all vCPUs can access the state of any other vCPU.
+
+Let's broaden the condition to include any GICv3 interrupt that
+has an active state (i.e. all but LPIs).
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-mmio.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -368,14 +368,16 @@ static void vgic_mmio_change_active(stru
+ */
+ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+ {
+- if (intid > VGIC_NR_PRIVATE_IRQS)
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++ intid > VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_halt_guest(vcpu->kvm);
+ }
+
+ /* See vgic_change_active_prepare */
+ static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+ {
+- if (intid > VGIC_NR_PRIVATE_IRQS)
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++ intid > VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_resume_guest(vcpu->kvm);
+ }
+
--- /dev/null
+From 8ac686d7dfed721102860ff2571e6b9f529ae81a Mon Sep 17 00:00:00 2001
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Date: Wed, 12 Dec 2018 18:57:44 +0100
+Subject: ARM: dts: exynos: Specify I2S assigned clocks in proper node
+
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+commit 8ac686d7dfed721102860ff2571e6b9f529ae81a upstream.
+
+The assigned parent clocks should be normally specified in the consumer
+device's DT node, this ensures respective driver always sees correct clock
+settings when required.
+
+This patch fixes regression in audio subsystem on Odroid XU3/XU4 boards
+that appeared after commits:
+
+commit 647d04f8e07a ("ASoC: samsung: i2s: Ensure the RCLK rate is properly determined")
+commit 995e73e55f46 ("ASoC: samsung: i2s: Fix rclk_srcrate handling")
+commit 48279c53fd1d ("ASoC: samsung: i2s: Prevent external abort on exynos5433 I2S1 access")
+
+Without this patch the driver gets wrong clock as the I2S function clock
+(op_clk) in probe() and effectively the clock which is finally assigned
+from DT is not being enabled/disabled in the runtime resume/suspend ops.
+
+Without the above listed commits the EXYNOS_I2S_BUS clock was always set
+as parent of CLK_I2S_RCLK_SRC regardless of DT settings so there was no issue
+with not enabled EXYNOS_SCLK_I2S.
+
+Cc: <stable@vger.kernel.org> # 4.17.x
+Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi | 9 ++++-----
+ arch/arm/boot/dts/exynos5422-odroidxu4.dts | 9 ++++-----
+ 2 files changed, 8 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+@@ -26,8 +26,7 @@
+ "Speakers", "SPKL",
+ "Speakers", "SPKR";
+
+- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
+- <&clock CLK_MOUT_EPLL>,
++ assigned-clocks = <&clock CLK_MOUT_EPLL>,
+ <&clock CLK_MOUT_MAU_EPLL>,
+ <&clock CLK_MOUT_USER_MAU_EPLL>,
+ <&clock_audss EXYNOS_MOUT_AUDSS>,
+@@ -36,8 +35,7 @@
+ <&clock_audss EXYNOS_DOUT_AUD_BUS>,
+ <&clock_audss EXYNOS_DOUT_I2S>;
+
+- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
+- <&clock CLK_FOUT_EPLL>,
++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
+ <&clock CLK_MOUT_EPLL>,
+ <&clock CLK_MOUT_MAU_EPLL>,
+ <&clock CLK_MAU_EPLL>,
+@@ -48,7 +46,6 @@
+ <0>,
+ <0>,
+ <0>,
+- <0>,
+ <196608001>,
+ <(196608002 / 2)>,
+ <196608000>;
+@@ -84,4 +81,6 @@
+
+ &i2s0 {
+ status = "okay";
++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
+ };
+--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
++++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+@@ -33,8 +33,7 @@
+ compatible = "samsung,odroid-xu3-audio";
+ model = "Odroid-XU4";
+
+- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
+- <&clock CLK_MOUT_EPLL>,
++ assigned-clocks = <&clock CLK_MOUT_EPLL>,
+ <&clock CLK_MOUT_MAU_EPLL>,
+ <&clock CLK_MOUT_USER_MAU_EPLL>,
+ <&clock_audss EXYNOS_MOUT_AUDSS>,
+@@ -43,8 +42,7 @@
+ <&clock_audss EXYNOS_DOUT_AUD_BUS>,
+ <&clock_audss EXYNOS_DOUT_I2S>;
+
+- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
+- <&clock CLK_FOUT_EPLL>,
++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
+ <&clock CLK_MOUT_EPLL>,
+ <&clock CLK_MOUT_MAU_EPLL>,
+ <&clock CLK_MAU_EPLL>,
+@@ -55,7 +53,6 @@
+ <0>,
+ <0>,
+ <0>,
+- <0>,
+ <196608001>,
+ <(196608002 / 2)>,
+ <196608000>;
+@@ -79,6 +76,8 @@
+
+ &i2s0 {
+ status = "okay";
++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
+ };
+
+ &pwm {
--- /dev/null
+From 169113ece0f29ebe884a6cfcf57c1ace04d8a36a Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 3 Jan 2019 17:45:07 +0000
+Subject: arm64: compat: Avoid sending SIGILL for unallocated syscall numbers
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 169113ece0f29ebe884a6cfcf57c1ace04d8a36a upstream.
+
+The ARM Linux kernel handles the EABI syscall numbers as follows:
+
+ 0 - NR_SYSCALLS-1 : Invoke syscall via syscall table
+ NR_SYSCALLS - 0xeffff : -ENOSYS (to be allocated in future)
+ 0xf0000 - 0xf07ff : Private syscall or -ENOSYS if not allocated
+ > 0xf07ff : SIGILL
+
+Our compat code gets this wrong and ends up sending SIGILL in response
+to all syscalls greater than NR_SYSCALLS which have a value greater
+than 0x7ff in the bottom 16 bits.
+
+Fix this by defining the end of the ARM private syscall region and
+checking the syscall number against that directly. Update the comment
+while we're at it.
+
+Cc: <stable@vger.kernel.org>
+Cc: Dave Martin <Dave.Martin@arm.com>
+Reported-by: Pi-Hsun Shih <pihsun@chromium.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/unistd.h | 5 +++--
+ arch/arm64/kernel/sys_compat.c | 4 ++--
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/unistd.h
++++ b/arch/arm64/include/asm/unistd.h
+@@ -40,8 +40,9 @@
+ * The following SVCs are ARM private.
+ */
+ #define __ARM_NR_COMPAT_BASE 0x0f0000
+-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
+-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
++#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
++#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
++#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
+
+ #define __NR_compat_syscalls 399
+ #endif
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -102,12 +102,12 @@ long compat_arm_syscall(struct pt_regs *
+
+ default:
+ /*
+- * Calls 9f00xx..9f07ff are defined to return -ENOSYS
++ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
+ * if not implemented, rather than raising SIGILL. This
+ * way the calling program can gracefully determine whether
+ * a feature is supported.
+ */
+- if ((no & 0xffff) <= 0x7ff)
++ if (no < __ARM_NR_COMPAT_END)
+ return -ENOSYS;
+ break;
+ }
--- /dev/null
+From 53290432145a8eb143fe29e06e9c1465d43dc723 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 3 Jan 2019 18:00:39 +0000
+Subject: arm64: compat: Don't pull syscall number from regs in arm_compat_syscall
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 53290432145a8eb143fe29e06e9c1465d43dc723 upstream.
+
+The syscall number may have been changed by a tracer, so we should pass
+the actual number in from the caller instead of pulling it from the
+saved r7 value directly.
+
+Cc: <stable@vger.kernel.org>
+Cc: Pi-Hsun Shih <pihsun@chromium.org>
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/sys_compat.c | 9 ++++-----
+ arch/arm64/kernel/syscall.c | 9 ++++-----
+ 2 files changed, 8 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start,
+ /*
+ * Handle all unrecognised system calls.
+ */
+-long compat_arm_syscall(struct pt_regs *regs)
++long compat_arm_syscall(struct pt_regs *regs, int scno)
+ {
+- unsigned int no = regs->regs[7];
+ void __user *addr;
+
+- switch (no) {
++ switch (scno) {
+ /*
+ * Flush a region from virtual address 'r0' to virtual address 'r1'
+ * _exclusive_. There is no alignment requirement on either address;
+@@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs *
+ * way the calling program can gracefully determine whether
+ * a feature is supported.
+ */
+- if (no < __ARM_NR_COMPAT_END)
++ if (scno < __ARM_NR_COMPAT_END)
+ return -ENOSYS;
+ break;
+ }
+@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *
+ (compat_thumb_mode(regs) ? 2 : 4);
+
+ arm64_notify_die("Oops - bad compat syscall(2)", regs,
+- SIGILL, ILL_ILLTRP, addr, no);
++ SIGILL, ILL_ILLTRP, addr, scno);
+ return 0;
+ }
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -13,16 +13,15 @@
+ #include <asm/thread_info.h>
+ #include <asm/unistd.h>
+
+-long compat_arm_syscall(struct pt_regs *regs);
+-
++long compat_arm_syscall(struct pt_regs *regs, int scno);
+ long sys_ni_syscall(void);
+
+-asmlinkage long do_ni_syscall(struct pt_regs *regs)
++static long do_ni_syscall(struct pt_regs *regs, int scno)
+ {
+ #ifdef CONFIG_COMPAT
+ long ret;
+ if (is_compat_task()) {
+- ret = compat_arm_syscall(regs);
++ ret = compat_arm_syscall(regs, scno);
+ if (ret != -ENOSYS)
+ return ret;
+ }
+@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_reg
+ syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
+ ret = __invoke_syscall(regs, syscall_fn);
+ } else {
+- ret = do_ni_syscall(regs);
++ ret = do_ni_syscall(regs, scno);
+ }
+
+ regs->regs[0] = ret;
--- /dev/null
+From df655b75c43fba0f2621680ab261083297fd6d16 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 13 Dec 2018 16:06:14 +0000
+Subject: arm64: KVM: Avoid setting the upper 32 bits of VTCR_EL2 to 1
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit df655b75c43fba0f2621680ab261083297fd6d16 upstream.
+
+Although bit 31 of VTCR_EL2 is RES1, we inadvertently end up setting all
+of the upper 32 bits to 1 as well because we define VTCR_EL2_RES1 as
+signed, which is sign-extended when assigning to kvm->arch.vtcr.
+
+Lucky for us, the architecture currently treats these upper bits as RES0
+so, whilst we've been naughty, we haven't set fire to anything yet.
+
+Cc: <stable@vger.kernel.org>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_arm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -104,7 +104,7 @@
+ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+
+ /* VTCR_EL2 Registers bits */
+-#define VTCR_EL2_RES1 (1 << 31)
++#define VTCR_EL2_RES1 (1U << 31)
+ #define VTCR_EL2_HD (1 << 22)
+ #define VTCR_EL2_HA (1 << 21)
+ #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
--- /dev/null
+From 3cd508a8c1379427afb5e16c2e0a7c986d907853 Mon Sep 17 00:00:00 2001
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Wed, 17 Oct 2018 21:32:58 +0100
+Subject: iommu/arm-smmu-v3: Fix big-endian CMD_SYNC writes
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+commit 3cd508a8c1379427afb5e16c2e0a7c986d907853 upstream.
+
+When we insert the sync sequence number into the CMD_SYNC.MSIData field,
+we do so in CPU-native byte order, before writing out the whole command
+as explicitly little-endian dwords. Thus on big-endian systems, the SMMU
+will receive and write back a byteswapped version of sync_nr, which would
+be perfect if it were targeting a similarly-little-endian ITS, but since
+it's actually writing back to memory being polled by the CPUs, they're
+going to end up seeing the wrong thing.
+
+Since the SMMU doesn't care what the MSIData actually contains, the
+minimal-overhead solution is to simply add an extra byteswap initially,
+such that it then writes back the big-endian format directly.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 37de98f8f1cf ("iommu/arm-smmu-v3: Use CMD_SYNC completion MSI")
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu-v3.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -828,7 +828,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
+- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
++ /*
++ * Commands are written little-endian, but we want the SMMU to
++ * receive MSIData, and thus write it back to memory, in CPU
++ * byte order, so big-endian needs an extra byteswap here.
++ */
++ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
++ cpu_to_le32(ent->sync.msidata));
+ cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
+ break;
+ default:
--- /dev/null
+From fb544d1ca65a89f7a3895f7531221ceeed74ada7 Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@arm.com>
+Date: Tue, 11 Dec 2018 13:23:57 +0100
+Subject: KVM: arm/arm64: Fix VMID alloc race by reverting to lock-less
+
+From: Christoffer Dall <christoffer.dall@arm.com>
+
+commit fb544d1ca65a89f7a3895f7531221ceeed74ada7 upstream.
+
+We recently addressed a VMID generation race by introducing a read/write
+lock around accesses and updates to the vmid generation values.
+
+However, kvm_arch_vcpu_ioctl_run() also calls need_new_vmid_gen() but
+does so without taking the read lock.
+
+As far as I can tell, this can lead to the same kind of race:
+
+ VM 0, VCPU 0 VM 0, VCPU 1
+ ------------ ------------
+ update_vttbr (vmid 254)
+ update_vttbr (vmid 1) // roll over
+ read_lock(kvm_vmid_lock);
+ force_vm_exit()
+ local_irq_disable
+ need_new_vmid_gen == false //because vmid gen matches
+
+ enter_guest (vmid 254)
+ kvm_arch.vttbr = <PGD>:<VMID 1>
+ read_unlock(kvm_vmid_lock);
+
+ enter_guest (vmid 1)
+
+Which results in running two VCPUs in the same VM with different VMIDs
+and (even worse) other VCPUs from other VMs could now allocate clashing
+VMID 254 from the new generation as long as VCPU 0 is not exiting.
+
+Attempt to solve this by making sure vttbr is updated before another CPU
+can observe the updated VMID generation.
+
+Cc: stable@vger.kernel.org
+Fixes: f0cf47d939d0 "KVM: arm/arm64: Close VMID generation race"
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/arm.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *,
+ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static unsigned int kvm_vmid_bits __read_mostly;
+-static DEFINE_RWLOCK(kvm_vmid_lock);
++static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+ static bool vgic_present;
+
+@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask
+ */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
++ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
++ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
+ }
+
+ /**
+@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm
+ {
+ phys_addr_t pgd_phys;
+ u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
+- bool new_gen;
+
+- read_lock(&kvm_vmid_lock);
+- new_gen = need_new_vmid_gen(kvm);
+- read_unlock(&kvm_vmid_lock);
+-
+- if (!new_gen)
++ if (!need_new_vmid_gen(kvm))
+ return;
+
+- write_lock(&kvm_vmid_lock);
++ spin_lock(&kvm_vmid_lock);
+
+ /*
+ * We need to re-check the vmid_gen here to ensure that if another vcpu
+@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm
+ * use the same vmid.
+ */
+ if (!need_new_vmid_gen(kvm)) {
+- write_unlock(&kvm_vmid_lock);
++ spin_unlock(&kvm_vmid_lock);
+ return;
+ }
+
+@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+ kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
+@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
+
+- write_unlock(&kvm_vmid_lock);
++ smp_wmb();
++ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
++
++ spin_unlock(&kvm_vmid_lock);
+ }
+
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
--- /dev/null
+From bea2ef803ade3359026d5d357348842bca9edcf1 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 4 Dec 2018 17:11:19 +0000
+Subject: KVM: arm/arm64: vgic: Cap SPIs to the VM-defined maximum
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit bea2ef803ade3359026d5d357348842bca9edcf1 upstream.
+
+SPIs should be checked against the VM's specific configuration, and
+not the architectural maximum.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -108,8 +108,8 @@ struct vgic_irq *vgic_get_irq(struct kvm
+ }
+
+ /* SPIs */
+- if (intid <= VGIC_MAX_SPI) {
+- intid = array_index_nospec(intid, VGIC_MAX_SPI);
++ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
++ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
+ return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+ }
+
--- /dev/null
+From 2e2f6c3c0b08eed3fcf7de3c7684c940451bdeb1 Mon Sep 17 00:00:00 2001
+From: Julien Thierry <julien.thierry@arm.com>
+Date: Mon, 26 Nov 2018 18:26:44 +0000
+Subject: KVM: arm/arm64: vgic: Do not cond_resched_lock() with IRQs disabled
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+commit 2e2f6c3c0b08eed3fcf7de3c7684c940451bdeb1 upstream.
+
+To change the active state of an MMIO, halt is requested for all vcpus of
+the affected guest before modifying the IRQ state. This is done by calling
+cond_resched_lock() in vgic_mmio_change_active(). However interrupts are
+disabled at this point and we cannot reschedule a vcpu.
+
+We actually don't need any of this, as kvm_arm_halt_guest ensures that
+all the other vcpus are out of the guest. Let's just drop that useless
+code.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-mmio.c | 21 ---------------------
+ 1 file changed, 21 deletions(-)
+
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -313,27 +313,6 @@ static void vgic_mmio_change_active(stru
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
+
+- /*
+- * If this virtual IRQ was written into a list register, we
+- * have to make sure the CPU that runs the VCPU thread has
+- * synced back the LR state to the struct vgic_irq.
+- *
+- * As long as the conditions below are true, we know the VCPU thread
+- * may be on its way back from the guest (we kicked the VCPU thread in
+- * vgic_change_active_prepare) and still has to sync back this IRQ,
+- * so we release and re-acquire the spin_lock to let the other thread
+- * sync back the IRQ.
+- *
+- * When accessing VGIC state from user space, requester_vcpu is
+- * NULL, which is fine, because we guarantee that no VCPUs are running
+- * when accessing VGIC state from user space so irq->vcpu->cpu is
+- * always -1.
+- */
+- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
+- irq->vcpu->cpu != -1) /* VCPU thread is running */
+- cond_resched_lock(&irq->irq_lock);
+-
+ if (irq->hw) {
+ vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ } else {
--- /dev/null
+From c23b2e6fc4ca346018618266bcabd335c0a8a49e Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 12 Dec 2018 14:11:23 -0600
+Subject: KVM: arm/arm64: vgic: Fix off-by-one bug in vgic_get_irq()
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit c23b2e6fc4ca346018618266bcabd335c0a8a49e upstream.
+
+When using the nospec API, it should be taken into account that:
+
+"...if the CPU speculates past the bounds check then
+ * array_index_nospec() will clamp the index within the range of [0,
+ * size)."
+
+The above is part of the header for macro array_index_nospec() in
+linux/nospec.h
+
+Now, in this particular case, if intid evaluates to exactly VGIC_MAX_SPI
+or to exactly VGIC_MAX_PRIVATE, the array_index_nospec() macro ends up
+returning VGIC_MAX_SPI - 1 or VGIC_MAX_PRIVATE - 1 respectively, instead
+of VGIC_MAX_SPI or VGIC_MAX_PRIVATE, which, based on the original logic:
+
+ /* SGIs and PPIs */
+ if (intid <= VGIC_MAX_PRIVATE)
+ return &vcpu->arch.vgic_cpu.private_irqs[intid];
+
+ /* SPIs */
+ if (intid <= VGIC_MAX_SPI)
+ return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+
+are valid values for intid.
+
+Fix this by calling array_index_nospec() macro with VGIC_MAX_PRIVATE + 1
+and VGIC_MAX_SPI + 1 as arguments for its parameter size.
+
+Fixes: 41b87599c743 ("KVM: arm/arm64: vgic: fix possible spectre-v1 in vgic_get_irq()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+[dropped the SPI part which was fixed separately]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -103,7 +103,7 @@ struct vgic_irq *vgic_get_irq(struct kvm
+ {
+ /* SGIs and PPIs */
+ if (intid <= VGIC_MAX_PRIVATE) {
+- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
++ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
+ return &vcpu->arch.vgic_cpu.private_irqs[intid];
+ }
+
--- /dev/null
+From 60c3ab30d8c2ff3a52606df03f05af2aae07dc6b Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@arm.com>
+Date: Tue, 11 Dec 2018 12:51:03 +0100
+Subject: KVM: arm/arm64: vgic-v2: Set active_source to 0 when restoring state
+
+From: Christoffer Dall <christoffer.dall@arm.com>
+
+commit 60c3ab30d8c2ff3a52606df03f05af2aae07dc6b upstream.
+
+When restoring the active state from userspace, we don't know which CPU
+was the source for the active state, and this is not architecturally
+exposed in any of the register state.
+
+Set the active_source to 0 in this case. In the future, we can expand
+on this and expose the information as additional information to
+userspace for GICv2 if anyone cares.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-mmio.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -317,11 +317,26 @@ static void vgic_mmio_change_active(stru
+ vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ } else {
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
++ u8 active_source;
+
+ irq->active = active;
++
++ /*
++ * The GICv2 architecture indicates that the source CPUID for
++ * an SGI should be provided during an EOI which implies that
++ * the active state is stored somewhere, but at the same time
++ * this state is not architecturally exposed anywhere and we
++ * have no way of knowing the right source.
++ *
++ * This may lead to a VCPU not being able to receive
++ * additional instances of a particular SGI after migration
++ * for a GICv2 VM on some GIC implementations. Oh well.
++ */
++ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
++
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+ active && vgic_irq_is_sgi(irq->intid))
+- irq->active_source = requester_vcpu->vcpu_id;
++ irq->active_source = active_source;
+ }
+
+ if (irq->active)
--- /dev/null
+From dfbaecb2b707cfdc5276b548d52b437384bd6483 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 4 Jan 2019 23:32:53 +0100
+Subject: parisc: Remap hugepage-aligned pages in set_kernel_text_rw()
+
+From: Helge Deller <deller@gmx.de>
+
+commit dfbaecb2b707cfdc5276b548d52b437384bd6483 upstream.
+
+The alternative coding patch for parisc in kernel 4.20 broke booting
+machines with PA8500-PA8700 CPUs. The problem is, that for such machines
+the parisc kernel automatically utilizes huge pages to access kernel
+text code, but the set_kernel_text_rw() function, which is used shortly
+before applying any alternative patches, didn't used the correctly
+hugepage-aligned addresses to remap the kernel text read-writeable.
+
+Fixes: 3847dab77421 ("parisc: Add alternative coding infrastructure")
+Cc: <stable@vger.kernel.org> [4.20]
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/mm/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -512,8 +512,8 @@ static void __init map_pages(unsigned lo
+
+ void __init set_kernel_text_rw(int enable_read_write)
+ {
+- unsigned long start = (unsigned long)__init_begin;
+- unsigned long end = (unsigned long)_etext;
++ unsigned long start = (unsigned long) _text;
++ unsigned long end = (unsigned long) &data_start;
+
+ map_pages(start, __pa(start), end-start,
+ PAGE_KERNEL_RWX, enable_read_write ? 1:0);
--- /dev/null
+From 3cc9ffbb1f51eb4320575a48e4805a8f52e0e26b Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@linux-mips.org>
+Date: Wed, 7 Nov 2018 02:39:13 +0000
+Subject: rtc: m41t80: Correct alarm month range with RTC reads
+
+From: Maciej W. Rozycki <macro@linux-mips.org>
+
+commit 3cc9ffbb1f51eb4320575a48e4805a8f52e0e26b upstream.
+
+Add the missing adjustment of the month range on alarm reads from the
+RTC, correcting an issue coming from commit 9c6dfed92c3e ("rtc: m41t80:
+add alarm functionality"). The range is 1-12 for hardware and 0-11 for
+`struct rtc_time', and is already correctly handled on alarm writes to
+the RTC.
+
+It was correct up until commit 48e9766726eb ("drivers/rtc/rtc-m41t80.c:
+remove disabled alarm functionality") too, which removed the previous
+implementation of alarm support.
+
+Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
+Fixes: 9c6dfed92c3e ("rtc: m41t80: add alarm functionality")
+References: 48e9766726eb ("drivers/rtc/rtc-m41t80.c: remove disabled alarm functionality")
+Cc: stable@vger.kernel.org # 4.7+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rtc/rtc-m41t80.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct devi
+ alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
+ alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
+ alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
+- alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
++ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
+
+ alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
+ alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
smb3-fix-large-reads-on-encrypted-connections.patch
cifs-return-correct-errors-when-pinning-memory-failed-for-direct-i-o.patch
cifs-use-the-correct-length-when-pinning-memory-for-direct-i-o-for-write.patch
+arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch
+arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch
+arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch
+rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch
+kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch
+kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch
+kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch
+kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch
+kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch
+iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch
+arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch
+arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch
+parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch
+tpm-tpm_try_transmit-refactor-error-flow.patch
+tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch
--- /dev/null
+From 2ba5780ce30549cf57929b01d8cba6fe656e31c5 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Fri, 19 Oct 2018 21:22:47 +0300
+Subject: tpm: tpm_i2c_nuvoton: use correct command duration for TPM 2.x
+
+From: Tomas Winkler <tomas.winkler@intel.com>
+
+commit 2ba5780ce30549cf57929b01d8cba6fe656e31c5 upstream.
+
+tpm_i2c_nuvoton calculated commands duration using TPM 1.x
+values via tpm_calc_ordinal_duration() also for TPM 2.x chips.
+Call tpm2_calc_ordinal_duration() for retrieving ordinal
+duration for TPM 2.X chips.
+
+Cc: stable@vger.kernel.org
+Cc: Nayna Jain <nayna@linux.vnet.ibm.com>
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Reviewed-by: Nayna Jain <nayna@linux.ibm.com>
+Tested-by: Nayna Jain <nayna@linux.ibm.com> (For TPM 2.0)
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_i2c_nuvoton.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_c
+ struct device *dev = chip->dev.parent;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
++ unsigned long duration;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_c
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+- rc = i2c_nuvoton_wait_for_data_avail(chip,
+- tpm_calc_ordinal_duration(chip,
+- ordinal),
+- &priv->read_queue);
++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
++ duration = tpm2_calc_ordinal_duration(chip, ordinal);
++ else
++ duration = tpm_calc_ordinal_duration(chip, ordinal);
++
++ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration\n", __func__);
+ i2c_nuvoton_ready(chip);
--- /dev/null
+From 01f54664a4db0d612de0ece8e0022f21f9374e9b Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Tue, 16 Oct 2018 16:37:16 +0300
+Subject: tpm: tpm_try_transmit() refactor error flow.
+
+From: Tomas Winkler <tomas.winkler@intel.com>
+
+commit 01f54664a4db0d612de0ece8e0022f21f9374e9b upstream.
+
+First, rename out_no_locality to out_locality for bailing out on
+both tpm_cmd_ready() and tpm_request_locality() failure.
+Second, ignore the return value of go_to_idle() as it may override
+the return value of the actual tpm operation, the go_to_idle() error
+will be caught on any consequent command.
+Last, fix the wrong 'goto out', that jumped back instead of forward.
+
+Cc: stable@vger.kernel.org
+Fixes: 627448e85c76 ("tpm: separate cmd_ready/go_idle from runtime_pm")
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm-interface.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct t
+
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
+- if (rc < 0)
+- goto out_no_locality;
++ if (rc < 0) {
++ need_locality = false;
++ goto out_locality;
++ }
+ }
+
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+- goto out;
++ goto out_locality;
+
+ rc = tpm2_prepare_space(chip, space, ordinal, buf);
+ if (rc)
+@@ -547,14 +549,13 @@ out_recv:
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
+
+ out:
+- rc = tpm_go_idle(chip, flags);
+- if (rc)
+- goto out;
++ /* may fail but do not override previous error value in rc */
++ tpm_go_idle(chip, flags);
+
++out_locality:
+ if (need_locality)
+ tpm_relinquish_locality(chip, flags);
+
+-out_no_locality:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+