From 0922fbee039d7c5a06636ab37cc4d848371a8dc0 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 7 Jan 2019 11:36:09 +0100
Subject: [PATCH] 4.20-stable patches

added patches:
      arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch
      arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch
      arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch
      arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch
      arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch
      iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch
      kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch
      kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch
      kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch
      kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch
      kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch
      parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch
      rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch
      tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch
      tpm-tpm_try_transmit-refactor-error-flow.patch

---
 ...-the-active-state-of-gicv3-ppis-sgis.patch |  46 +++++++
 ...y-i2s-assigned-clocks-in-proper-node.patch | 114 +++++++++++++++++
 ...gill-for-unallocated-syscall-numbers.patch |  66 ++++++++++
 ...mber-from-regs-in-arm_compat_syscall.patch |  89 +++++++++++++
 ...g-the-upper-32-bits-of-vtcr_el2-to-1.patch |  38 ++++++
 ...mu-v3-fix-big-endian-cmd_sync-writes.patch |  48 +++++++
 ...alloc-race-by-reverting-to-lock-less.patch | 120 ++++++++++++++++++
 ...c-cap-spis-to-the-vm-defined-maximum.patch |  33 +++++
 ...cond_resched_lock-with-irqs-disabled.patch |  58 +++++++++
 ...c-fix-off-by-one-bug-in-vgic_get_irq.patch |  58 +++++++++
 ...ive_source-to-0-when-restoring-state.patch |  56 ++++++++
 ...-aligned-pages-in-set_kernel_text_rw.patch |  38 ++++++
 ...ect-alarm-month-range-with-rtc-reads.patch |  41 ++++++
 queue-4.20/series                             |  15 +++
 ...correct-command-duration-for-tpm-2.x.patch |  54 ++++++++
 ...tpm_try_transmit-refactor-error-flow.patch |  67 ++++++++++
 16 files changed, 941 insertions(+)
 create mode 100644 queue-4.20/arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch
 create mode 100644 queue-4.20/arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch
 create mode 100644 queue-4.20/arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch
 create mode 100644 queue-4.20/arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch
 create mode 100644 queue-4.20/arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch
 create mode 100644 queue-4.20/iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch
 create mode 100644 queue-4.20/kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch
 create mode 100644 queue-4.20/kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch
 create mode 100644 queue-4.20/kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch
 create mode 100644 queue-4.20/kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch
 create mode 100644 queue-4.20/kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch
 create mode 100644 queue-4.20/parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch
 create mode 100644 queue-4.20/rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch
 create mode 100644 queue-4.20/tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch
 create mode 100644
queue-4.20/tpm-tpm_try_transmit-refactor-error-flow.patch diff --git a/queue-4.20/arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch b/queue-4.20/arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch new file mode 100644 index 00000000000..211b3419ff1 --- /dev/null +++ b/queue-4.20/arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch @@ -0,0 +1,46 @@ +From 107352a24900fb458152b92a4e72fbdc83fd5510 Mon Sep 17 00:00:00 2001 +From: Marc Zyngier +Date: Tue, 18 Dec 2018 14:59:09 +0000 +Subject: arm/arm64: KVM: vgic: Force VM halt when changing the active state of GICv3 PPIs/SGIs + +From: Marc Zyngier + +commit 107352a24900fb458152b92a4e72fbdc83fd5510 upstream. + +We currently only halt the guest when a vCPU messes with the active +state of an SPI. This is perfectly fine for GICv2, but isn't enough +for GICv3, where all vCPUs can access the state of any other vCPU. + +Let's broaden the condition to include any GICv3 interrupt that +has an active state (i.e. all but LPIs). + +Cc: stable@vger.kernel.org +Reviewed-by: Christoffer Dall +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/vgic/vgic-mmio.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -368,14 +368,16 @@ static void vgic_mmio_change_active(stru + */ + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) + { +- if (intid > VGIC_NR_PRIVATE_IRQS) ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || ++ intid > VGIC_NR_PRIVATE_IRQS) + kvm_arm_halt_guest(vcpu->kvm); + } + + /* See vgic_change_active_prepare */ + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) + { +- if (intid > VGIC_NR_PRIVATE_IRQS) ++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || ++ intid > VGIC_NR_PRIVATE_IRQS) + kvm_arm_resume_guest(vcpu->kvm); + } + diff --git a/queue-4.20/arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch b/queue-4.20/arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch new file mode 100644 index 00000000000..d8840e7553a --- /dev/null +++ b/queue-4.20/arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch @@ -0,0 +1,114 @@ +From 8ac686d7dfed721102860ff2571e6b9f529ae81a Mon Sep 17 00:00:00 2001 +From: Sylwester Nawrocki +Date: Wed, 12 Dec 2018 18:57:44 +0100 +Subject: ARM: dts: exynos: Specify I2S assigned clocks in proper node + +From: Sylwester Nawrocki + +commit 8ac686d7dfed721102860ff2571e6b9f529ae81a upstream. + +The assigned parent clocks should be normally specified in the consumer +device's DT node, this ensures respective driver always sees correct clock +settings when required. + +This patch fixes regression in audio subsystem on Odroid XU3/XU4 boards +that appeared after commits: + +commit 647d04f8e07a ("ASoC: samsung: i2s: Ensure the RCLK rate is properly determined") +commit 995e73e55f46 ("ASoC: samsung: i2s: Fix rclk_srcrate handling") +commit 48279c53fd1d ("ASoC: samsung: i2s: Prevent external abort on exynos5433 I2S1 access") + +Without this patch the driver gets wrong clock as the I2S function clock +(op_clk) in probe() and effectively the clock which is finally assigned +from DT is not being enabled/disabled in the runtime resume/suspend ops. 
+ +Without the above listed commits the EXYNOS_I2S_BUS clock was always set +as parent of CLK_I2S_RCLK_SRC regardless of DT settings so there was no issue +with not enabled EXYNOS_SCLK_I2S. + +Cc: # 4.17.x +Signed-off-by: Sylwester Nawrocki +Signed-off-by: Krzysztof Kozlowski +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi | 9 ++++----- + arch/arm/boot/dts/exynos5422-odroidxu4.dts | 9 ++++----- + 2 files changed, 8 insertions(+), 10 deletions(-) + +--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi ++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi +@@ -26,8 +26,7 @@ + "Speakers", "SPKL", + "Speakers", "SPKR"; + +- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, +- <&clock CLK_MOUT_EPLL>, ++ assigned-clocks = <&clock CLK_MOUT_EPLL>, + <&clock CLK_MOUT_MAU_EPLL>, + <&clock CLK_MOUT_USER_MAU_EPLL>, + <&clock_audss EXYNOS_MOUT_AUDSS>, +@@ -36,8 +35,7 @@ + <&clock_audss EXYNOS_DOUT_AUD_BUS>, + <&clock_audss EXYNOS_DOUT_I2S>; + +- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, +- <&clock CLK_FOUT_EPLL>, ++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>, + <&clock CLK_MOUT_EPLL>, + <&clock CLK_MOUT_MAU_EPLL>, + <&clock CLK_MAU_EPLL>, +@@ -48,7 +46,6 @@ + <0>, + <0>, + <0>, +- <0>, + <196608001>, + <(196608002 / 2)>, + <196608000>; +@@ -84,4 +81,6 @@ + + &i2s0 { + status = "okay"; ++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; ++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; + }; +--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts ++++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts +@@ -33,8 +33,7 @@ + compatible = "samsung,odroid-xu3-audio"; + model = "Odroid-XU4"; + +- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, +- <&clock CLK_MOUT_EPLL>, ++ assigned-clocks = <&clock CLK_MOUT_EPLL>, + <&clock CLK_MOUT_MAU_EPLL>, + <&clock CLK_MOUT_USER_MAU_EPLL>, + <&clock_audss EXYNOS_MOUT_AUDSS>, +@@ -43,8 +42,7 @@ + <&clock_audss EXYNOS_DOUT_AUD_BUS>, + <&clock_audss EXYNOS_DOUT_I2S>; + +- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, +- <&clock CLK_FOUT_EPLL>, ++ assigned-clock-parents = <&clock CLK_FOUT_EPLL>, + <&clock CLK_MOUT_EPLL>, + <&clock CLK_MOUT_MAU_EPLL>, + <&clock CLK_MAU_EPLL>, +@@ -55,7 +53,6 @@ + <0>, + <0>, + <0>, +- <0>, + <196608001>, + <(196608002 / 2)>, + <196608000>; +@@ -79,6 +76,8 @@ + + &i2s0 { + status = "okay"; ++ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; ++ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; + }; + + &pwm { diff --git a/queue-4.20/arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch b/queue-4.20/arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch new file mode 100644 index 00000000000..71212af6afd --- /dev/null +++ b/queue-4.20/arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch @@ -0,0 +1,66 @@ +From 169113ece0f29ebe884a6cfcf57c1ace04d8a36a Mon Sep 17 00:00:00 2001 +From: Will Deacon +Date: Thu, 3 Jan 2019 17:45:07 +0000 +Subject: arm64: compat: Avoid sending SIGILL for unallocated syscall numbers + +From: Will Deacon + +commit 169113ece0f29ebe884a6cfcf57c1ace04d8a36a upstream. 
+ +The ARM Linux kernel handles the EABI syscall numbers as follows: + + 0 - NR_SYSCALLS-1 : Invoke syscall via syscall table + NR_SYSCALLS - 0xeffff : -ENOSYS (to be allocated in future) + 0xf0000 - 0xf07ff : Private syscall or -ENOSYS if not allocated + > 0xf07ff : SIGILL + +Our compat code gets this wrong and ends up sending SIGILL in response +to all syscalls greater than NR_SYSCALLS which have a value greater +than 0x7ff in the bottom 16 bits. + +Fix this by defining the end of the ARM private syscall region and +checking the syscall number against that directly. Update the comment +while we're at it. + +Cc: +Cc: Dave Martin +Reported-by: Pi-Hsun Shih +Signed-off-by: Will Deacon +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/include/asm/unistd.h | 5 +++-- + arch/arm64/kernel/sys_compat.c | 4 ++-- + 2 files changed, 5 insertions(+), 4 deletions(-) + +--- a/arch/arm64/include/asm/unistd.h ++++ b/arch/arm64/include/asm/unistd.h +@@ -40,8 +40,9 @@ + * The following SVCs are ARM private. + */ + #define __ARM_NR_COMPAT_BASE 0x0f0000 +-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) +-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) ++#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2) ++#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) ++#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) + + #define __NR_compat_syscalls 399 + #endif +--- a/arch/arm64/kernel/sys_compat.c ++++ b/arch/arm64/kernel/sys_compat.c +@@ -102,12 +102,12 @@ long compat_arm_syscall(struct pt_regs * + + default: + /* +- * Calls 9f00xx..9f07ff are defined to return -ENOSYS ++ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS + * if not implemented, rather than raising SIGILL. This + * way the calling program can gracefully determine whether + * a feature is supported. + */ +- if ((no & 0xffff) <= 0x7ff) ++ if (no < __ARM_NR_COMPAT_END) + return -ENOSYS; + break; + } diff --git a/queue-4.20/arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch b/queue-4.20/arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch new file mode 100644 index 00000000000..21778273f76 --- /dev/null +++ b/queue-4.20/arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch @@ -0,0 +1,89 @@ +From 53290432145a8eb143fe29e06e9c1465d43dc723 Mon Sep 17 00:00:00 2001 +From: Will Deacon +Date: Thu, 3 Jan 2019 18:00:39 +0000 +Subject: arm64: compat: Don't pull syscall number from regs in arm_compat_syscall + +From: Will Deacon + +commit 53290432145a8eb143fe29e06e9c1465d43dc723 upstream. + +The syscall number may have been changed by a tracer, so we should pass +the actual number in from the caller instead of pulling it from the +saved r7 value directly. + +Cc: +Cc: Pi-Hsun Shih +Reviewed-by: Dave Martin +Signed-off-by: Will Deacon +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/kernel/sys_compat.c | 9 ++++----- + arch/arm64/kernel/syscall.c | 9 ++++----- + 2 files changed, 8 insertions(+), 10 deletions(-) + +--- a/arch/arm64/kernel/sys_compat.c ++++ b/arch/arm64/kernel/sys_compat.c +@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, + /* + * Handle all unrecognised system calls. + */ +-long compat_arm_syscall(struct pt_regs *regs) ++long compat_arm_syscall(struct pt_regs *regs, int scno) + { +- unsigned int no = regs->regs[7]; + void __user *addr; + +- switch (no) { ++ switch (scno) { + /* + * Flush a region from virtual address 'r0' to virtual address 'r1' + * _exclusive_. 
There is no alignment requirement on either address; +@@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs * + * way the calling program can gracefully determine whether + * a feature is supported. + */ +- if (no < __ARM_NR_COMPAT_END) ++ if (scno < __ARM_NR_COMPAT_END) + return -ENOSYS; + break; + } +@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs * + (compat_thumb_mode(regs) ? 2 : 4); + + arm64_notify_die("Oops - bad compat syscall(2)", regs, +- SIGILL, ILL_ILLTRP, addr, no); ++ SIGILL, ILL_ILLTRP, addr, scno); + return 0; + } +--- a/arch/arm64/kernel/syscall.c ++++ b/arch/arm64/kernel/syscall.c +@@ -13,16 +13,15 @@ + #include + #include + +-long compat_arm_syscall(struct pt_regs *regs); +- ++long compat_arm_syscall(struct pt_regs *regs, int scno); + long sys_ni_syscall(void); + +-asmlinkage long do_ni_syscall(struct pt_regs *regs) ++static long do_ni_syscall(struct pt_regs *regs, int scno) + { + #ifdef CONFIG_COMPAT + long ret; + if (is_compat_task()) { +- ret = compat_arm_syscall(regs); ++ ret = compat_arm_syscall(regs, scno); + if (ret != -ENOSYS) + return ret; + } +@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_reg + syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; + ret = __invoke_syscall(regs, syscall_fn); + } else { +- ret = do_ni_syscall(regs); ++ ret = do_ni_syscall(regs, scno); + } + + regs->regs[0] = ret; diff --git a/queue-4.20/arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch b/queue-4.20/arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch new file mode 100644 index 00000000000..70f350dbb9b --- /dev/null +++ b/queue-4.20/arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch @@ -0,0 +1,38 @@ +From df655b75c43fba0f2621680ab261083297fd6d16 Mon Sep 17 00:00:00 2001 +From: Will Deacon +Date: Thu, 13 Dec 2018 16:06:14 +0000 +Subject: arm64: KVM: Avoid setting the upper 32 bits of VTCR_EL2 to 1 + +From: Will Deacon + +commit df655b75c43fba0f2621680ab261083297fd6d16 upstream. + +Although bit 31 of VTCR_EL2 is RES1, we inadvertently end up setting all +of the upper 32 bits to 1 as well because we define VTCR_EL2_RES1 as +signed, which is sign-extended when assigning to kvm->arch.vtcr. + +Lucky for us, the architecture currently treats these upper bits as RES0 +so, whilst we've been naughty, we haven't set fire to anything yet. + +Cc: +Cc: Marc Zyngier +Cc: Christoffer Dall +Signed-off-by: Will Deacon +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/include/asm/kvm_arm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/include/asm/kvm_arm.h ++++ b/arch/arm64/include/asm/kvm_arm.h +@@ -104,7 +104,7 @@ + TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) + + /* VTCR_EL2 Registers bits */ +-#define VTCR_EL2_RES1 (1 << 31) ++#define VTCR_EL2_RES1 (1U << 31) + #define VTCR_EL2_HD (1 << 22) + #define VTCR_EL2_HA (1 << 21) + #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT diff --git a/queue-4.20/iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch b/queue-4.20/iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch new file mode 100644 index 00000000000..76406d53c7b --- /dev/null +++ b/queue-4.20/iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch @@ -0,0 +1,48 @@ +From 3cd508a8c1379427afb5e16c2e0a7c986d907853 Mon Sep 17 00:00:00 2001 +From: Robin Murphy +Date: Wed, 17 Oct 2018 21:32:58 +0100 +Subject: iommu/arm-smmu-v3: Fix big-endian CMD_SYNC writes + +From: Robin Murphy + +commit 3cd508a8c1379427afb5e16c2e0a7c986d907853 upstream. 
+ +When we insert the sync sequence number into the CMD_SYNC.MSIData field, +we do so in CPU-native byte order, before writing out the whole command +as explicitly little-endian dwords. Thus on big-endian systems, the SMMU +will receive and write back a byteswapped version of sync_nr, which would +be perfect if it were targeting a similarly-little-endian ITS, but since +it's actually writing back to memory being polled by the CPUs, they're +going to end up seeing the wrong thing. + +Since the SMMU doesn't care what the MSIData actually contains, the +minimal-overhead solution is to simply add an extra byteswap initially, +such that it then writes back the big-endian format directly. + +Cc: +Fixes: 37de98f8f1cf ("iommu/arm-smmu-v3: Use CMD_SYNC completion MSI") +Signed-off-by: Robin Murphy +Signed-off-by: Will Deacon +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/iommu/arm-smmu-v3.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +--- a/drivers/iommu/arm-smmu-v3.c ++++ b/drivers/iommu/arm-smmu-v3.c +@@ -828,7 +828,13 @@ static int arm_smmu_cmdq_build_cmd(u64 * + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); +- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata); ++ /* ++ * Commands are written little-endian, but we want the SMMU to ++ * receive MSIData, and thus write it back to memory, in CPU ++ * byte order, so big-endian needs an extra byteswap here. ++ */ ++ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ++ cpu_to_le32(ent->sync.msidata)); + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; + break; + default: diff --git a/queue-4.20/kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch b/queue-4.20/kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch new file mode 100644 index 00000000000..a6be56484d8 --- /dev/null +++ b/queue-4.20/kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch @@ -0,0 +1,120 @@ +From fb544d1ca65a89f7a3895f7531221ceeed74ada7 Mon Sep 17 00:00:00 2001 +From: Christoffer Dall +Date: Tue, 11 Dec 2018 13:23:57 +0100 +Subject: KVM: arm/arm64: Fix VMID alloc race by reverting to lock-less + +From: Christoffer Dall + +commit fb544d1ca65a89f7a3895f7531221ceeed74ada7 upstream. + +We recently addressed a VMID generation race by introducing a read/write +lock around accesses and updates to the vmid generation values. + +However, kvm_arch_vcpu_ioctl_run() also calls need_new_vmid_gen() but +does so without taking the read lock. + +As far as I can tell, this can lead to the same kind of race: + + VM 0, VCPU 0 VM 0, VCPU 1 + ------------ ------------ + update_vttbr (vmid 254) + update_vttbr (vmid 1) // roll over + read_lock(kvm_vmid_lock); + force_vm_exit() + local_irq_disable + need_new_vmid_gen == false //because vmid gen matches + + enter_guest (vmid 254) + kvm_arch.vttbr = : + read_unlock(kvm_vmid_lock); + + enter_guest (vmid 1) + +Which results in running two VCPUs in the same VM with different VMIDs +and (even worse) other VCPUs from other VMs could now allocate clashing +VMID 254 from the new generation as long as VCPU 0 is not exiting. + +Attempt to solve this by making sure vttbr is updated before another CPU +can observe the updated VMID generation. 
+ +Cc: stable@vger.kernel.org +Fixes: f0cf47d939d0 "KVM: arm/arm64: Close VMID generation race" +Reviewed-by: Julien Thierry +Signed-off-by: Christoffer Dall +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/arm.c | 23 +++++++++++------------ + 1 file changed, 11 insertions(+), 12 deletions(-) + +--- a/virt/kvm/arm/arm.c ++++ b/virt/kvm/arm/arm.c +@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, + static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); + static u32 kvm_next_vmid; + static unsigned int kvm_vmid_bits __read_mostly; +-static DEFINE_RWLOCK(kvm_vmid_lock); ++static DEFINE_SPINLOCK(kvm_vmid_lock); + + static bool vgic_present; + +@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask + */ + static bool need_new_vmid_gen(struct kvm *kvm) + { +- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); ++ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen); ++ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ ++ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen); + } + + /** +@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm + { + phys_addr_t pgd_phys; + u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0; +- bool new_gen; + +- read_lock(&kvm_vmid_lock); +- new_gen = need_new_vmid_gen(kvm); +- read_unlock(&kvm_vmid_lock); +- +- if (!new_gen) ++ if (!need_new_vmid_gen(kvm)) + return; + +- write_lock(&kvm_vmid_lock); ++ spin_lock(&kvm_vmid_lock); + + /* + * We need to re-check the vmid_gen here to ensure that if another vcpu +@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm + * use the same vmid. + */ + if (!need_new_vmid_gen(kvm)) { +- write_unlock(&kvm_vmid_lock); ++ spin_unlock(&kvm_vmid_lock); + return; + } + +@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm + kvm_call_hyp(__kvm_flush_vm_context); + } + +- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); + kvm->arch.vmid = kvm_next_vmid; + kvm_next_vmid++; + kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; +@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); + kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp; + +- write_unlock(&kvm_vmid_lock); ++ smp_wmb(); ++ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen)); ++ ++ spin_unlock(&kvm_vmid_lock); + } + + static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) diff --git a/queue-4.20/kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch b/queue-4.20/kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch new file mode 100644 index 00000000000..f7f70dbb7a3 --- /dev/null +++ b/queue-4.20/kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch @@ -0,0 +1,33 @@ +From bea2ef803ade3359026d5d357348842bca9edcf1 Mon Sep 17 00:00:00 2001 +From: Marc Zyngier +Date: Tue, 4 Dec 2018 17:11:19 +0000 +Subject: KVM: arm/arm64: vgic: Cap SPIs to the VM-defined maximum + +From: Marc Zyngier + +commit bea2ef803ade3359026d5d357348842bca9edcf1 upstream. + +SPIs should be checked against the VMs specific configuration, and +not the architectural maximum. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/vgic/vgic.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/virt/kvm/arm/vgic/vgic.c ++++ b/virt/kvm/arm/vgic/vgic.c +@@ -108,8 +108,8 @@ struct vgic_irq *vgic_get_irq(struct kvm + } + + /* SPIs */ +- if (intid <= VGIC_MAX_SPI) { +- intid = array_index_nospec(intid, VGIC_MAX_SPI); ++ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) { ++ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS); + return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; + } + diff --git a/queue-4.20/kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch b/queue-4.20/kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch new file mode 100644 index 00000000000..6ddc8ed572e --- /dev/null +++ b/queue-4.20/kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch @@ -0,0 +1,58 @@ +From 2e2f6c3c0b08eed3fcf7de3c7684c940451bdeb1 Mon Sep 17 00:00:00 2001 +From: Julien Thierry +Date: Mon, 26 Nov 2018 18:26:44 +0000 +Subject: KVM: arm/arm64: vgic: Do not cond_resched_lock() with IRQs disabled + +From: Julien Thierry + +commit 2e2f6c3c0b08eed3fcf7de3c7684c940451bdeb1 upstream. + +To change the active state of an MMIO, halt is requested for all vcpus of +the affected guest before modifying the IRQ state. This is done by calling +cond_resched_lock() in vgic_mmio_change_active(). However interrupts are +disabled at this point and we cannot reschedule a vcpu. + +We actually don't need any of this, as kvm_arm_halt_guest ensures that +all the other vcpus are out of the guest. Let's just drop that useless +code. + +Signed-off-by: Julien Thierry +Suggested-by: Christoffer Dall +Cc: stable@vger.kernel.org +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/vgic/vgic-mmio.c | 21 --------------------- + 1 file changed, 21 deletions(-) + +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -313,27 +313,6 @@ static void vgic_mmio_change_active(stru + + spin_lock_irqsave(&irq->irq_lock, flags); + +- /* +- * If this virtual IRQ was written into a list register, we +- * have to make sure the CPU that runs the VCPU thread has +- * synced back the LR state to the struct vgic_irq. +- * +- * As long as the conditions below are true, we know the VCPU thread +- * may be on its way back from the guest (we kicked the VCPU thread in +- * vgic_change_active_prepare) and still has to sync back this IRQ, +- * so we release and re-acquire the spin_lock to let the other thread +- * sync back the IRQ. +- * +- * When accessing VGIC state from user space, requester_vcpu is +- * NULL, which is fine, because we guarantee that no VCPUs are running +- * when accessing VGIC state from user space so irq->vcpu->cpu is +- * always -1. 
+- */ +- while (irq->vcpu && /* IRQ may have state in an LR somewhere */ +- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */ +- irq->vcpu->cpu != -1) /* VCPU thread is running */ +- cond_resched_lock(&irq->irq_lock); +- + if (irq->hw) { + vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); + } else { diff --git a/queue-4.20/kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch b/queue-4.20/kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch new file mode 100644 index 00000000000..099de5c17d3 --- /dev/null +++ b/queue-4.20/kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch @@ -0,0 +1,58 @@ +From c23b2e6fc4ca346018618266bcabd335c0a8a49e Mon Sep 17 00:00:00 2001 +From: "Gustavo A. R. Silva" +Date: Wed, 12 Dec 2018 14:11:23 -0600 +Subject: KVM: arm/arm64: vgic: Fix off-by-one bug in vgic_get_irq() + +From: Gustavo A. R. Silva + +commit c23b2e6fc4ca346018618266bcabd335c0a8a49e upstream. + +When using the nospec API, it should be taken into account that: + +"...if the CPU speculates past the bounds check then + * array_index_nospec() will clamp the index within the range of [0, + * size)." + +The above is part of the header for macro array_index_nospec() in +linux/nospec.h + +Now, in this particular case, if intid evaluates to exactly VGIC_MAX_SPI +or to exaclty VGIC_MAX_PRIVATE, the array_index_nospec() macro ends up +returning VGIC_MAX_SPI - 1 or VGIC_MAX_PRIVATE - 1 respectively, instead +of VGIC_MAX_SPI or VGIC_MAX_PRIVATE, which, based on the original logic: + + /* SGIs and PPIs */ + if (intid <= VGIC_MAX_PRIVATE) + return &vcpu->arch.vgic_cpu.private_irqs[intid]; + + /* SPIs */ + if (intid <= VGIC_MAX_SPI) + return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; + +are valid values for intid. + +Fix this by calling array_index_nospec() macro with VGIC_MAX_PRIVATE + 1 +and VGIC_MAX_SPI + 1 as arguments for its parameter size. + +Fixes: 41b87599c743 ("KVM: arm/arm64: vgic: fix possible spectre-v1 in vgic_get_irq()") +Cc: stable@vger.kernel.org +Signed-off-by: Gustavo A. R. Silva +[dropped the SPI part which was fixed separately] +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/vgic/vgic.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/virt/kvm/arm/vgic/vgic.c ++++ b/virt/kvm/arm/vgic/vgic.c +@@ -103,7 +103,7 @@ struct vgic_irq *vgic_get_irq(struct kvm + { + /* SGIs and PPIs */ + if (intid <= VGIC_MAX_PRIVATE) { +- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE); ++ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1); + return &vcpu->arch.vgic_cpu.private_irqs[intid]; + } + diff --git a/queue-4.20/kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch b/queue-4.20/kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch new file mode 100644 index 00000000000..15626252690 --- /dev/null +++ b/queue-4.20/kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch @@ -0,0 +1,56 @@ +From 60c3ab30d8c2ff3a52606df03f05af2aae07dc6b Mon Sep 17 00:00:00 2001 +From: Christoffer Dall +Date: Tue, 11 Dec 2018 12:51:03 +0100 +Subject: KVM: arm/arm64: vgic-v2: Set active_source to 0 when restoring state + +From: Christoffer Dall + +commit 60c3ab30d8c2ff3a52606df03f05af2aae07dc6b upstream. + +When restoring the active state from userspace, we don't know which CPU +was the source for the active state, and this is not architecturally +exposed in any of the register state. + +Set the active_source to 0 in this case. 
In the future, we can expand +on this and exposse the information as additional information to +userspace for GICv2 if anyone cares. + +Cc: stable@vger.kernel.org +Signed-off-by: Christoffer Dall +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman + +--- + virt/kvm/arm/vgic/vgic-mmio.c | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -317,11 +317,26 @@ static void vgic_mmio_change_active(stru + vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); + } else { + u32 model = vcpu->kvm->arch.vgic.vgic_model; ++ u8 active_source; + + irq->active = active; ++ ++ /* ++ * The GICv2 architecture indicates that the source CPUID for ++ * an SGI should be provided during an EOI which implies that ++ * the active state is stored somewhere, but at the same time ++ * this state is not architecturally exposed anywhere and we ++ * have no way of knowing the right source. ++ * ++ * This may lead to a VCPU not being able to receive ++ * additional instances of a particular SGI after migration ++ * for a GICv2 VM on some GIC implementations. Oh well. ++ */ ++ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0; ++ + if (model == KVM_DEV_TYPE_ARM_VGIC_V2 && + active && vgic_irq_is_sgi(irq->intid)) +- irq->active_source = requester_vcpu->vcpu_id; ++ irq->active_source = active_source; + } + + if (irq->active) diff --git a/queue-4.20/parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch b/queue-4.20/parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch new file mode 100644 index 00000000000..46811d3bd9f --- /dev/null +++ b/queue-4.20/parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch @@ -0,0 +1,38 @@ +From dfbaecb2b707cfdc5276b548d52b437384bd6483 Mon Sep 17 00:00:00 2001 +From: Helge Deller +Date: Fri, 4 Jan 2019 23:32:53 +0100 +Subject: parisc: Remap hugepage-aligned pages in set_kernel_text_rw() + +From: Helge Deller + +commit dfbaecb2b707cfdc5276b548d52b437384bd6483 upstream. + +The alternative coding patch for parisc in kernel 4.20 broke booting +machines with PA8500-PA8700 CPUs. The problem is, that for such machines +the parisc kernel automatically utilizes huge pages to access kernel +text code, but the set_kernel_text_rw() function, which is used shortly +before applying any alternative patches, didn't used the correctly +hugepage-aligned addresses to remap the kernel text read-writeable. + +Fixes: 3847dab77421 ("parisc: Add alternative coding infrastructure") +Cc: [4.20] +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman + +--- + arch/parisc/mm/init.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/parisc/mm/init.c ++++ b/arch/parisc/mm/init.c +@@ -512,8 +512,8 @@ static void __init map_pages(unsigned lo + + void __init set_kernel_text_rw(int enable_read_write) + { +- unsigned long start = (unsigned long)__init_begin; +- unsigned long end = (unsigned long)_etext; ++ unsigned long start = (unsigned long) _text; ++ unsigned long end = (unsigned long) &data_start; + + map_pages(start, __pa(start), end-start, + PAGE_KERNEL_RWX, enable_read_write ? 
1:0); diff --git a/queue-4.20/rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch b/queue-4.20/rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch new file mode 100644 index 00000000000..8b6ac373ce7 --- /dev/null +++ b/queue-4.20/rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch @@ -0,0 +1,41 @@ +From 3cc9ffbb1f51eb4320575a48e4805a8f52e0e26b Mon Sep 17 00:00:00 2001 +From: "Maciej W. Rozycki" +Date: Wed, 7 Nov 2018 02:39:13 +0000 +Subject: rtc: m41t80: Correct alarm month range with RTC reads + +From: Maciej W. Rozycki + +commit 3cc9ffbb1f51eb4320575a48e4805a8f52e0e26b upstream. + +Add the missing adjustment of the month range on alarm reads from the +RTC, correcting an issue coming from commit 9c6dfed92c3e ("rtc: m41t80: +add alarm functionality"). The range is 1-12 for hardware and 0-11 for +`struct rtc_time', and is already correctly handled on alarm writes to +the RTC. + +It was correct up until commit 48e9766726eb ("drivers/rtc/rtc-m41t80.c: +remove disabled alarm functionality") too, which removed the previous +implementation of alarm support. + +Signed-off-by: Maciej W. Rozycki +Fixes: 9c6dfed92c3e ("rtc: m41t80: add alarm functionality") +References: 48e9766726eb ("drivers/rtc/rtc-m41t80.c: remove disabled alarm functionality") +Cc: stable@vger.kernel.org # 4.7+ +Signed-off-by: Alexandre Belloni +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/rtc/rtc-m41t80.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/rtc/rtc-m41t80.c ++++ b/drivers/rtc/rtc-m41t80.c +@@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct devi + alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f); + alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f); + alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f); +- alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f); ++ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1; + + alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE); + alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled; diff --git a/queue-4.20/series b/queue-4.20/series index 938ab3db913..cc71d614de7 100644 --- a/queue-4.20/series +++ b/queue-4.20/series @@ -128,3 +128,18 @@ cifs-fix-error-mapping-for-smb2_lock-command-which-caused-ofd-lock-problem.patch smb3-fix-large-reads-on-encrypted-connections.patch cifs-return-correct-errors-when-pinning-memory-failed-for-direct-i-o.patch cifs-use-the-correct-length-when-pinning-memory-for-direct-i-o-for-write.patch +arm64-kvm-avoid-setting-the-upper-32-bits-of-vtcr_el2-to-1.patch +arm-arm64-kvm-vgic-force-vm-halt-when-changing-the-active-state-of-gicv3-ppis-sgis.patch +arm-dts-exynos-specify-i2s-assigned-clocks-in-proper-node.patch +rtc-m41t80-correct-alarm-month-range-with-rtc-reads.patch +kvm-arm-arm64-vgic-do-not-cond_resched_lock-with-irqs-disabled.patch +kvm-arm-arm64-vgic-cap-spis-to-the-vm-defined-maximum.patch +kvm-arm-arm64-vgic-v2-set-active_source-to-0-when-restoring-state.patch +kvm-arm-arm64-fix-vmid-alloc-race-by-reverting-to-lock-less.patch +kvm-arm-arm64-vgic-fix-off-by-one-bug-in-vgic_get_irq.patch +iommu-arm-smmu-v3-fix-big-endian-cmd_sync-writes.patch +arm64-compat-avoid-sending-sigill-for-unallocated-syscall-numbers.patch +arm64-compat-don-t-pull-syscall-number-from-regs-in-arm_compat_syscall.patch +parisc-remap-hugepage-aligned-pages-in-set_kernel_text_rw.patch +tpm-tpm_try_transmit-refactor-error-flow.patch +tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch diff --git a/queue-4.20/tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch 
b/queue-4.20/tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch new file mode 100644 index 00000000000..86882e53faa --- /dev/null +++ b/queue-4.20/tpm-tpm_i2c_nuvoton-use-correct-command-duration-for-tpm-2.x.patch @@ -0,0 +1,54 @@ +From 2ba5780ce30549cf57929b01d8cba6fe656e31c5 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Fri, 19 Oct 2018 21:22:47 +0300 +Subject: tpm: tpm_i2c_nuvoton: use correct command duration for TPM 2.x + +From: Tomas Winkler + +commit 2ba5780ce30549cf57929b01d8cba6fe656e31c5 upstream. + +tpm_i2c_nuvoton calculated commands duration using TPM 1.x +values via tpm_calc_ordinal_duration() also for TPM 2.x chips. +Call tpm2_calc_ordinal_duration() for retrieving ordinal +duration for TPM 2.X chips. + +Cc: stable@vger.kernel.org +Cc: Nayna Jain +Signed-off-by: Tomas Winkler +Reviewed-by: Nayna Jain +Tested-by: Nayna Jain (For TPM 2.0) +Reviewed-by: Jarkko Sakkinen +Signed-off-by: Jarkko Sakkinen +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/char/tpm/tpm_i2c_nuvoton.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c +@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_c + struct device *dev = chip->dev.parent; + struct i2c_client *client = to_i2c_client(dev); + u32 ordinal; ++ unsigned long duration; + size_t count = 0; + int burst_count, bytes2write, retries, rc = -EIO; + +@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_c + return rc; + } + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); +- rc = i2c_nuvoton_wait_for_data_avail(chip, +- tpm_calc_ordinal_duration(chip, +- ordinal), +- &priv->read_queue); ++ if (chip->flags & TPM_CHIP_FLAG_TPM2) ++ duration = tpm2_calc_ordinal_duration(chip, ordinal); ++ else ++ duration = tpm_calc_ordinal_duration(chip, ordinal); ++ ++ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue); + if (rc) { + dev_err(dev, "%s() timeout command duration\n", __func__); + i2c_nuvoton_ready(chip); diff --git a/queue-4.20/tpm-tpm_try_transmit-refactor-error-flow.patch b/queue-4.20/tpm-tpm_try_transmit-refactor-error-flow.patch new file mode 100644 index 00000000000..4bcdccbfc3e --- /dev/null +++ b/queue-4.20/tpm-tpm_try_transmit-refactor-error-flow.patch @@ -0,0 +1,67 @@ +From 01f54664a4db0d612de0ece8e0022f21f9374e9b Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 16 Oct 2018 16:37:16 +0300 +Subject: tpm: tpm_try_transmit() refactor error flow. + +From: Tomas Winkler + +commit 01f54664a4db0d612de0ece8e0022f21f9374e9b upstream. + +First, rename out_no_locality to out_locality for bailing out on +both tpm_cmd_ready() and tpm_request_locality() failure. +Second, ignore the return value of go_to_idle() as it may override +the return value of the actual tpm operation, the go_to_idle() error +will be caught on any consequent command. +Last, fix the wrong 'goto out', that jumped back instead of forward. 
+ +Cc: stable@vger.kernel.org +Fixes: 627448e85c76 ("tpm: separate cmd_ready/go_idle from runtime_pm") +Signed-off-by: Tomas Winkler +Reviewed-by: Jarkko Sakkinen +Tested-by: Jarkko Sakkinen +Signed-off-by: Jarkko Sakkinen +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/char/tpm/tpm-interface.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +--- a/drivers/char/tpm/tpm-interface.c ++++ b/drivers/char/tpm/tpm-interface.c +@@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct t + + if (need_locality) { + rc = tpm_request_locality(chip, flags); +- if (rc < 0) +- goto out_no_locality; ++ if (rc < 0) { ++ need_locality = false; ++ goto out_locality; ++ } + } + + rc = tpm_cmd_ready(chip, flags); + if (rc) +- goto out; ++ goto out_locality; + + rc = tpm2_prepare_space(chip, space, ordinal, buf); + if (rc) +@@ -547,14 +549,13 @@ out_recv: + dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); + + out: +- rc = tpm_go_idle(chip, flags); +- if (rc) +- goto out; ++ /* may fail but do not override previous error value in rc */ ++ tpm_go_idle(chip, flags); + ++out_locality: + if (need_locality) + tpm_relinquish_locality(chip, flags); + +-out_no_locality: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + -- 2.47.2