git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.0-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 17 May 2019 12:45:46 +0000 (14:45 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 17 May 2019 12:45:46 +0000 (14:45 +0200)
added patches:
arm-dts-exynos-fix-audio-microphone-routing-on-odroid-xu3.patch
arm-dts-exynos-fix-interrupt-for-shared-eints-on-exynos5260.patch
arm-dts-qcom-ipq4019-enlarge-pcie-bar-range.patch
arm-exynos-fix-a-leaked-reference-by-adding-missing-of_node_put.patch
arm64-arch_timer-ensure-counter-register-reads-occur-with-seqlock-held.patch
arm64-clear-osdlr_el1-on-cpu-boot.patch
arm64-compat-reduce-address-limit.patch
arm64-dts-rockchip-disable-dcmds-on-rk3399-s-emmc-controller.patch
arm64-dts-rockchip-fix-io-domain-voltage-setting-of-apio5-on-rockpro64.patch
arm64-mmap-ensure-file-offset-is-treated-as-unsigned.patch
arm64-save-and-restore-osdlr_el1-across-suspend-resume.patch
crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch
crypto-arm64-aes-neonbs-don-t-access-already-freed-walk.iv.patch
crypto-arm64-gcm-aes-ce-fix-no-neon-fallback-code.patch
crypto-caam-qi2-fix-dma-mapping-of-stack-memory.patch
crypto-caam-qi2-fix-zero-length-buffer-dma-mapping.patch
crypto-caam-qi2-generate-hash-keys-in-place.patch
crypto-ccp-do-not-free-psp_master-when-platform_init-fails.patch
crypto-chacha-generic-fix-use-as-arm64-no-neon-fallback.patch
crypto-chacha20poly1305-set-cra_name-correctly.patch
crypto-crct10dif-generic-fix-use-via-crypto_shash_digest.patch
crypto-crypto4xx-fix-cfb-and-ofb-overran-dst-buffer-issues.patch
crypto-crypto4xx-fix-ctr-aes-missing-output-iv.patch
crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch
crypto-lrw-don-t-access-already-freed-walk.iv.patch
crypto-rockchip-update-iv-buffer-to-contain-the-next-iv.patch
crypto-salsa20-don-t-access-already-freed-walk.iv.patch
crypto-skcipher-don-t-warn-on-unprocessed-data-after-slow-walk-step.patch
crypto-vmx-fix-copy-paste-error-in-ctr-mode.patch
crypto-x86-crct10dif-pcl-fix-use-via-crypto_shash_digest.patch
mmc-sdhci-of-arasan-add-dts-property-to-disable-dcmds.patch
objtool-fix-function-fallthrough-detection.patch
power-supply-axp288_charger-fix-unchecked-return-value.patch
power-supply-axp288_fuel_gauge-add-acepc-t8-and-t11-mini-pcs-to-the-blacklist.patch
sched-x86-save-flags-on-context-switch.patch
x86-mce-add-an-mce-record-filtering-function.patch
x86-mce-amd-carve-out-the-mc4_misc-thresholding-quirk.patch
x86-mce-amd-don-t-report-l1-btb-mca-errors-on-some-family-17h-models.patch
x86-mce-amd-turn-off-mc4_misc-thresholding-on-all-family-0x15-models.patch
x86-mce-group-amd-function-prototypes-in-asm-mce.h.patch
x86-speculation-mds-improve-cpu-buffer-clear-documentation.patch
x86-speculation-mds-revert-cpu-buffer-clear-on-double-fault-exit.patch

43 files changed:
queue-5.0/arm-dts-exynos-fix-audio-microphone-routing-on-odroid-xu3.patch [new file with mode: 0644]
queue-5.0/arm-dts-exynos-fix-interrupt-for-shared-eints-on-exynos5260.patch [new file with mode: 0644]
queue-5.0/arm-dts-qcom-ipq4019-enlarge-pcie-bar-range.patch [new file with mode: 0644]
queue-5.0/arm-exynos-fix-a-leaked-reference-by-adding-missing-of_node_put.patch [new file with mode: 0644]
queue-5.0/arm64-arch_timer-ensure-counter-register-reads-occur-with-seqlock-held.patch [new file with mode: 0644]
queue-5.0/arm64-clear-osdlr_el1-on-cpu-boot.patch [new file with mode: 0644]
queue-5.0/arm64-compat-reduce-address-limit.patch [new file with mode: 0644]
queue-5.0/arm64-dts-rockchip-disable-dcmds-on-rk3399-s-emmc-controller.patch [new file with mode: 0644]
queue-5.0/arm64-dts-rockchip-fix-io-domain-voltage-setting-of-apio5-on-rockpro64.patch [new file with mode: 0644]
queue-5.0/arm64-mmap-ensure-file-offset-is-treated-as-unsigned.patch [new file with mode: 0644]
queue-5.0/arm64-save-and-restore-osdlr_el1-across-suspend-resume.patch [new file with mode: 0644]
queue-5.0/crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch [new file with mode: 0644]
queue-5.0/crypto-arm64-aes-neonbs-don-t-access-already-freed-walk.iv.patch [new file with mode: 0644]
queue-5.0/crypto-arm64-gcm-aes-ce-fix-no-neon-fallback-code.patch [new file with mode: 0644]
queue-5.0/crypto-caam-qi2-fix-dma-mapping-of-stack-memory.patch [new file with mode: 0644]
queue-5.0/crypto-caam-qi2-fix-zero-length-buffer-dma-mapping.patch [new file with mode: 0644]
queue-5.0/crypto-caam-qi2-generate-hash-keys-in-place.patch [new file with mode: 0644]
queue-5.0/crypto-ccp-do-not-free-psp_master-when-platform_init-fails.patch [new file with mode: 0644]
queue-5.0/crypto-chacha-generic-fix-use-as-arm64-no-neon-fallback.patch [new file with mode: 0644]
queue-5.0/crypto-chacha20poly1305-set-cra_name-correctly.patch [new file with mode: 0644]
queue-5.0/crypto-crct10dif-generic-fix-use-via-crypto_shash_digest.patch [new file with mode: 0644]
queue-5.0/crypto-crypto4xx-fix-cfb-and-ofb-overran-dst-buffer-issues.patch [new file with mode: 0644]
queue-5.0/crypto-crypto4xx-fix-ctr-aes-missing-output-iv.patch [new file with mode: 0644]
queue-5.0/crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch [new file with mode: 0644]
queue-5.0/crypto-lrw-don-t-access-already-freed-walk.iv.patch [new file with mode: 0644]
queue-5.0/crypto-rockchip-update-iv-buffer-to-contain-the-next-iv.patch [new file with mode: 0644]
queue-5.0/crypto-salsa20-don-t-access-already-freed-walk.iv.patch [new file with mode: 0644]
queue-5.0/crypto-skcipher-don-t-warn-on-unprocessed-data-after-slow-walk-step.patch [new file with mode: 0644]
queue-5.0/crypto-vmx-fix-copy-paste-error-in-ctr-mode.patch [new file with mode: 0644]
queue-5.0/crypto-x86-crct10dif-pcl-fix-use-via-crypto_shash_digest.patch [new file with mode: 0644]
queue-5.0/mmc-sdhci-of-arasan-add-dts-property-to-disable-dcmds.patch [new file with mode: 0644]
queue-5.0/objtool-fix-function-fallthrough-detection.patch [new file with mode: 0644]
queue-5.0/power-supply-axp288_charger-fix-unchecked-return-value.patch [new file with mode: 0644]
queue-5.0/power-supply-axp288_fuel_gauge-add-acepc-t8-and-t11-mini-pcs-to-the-blacklist.patch [new file with mode: 0644]
queue-5.0/sched-x86-save-flags-on-context-switch.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/x86-mce-add-an-mce-record-filtering-function.patch [new file with mode: 0644]
queue-5.0/x86-mce-amd-carve-out-the-mc4_misc-thresholding-quirk.patch [new file with mode: 0644]
queue-5.0/x86-mce-amd-don-t-report-l1-btb-mca-errors-on-some-family-17h-models.patch [new file with mode: 0644]
queue-5.0/x86-mce-amd-turn-off-mc4_misc-thresholding-on-all-family-0x15-models.patch [new file with mode: 0644]
queue-5.0/x86-mce-group-amd-function-prototypes-in-asm-mce.h.patch [new file with mode: 0644]
queue-5.0/x86-speculation-mds-improve-cpu-buffer-clear-documentation.patch [new file with mode: 0644]
queue-5.0/x86-speculation-mds-revert-cpu-buffer-clear-on-double-fault-exit.patch [new file with mode: 0644]

diff --git a/queue-5.0/arm-dts-exynos-fix-audio-microphone-routing-on-odroid-xu3.patch b/queue-5.0/arm-dts-exynos-fix-audio-microphone-routing-on-odroid-xu3.patch
new file mode 100644 (file)
index 0000000..1fcec35
--- /dev/null
@@ -0,0 +1,33 @@
+From 9b23e1a3e8fde76e8cc0e366ab1ed4ffb4440feb Mon Sep 17 00:00:00 2001
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Date: Wed, 20 Mar 2019 10:59:50 +0100
+Subject: ARM: dts: exynos: Fix audio (microphone) routing on Odroid XU3
+
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+commit 9b23e1a3e8fde76e8cc0e366ab1ed4ffb4440feb upstream.
+
+The name of CODEC input widget to which microphone is connected through
+the "Headphone" jack is "IN12" not "IN1". This fixes microphone support
+on Odroid XU3.
+
+Cc: <stable@vger.kernel.org> # v4.14+
+Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+@@ -22,7 +22,7 @@
+                       "Headphone Jack", "HPL",
+                       "Headphone Jack", "HPR",
+                       "Headphone Jack", "MICBIAS",
+-                      "IN1", "Headphone Jack",
++                      "IN12", "Headphone Jack",
+                       "Speakers", "SPKL",
+                       "Speakers", "SPKR";
diff --git a/queue-5.0/arm-dts-exynos-fix-interrupt-for-shared-eints-on-exynos5260.patch b/queue-5.0/arm-dts-exynos-fix-interrupt-for-shared-eints-on-exynos5260.patch
new file mode 100644 (file)
index 0000000..7720191
--- /dev/null
@@ -0,0 +1,33 @@
+From b7ed69d67ff0788d8463e599dd5dd1b45c701a7e Mon Sep 17 00:00:00 2001
+From: Stuart Menefy <stuart.menefy@mathembedded.com>
+Date: Tue, 19 Feb 2019 13:03:37 +0000
+Subject: ARM: dts: exynos: Fix interrupt for shared EINTs on Exynos5260
+
+From: Stuart Menefy <stuart.menefy@mathembedded.com>
+
+commit b7ed69d67ff0788d8463e599dd5dd1b45c701a7e upstream.
+
+Fix the interrupt information for the GPIO lines with a shared EINT
+interrupt.
+
+Fixes: 16d7ff2642e7 ("ARM: dts: add dts files for exynos5260 SoC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Stuart Menefy <stuart.menefy@mathembedded.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/exynos5260.dtsi |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/exynos5260.dtsi
++++ b/arch/arm/boot/dts/exynos5260.dtsi
+@@ -223,7 +223,7 @@
+                       wakeup-interrupt-controller {
+                               compatible = "samsung,exynos4210-wakeup-eint";
+                               interrupt-parent = <&gic>;
+-                              interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+               };
diff --git a/queue-5.0/arm-dts-qcom-ipq4019-enlarge-pcie-bar-range.patch b/queue-5.0/arm-dts-qcom-ipq4019-enlarge-pcie-bar-range.patch
new file mode 100644 (file)
index 0000000..4840fab
--- /dev/null
@@ -0,0 +1,53 @@
+From f3e35357cd460a8aeb48b8113dc4b761a7d5c828 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Tue, 26 Feb 2019 01:12:01 +0100
+Subject: ARM: dts: qcom: ipq4019: enlarge PCIe BAR range
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit f3e35357cd460a8aeb48b8113dc4b761a7d5c828 upstream.
+
+David Bauer reported that the VDSL modem (attached via PCIe)
+on his AVM Fritz!Box 7530 was complaining about not having
+enough space in the BAR. A closer inspection of the old
+qcom-ipq40xx.dtsi pulled from the GL-iNet repository listed:
+
+| qcom,pcie@80000 {
+|      compatible = "qcom,msm_pcie";
+|      reg = <0x80000 0x2000>,
+|            <0x99000 0x800>,
+|            <0x40000000 0xf1d>,
+|            <0x40000f20 0xa8>,
+|            <0x40100000 0x1000>,
+|            <0x40200000 0x100000>,
+|            <0x40300000 0xd00000>;
+|      reg-names = "parf", "phy", "dm_core", "elbi",
+|                      "conf", "io", "bars";
+
+Matching the reg-names with the listed reg leads to
+<0xd00000> as the size for the "bars".
+
+Cc: stable@vger.kernel.org
+BugLink: https://www.mail-archive.com/openwrt-devel@lists.openwrt.org/msg45212.html
+Reported-by: David Bauer <mail@david-bauer.net>
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Andy Gross <agross@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -393,8 +393,8 @@
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+-                      ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
+-                                0x82000000 0 0x40300000 0x40300000 0 0x400000>;
++                      ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
++                               <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
+                       interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
+                       interrupt-names = "msi";
diff --git a/queue-5.0/arm-exynos-fix-a-leaked-reference-by-adding-missing-of_node_put.patch b/queue-5.0/arm-exynos-fix-a-leaked-reference-by-adding-missing-of_node_put.patch
new file mode 100644 (file)
index 0000000..0cac483
--- /dev/null
@@ -0,0 +1,51 @@
+From 629266bf7229cd6a550075f5961f95607b823b59 Mon Sep 17 00:00:00 2001
+From: Wen Yang <wen.yang99@zte.com.cn>
+Date: Tue, 5 Mar 2019 19:33:54 +0800
+Subject: ARM: exynos: Fix a leaked reference by adding missing of_node_put
+
+From: Wen Yang <wen.yang99@zte.com.cn>
+
+commit 629266bf7229cd6a550075f5961f95607b823b59 upstream.
+
+The call to of_get_next_child returns a node pointer with refcount
+incremented thus it must be explicitly decremented after the last
+usage.
+
+Detected by coccinelle with warnings like:
+    arch/arm/mach-exynos/firmware.c:201:2-8: ERROR: missing of_node_put;
+        acquired a node pointer with refcount incremented on line 193,
+        but without a corresponding object release within this function.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-exynos/firmware.c |    1 +
+ arch/arm/mach-exynos/suspend.c  |    2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm/mach-exynos/firmware.c
++++ b/arch/arm/mach-exynos/firmware.c
+@@ -196,6 +196,7 @@ bool __init exynos_secure_firmware_avail
+               return false;
+       addr = of_get_address(nd, 0, NULL, NULL);
++      of_node_put(nd);
+       if (!addr) {
+               pr_err("%s: No address specified.\n", __func__);
+               return false;
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -639,8 +639,10 @@ void __init exynos_pm_init(void)
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+               pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
++              of_node_put(np);
+               return;
+       }
++      of_node_put(np);
+       pm_data = (const struct exynos_pm_data *) match->data;
diff --git a/queue-5.0/arm64-arch_timer-ensure-counter-register-reads-occur-with-seqlock-held.patch b/queue-5.0/arm64-arch_timer-ensure-counter-register-reads-occur-with-seqlock-held.patch
new file mode 100644 (file)
index 0000000..e3f22d7
--- /dev/null
@@ -0,0 +1,164 @@
+From 75a19a0202db21638a1c2b424afb867e1f9a2376 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 29 Apr 2019 17:26:22 +0100
+Subject: arm64: arch_timer: Ensure counter register reads occur with seqlock held
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 75a19a0202db21638a1c2b424afb867e1f9a2376 upstream.
+
+When executing clock_gettime(), either in the vDSO or via a system call,
+we need to ensure that the read of the counter register occurs within
+the seqlock reader critical section. This ensures that updates to the
+clocksource parameters (e.g. the multiplier) are consistent with the
+counter value and therefore avoids the situation where time appears to
+go backwards across multiple reads.
+
+Extend the vDSO logic so that the seqlock critical section covers the
+read of the counter register as well as accesses to the data page. Since
+reads of the counter system registers are not ordered by memory barrier
+instructions, introduce dependency ordering from the counter read to a
+subsequent memory access so that the seqlock memory barriers apply to
+the counter access in both the vDSO and the system call paths.
+
+Cc: <stable@vger.kernel.org>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Link: https://lore.kernel.org/linux-arm-kernel/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
+Reported-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/arch_timer.h   |   33 +++++++++++++++++++++++++++++++--
+ arch/arm64/kernel/vdso/gettimeofday.S |   15 +++++++++++----
+ 2 files changed, 42 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/arch_timer.h
++++ b/arch/arm64/include/asm/arch_timer.h
+@@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkct
+       isb();
+ }
++/*
++ * Ensure that reads of the counter are treated the same as memory reads
++ * for the purposes of ordering by subsequent memory barriers.
++ *
++ * This insanity brought to you by speculative system register reads,
++ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
++ *
++ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
++ */
++#define arch_counter_enforce_ordering(val) do {                               \
++      u64 tmp, _val = (val);                                          \
++                                                                      \
++      asm volatile(                                                   \
++      "       eor     %0, %1, %1\n"                                   \
++      "       add     %0, sp, %0\n"                                   \
++      "       ldr     xzr, [%0]"                                      \
++      : "=r" (tmp) : "r" (_val));                                     \
++} while (0)
++
+ static inline u64 arch_counter_get_cntpct(void)
+ {
++      u64 cnt;
++
+       isb();
+-      return arch_timer_reg_read_stable(cntpct_el0);
++      cnt = arch_timer_reg_read_stable(cntpct_el0);
++      arch_counter_enforce_ordering(cnt);
++      return cnt;
+ }
+ static inline u64 arch_counter_get_cntvct(void)
+ {
++      u64 cnt;
++
+       isb();
+-      return arch_timer_reg_read_stable(cntvct_el0);
++      cnt = arch_timer_reg_read_stable(cntvct_el0);
++      arch_counter_enforce_ordering(cnt);
++      return cnt;
+ }
++#undef arch_counter_enforce_ordering
++
+ static inline int arch_timer_arch_init(void)
+ {
+       return 0;
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -73,6 +73,13 @@ x_tmp               .req    x8
+       movn    x_tmp, #0xff00, lsl #48
+       and     \res, x_tmp, \res
+       mul     \res, \res, \mult
++      /*
++       * Fake address dependency from the value computed from the counter
++       * register to subsequent data page accesses so that the sequence
++       * locking also orders the read of the counter.
++       */
++      and     x_tmp, \res, xzr
++      add     vdso_data, vdso_data, x_tmp
+       .endm
+       /*
+@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
+       /* w11 = cs_mono_mult, w12 = cs_shift */
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+       ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+-      seqcnt_check fail=1b
+       get_nsec_per_sec res=x9
+       lsl     x9, x9, x12
+       get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
++      seqcnt_check fail=1b
+       get_ts_realtime res_sec=x10, res_nsec=x11, \
+               clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+@@ -211,13 +218,13 @@ realtime:
+       /* w11 = cs_mono_mult, w12 = cs_shift */
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+       ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+-      seqcnt_check fail=realtime
+       /* All computations are done with left-shifted nsecs. */
+       get_nsec_per_sec res=x9
+       lsl     x9, x9, x12
+       get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
++      seqcnt_check fail=realtime
+       get_ts_realtime res_sec=x10, res_nsec=x11, \
+               clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+       clock_gettime_return, shift=1
+@@ -231,7 +238,6 @@ monotonic:
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+       ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+       ldp     x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
+-      seqcnt_check fail=monotonic
+       /* All computations are done with left-shifted nsecs. */
+       lsl     x4, x4, x12
+@@ -239,6 +245,7 @@ monotonic:
+       lsl     x9, x9, x12
+       get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
++      seqcnt_check fail=monotonic
+       get_ts_realtime res_sec=x10, res_nsec=x11, \
+               clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+@@ -253,13 +260,13 @@ monotonic_raw:
+       /* w11 = cs_raw_mult, w12 = cs_shift */
+       ldp     w12, w11, [vdso_data, #VDSO_CS_SHIFT]
+       ldp     x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
+-      seqcnt_check fail=monotonic_raw
+       /* All computations are done with left-shifted nsecs. */
+       get_nsec_per_sec res=x9
+       lsl     x9, x9, x12
+       get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
++      seqcnt_check fail=monotonic_raw
+       get_ts_clock_raw res_sec=x10, res_nsec=x11, \
+               clock_nsec=x15, nsec_to_sec=x9
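
The changelog above hinges on the usual seqcount reader pattern: the counter read must sit inside the retry loop so it is paired with the mult/shift values read in the same pass. The user-space C11 sketch below illustrates only that pattern; the structure layout, field names and the read_counter() helper are assumptions for illustration, not the kernel's actual vDSO data page, and it does not model the fake address dependency the patch adds for system-register reads.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct clock_data {
	_Atomic unsigned int seq;   /* even: stable, odd: update in progress */
	uint64_t cycle_last;
	uint64_t mult;
	uint64_t shift;
	uint64_t base_ns;
};

/* Hypothetical stand-in for reading CNTVCT_EL0; here just a fake counter. */
static uint64_t read_counter(void)
{
	static uint64_t fake;
	return ++fake;
}

static uint64_t clock_read_ns(struct clock_data *d)
{
	unsigned int seq;
	uint64_t cyc, ns;

	do {
		/* Wait for an even (stable) sequence count. */
		do {
			seq = atomic_load_explicit(&d->seq, memory_order_acquire);
		} while (seq & 1);

		/*
		 * The counter read and the clocksource parameter reads both
		 * happen here, inside the critical section, so the retry
		 * check below guarantees they belong to the same update.
		 * (Plain loads of the parameters are used for brevity.)
		 */
		cyc = read_counter();
		ns  = d->base_ns + (((cyc - d->cycle_last) * d->mult) >> d->shift);

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&d->seq, memory_order_relaxed) != seq);

	return ns;
}

int main(void)
{
	struct clock_data d = { .seq = 0, .cycle_last = 0,
				.mult = 1, .shift = 0, .base_ns = 0 };

	printf("%llu\n", (unsigned long long)clock_read_ns(&d));
	return 0;
}
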
diff --git a/queue-5.0/arm64-clear-osdlr_el1-on-cpu-boot.patch b/queue-5.0/arm64-clear-osdlr_el1-on-cpu-boot.patch
new file mode 100644 (file)
index 0000000..ccb8d04
--- /dev/null
@@ -0,0 +1,31 @@
+From 6fda41bf12615ee7c3ddac88155099b1a8cf8d00 Mon Sep 17 00:00:00 2001
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Date: Mon, 8 Apr 2019 18:17:18 +0100
+Subject: arm64: Clear OSDLR_EL1 on CPU boot
+
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+
+commit 6fda41bf12615ee7c3ddac88155099b1a8cf8d00 upstream.
+
+Some firmwares may reboot CPUs with OS Double Lock set. Make sure that
+it is unlocked, in order to use debug exceptions.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/debug-monitors.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
+  */
+ static int clear_os_lock(unsigned int cpu)
+ {
++      write_sysreg(0, osdlr_el1);
+       write_sysreg(0, oslar_el1);
+       isb();
+       return 0;
diff --git a/queue-5.0/arm64-compat-reduce-address-limit.patch b/queue-5.0/arm64-compat-reduce-address-limit.patch
new file mode 100644 (file)
index 0000000..af2ac60
--- /dev/null
@@ -0,0 +1,52 @@
+From d263119387de9975d2acba1dfd3392f7c5979c18 Mon Sep 17 00:00:00 2001
+From: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Date: Mon, 1 Apr 2019 12:30:14 +0100
+Subject: arm64: compat: Reduce address limit
+
+From: Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+commit d263119387de9975d2acba1dfd3392f7c5979c18 upstream.
+
+Currently, compat tasks running on arm64 can allocate memory up to
+TASK_SIZE_32 (UL(0x100000000)).
+
+This means that mmap() allocations, if we treat them as returning an
+array, are not compliant with the sections 6.5.8 of the C standard
+(C99) which states that: "If the expression P points to an element of
+an array object and the expression Q points to the last element of the
+same array object, the pointer expression Q+1 compares greater than P".
+
+Redefine TASK_SIZE_32 to address the issue.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: <stable@vger.kernel.org>
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+[will: fixed typo in comment]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/processor.h |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -57,7 +57,15 @@
+ #define TASK_SIZE_64          (UL(1) << vabits_user)
+ #ifdef CONFIG_COMPAT
++#ifdef CONFIG_ARM64_64K_PAGES
++/*
++ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
++ * by the compat vectors page.
++ */
+ #define TASK_SIZE_32          UL(0x100000000)
++#else
++#define TASK_SIZE_32          (UL(0x100000000) - PAGE_SIZE)
++#endif /* CONFIG_ARM64_64K_PAGES */
+ #define TASK_SIZE             (test_thread_flag(TIF_32BIT) ? \
+                               TASK_SIZE_32 : TASK_SIZE_64)
+ #define TASK_SIZE_OF(tsk)     (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
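
To make the C99 6.5.8 point in the changelog concrete: if a compat mapping were allowed to end exactly at 2^32, its one-past-the-end address would wrap to 0 and no longer compare greater than the start of the mapping, which is why TASK_SIZE_32 is pulled back by one page on non-64K-page kernels. The sketch below is a small user-space illustration only (it assumes a 4 KiB page and models 32-bit addresses with uint32_t; it is not kernel code).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A hypothetical 32-bit mapping occupying the last page below 2^32. */
	uint32_t start = 0xfffff000u;          /* last 4 KiB page */
	uint32_t len   = 0x1000u;
	uint32_t one_past_end = start + len;   /* wraps around to 0 */

	printf("start            = 0x%08x\n", (unsigned)start);
	printf("one past the end = 0x%08x\n", (unsigned)one_past_end);
	printf("one_past_end > start? %s\n",
	       one_past_end > start ? "yes" : "no (wrapped)");
	return 0;
}
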
diff --git a/queue-5.0/arm64-dts-rockchip-disable-dcmds-on-rk3399-s-emmc-controller.patch b/queue-5.0/arm64-dts-rockchip-disable-dcmds-on-rk3399-s-emmc-controller.patch
new file mode 100644 (file)
index 0000000..ab160e3
--- /dev/null
@@ -0,0 +1,83 @@
+From a3eec13b8fd2b9791a21fa16e38dfea8111579bf Mon Sep 17 00:00:00 2001
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Date: Fri, 22 Mar 2019 12:38:06 +0100
+Subject: arm64: dts: rockchip: Disable DCMDs on RK3399's eMMC controller.
+
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+
+commit a3eec13b8fd2b9791a21fa16e38dfea8111579bf upstream.
+
+When using direct commands (DCMDs) on an RK3399, we get spurious
+CQE completion interrupts for the DCMD transaction slot (#31):
+
+[  931.196520] ------------[ cut here ]------------
+[  931.201702] mmc1: cqhci: spurious TCN for tag 31
+[  931.206906] WARNING: CPU: 0 PID: 1433 at /usr/src/kernel/drivers/mmc/host/cqhci.c:725 cqhci_irq+0x2e4/0x490
+[  931.206909] Modules linked in:
+[  931.206918] CPU: 0 PID: 1433 Comm: irq/29-mmc1 Not tainted 4.19.8-rt6-funkadelic #1
+[  931.206920] Hardware name: Theobroma Systems RK3399-Q7 SoM (DT)
+[  931.206924] pstate: 40000005 (nZcv daif -PAN -UAO)
+[  931.206927] pc : cqhci_irq+0x2e4/0x490
+[  931.206931] lr : cqhci_irq+0x2e4/0x490
+[  931.206933] sp : ffff00000e54bc80
+[  931.206934] x29: ffff00000e54bc80 x28: 0000000000000000
+[  931.206939] x27: 0000000000000001 x26: ffff000008f217e8
+[  931.206944] x25: ffff8000f02ef030 x24: ffff0000091417b0
+[  931.206948] x23: ffff0000090aa000 x22: ffff8000f008b000
+[  931.206953] x21: 0000000000000002 x20: 000000000000001f
+[  931.206957] x19: ffff8000f02ef018 x18: ffffffffffffffff
+[  931.206961] x17: 0000000000000000 x16: 0000000000000000
+[  931.206966] x15: ffff0000090aa6c8 x14: 0720072007200720
+[  931.206970] x13: 0720072007200720 x12: 0720072007200720
+[  931.206975] x11: 0720072007200720 x10: 0720072007200720
+[  931.206980] x9 : 0720072007200720 x8 : 0720072007200720
+[  931.206984] x7 : 0720073107330720 x6 : 00000000000005a0
+[  931.206988] x5 : ffff00000860d4b0 x4 : 0000000000000000
+[  931.206993] x3 : 0000000000000001 x2 : 0000000000000001
+[  931.206997] x1 : 1bde3a91b0d4d900 x0 : 0000000000000000
+[  931.207001] Call trace:
+[  931.207005]  cqhci_irq+0x2e4/0x490
+[  931.207009]  sdhci_arasan_cqhci_irq+0x5c/0x90
+[  931.207013]  sdhci_irq+0x98/0x930
+[  931.207019]  irq_forced_thread_fn+0x2c/0xa0
+[  931.207023]  irq_thread+0x114/0x1c0
+[  931.207027]  kthread+0x128/0x130
+[  931.207032]  ret_from_fork+0x10/0x20
+[  931.207035] ---[ end trace 0000000000000002 ]---
+
+The driver shows this message only for the first spurious interrupt
+by using WARN_ONCE(). Changing this to WARN() shows, that this is
+happening quite frequently (up to once a second).
+
+Since the eMMC 5.1 specification, where CQE and CQHCI are specified,
+does not mention that spurious TCN interrupts for DCMDs can be simply
+ignored, we must assume that using this feature is not working reliably.
+
+The current implementation uses DCMD for REQ_OP_FLUSH only, and
+I could not see any performance/power impact when disabling
+this optional feature for RK3399.
+
+Therefore this patch disables DCMDs for RK3399.
+
+Signed-off-by: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Signed-off-by: Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
+Fixes: 84362d79f436 ("mmc: sdhci-of-arasan: Add CQHCI support for arasan,sdhci-5.1")
+Cc: stable@vger.kernel.org
+[the corresponding code changes are queued for 5.2 so doing that as well]
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3399.dtsi |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -333,6 +333,7 @@
+               phys = <&emmc_phy>;
+               phy-names = "phy_arasan";
+               power-domains = <&power RK3399_PD_EMMC>;
++              disable-cqe-dcmd;
+               status = "disabled";
+       };
diff --git a/queue-5.0/arm64-dts-rockchip-fix-io-domain-voltage-setting-of-apio5-on-rockpro64.patch b/queue-5.0/arm64-dts-rockchip-fix-io-domain-voltage-setting-of-apio5-on-rockpro64.patch
new file mode 100644 (file)
index 0000000..adfdb59
--- /dev/null
@@ -0,0 +1,39 @@
+From 798689e45190756c2eca6656ee4c624370a5012a Mon Sep 17 00:00:00 2001
+From: Katsuhiro Suzuki <katsuhiro@katsuster.net>
+Date: Wed, 27 Mar 2019 21:03:17 +0900
+Subject: arm64: dts: rockchip: fix IO domain voltage setting of APIO5 on rockpro64
+
+From: Katsuhiro Suzuki <katsuhiro@katsuster.net>
+
+commit 798689e45190756c2eca6656ee4c624370a5012a upstream.
+
+This patch fixes IO domain voltage setting that is related to
+audio_gpio3d4a_ms (bit 1) of GRF_IO_VSEL.
+
+This is because RockPro64 schematics P.16 says that regulator
+supplies 3.0V power to APIO5_VDD. So audio_gpio3d4a_ms bit should
+be clear (means 3.0V). Power domain map is saying different thing
+(supplies 1.8V) but I believe P.16 is actual connectings.
+
+Fixes: e4f3fb490967 ("arm64: dts: rockchip: add initial dts support for Rockpro64")
+Cc: stable@vger.kernel.org
+Suggested-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Katsuhiro Suzuki <katsuhiro@katsuster.net>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
+@@ -489,7 +489,7 @@
+       status = "okay";
+       bt656-supply = <&vcc1v8_dvp>;
+-      audio-supply = <&vcca1v8_codec>;
++      audio-supply = <&vcc_3v0>;
+       sdmmc-supply = <&vcc_sdio>;
+       gpio1830-supply = <&vcc_3v0>;
+ };
diff --git a/queue-5.0/arm64-mmap-ensure-file-offset-is-treated-as-unsigned.patch b/queue-5.0/arm64-mmap-ensure-file-offset-is-treated-as-unsigned.patch
new file mode 100644 (file)
index 0000000..745322d
--- /dev/null
@@ -0,0 +1,40 @@
+From f08cae2f28db24d95be5204046b60618d8de4ddc Mon Sep 17 00:00:00 2001
+From: Boyang Zhou <zhouby_cn@126.com>
+Date: Mon, 29 Apr 2019 15:27:19 +0100
+Subject: arm64: mmap: Ensure file offset is treated as unsigned
+
+From: Boyang Zhou <zhouby_cn@126.com>
+
+commit f08cae2f28db24d95be5204046b60618d8de4ddc upstream.
+
+The file offset argument to the arm64 sys_mmap() implementation is
+scaled from bytes to pages by shifting right by PAGE_SHIFT.
+Unfortunately, the offset is passed in as a signed 'off_t' type and
+therefore large offsets (i.e. with the top bit set) are incorrectly
+sign-extended by the shift. This has been observed to cause false mmap()
+failures when mapping GPU doorbells on an arm64 server part.
+
+Change the type of the file offset argument to sys_mmap() from 'off_t'
+to 'unsigned long' so that the shifting scales the value as expected.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boyang Zhou <zhouby_cn@126.com>
+[will: rewrote commit message]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/sys.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/sys.c
++++ b/arch/arm64/kernel/sys.c
+@@ -31,7 +31,7 @@
+ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+               unsigned long, prot, unsigned long, flags,
+-              unsigned long, fd, off_t, off)
++              unsigned long, fd, unsigned long, off)
+ {
+       if (offset_in_page(off) != 0)
+               return -EINVAL;
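
As a quick illustration of the sign-extension problem the changelog describes, the user-space sketch below (not kernel code; it assumes a 4 KiB page, i.e. a shift of 12) shows how an arithmetic right shift of a signed offset with the top bit set smears the sign bit into the page offset, while the same shift on an unsigned value scales bytes to pages as intended.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A "large" file offset with the top bit set. */
	int64_t  soff = INT64_MIN;          /* bit pattern 0x8000000000000000 */
	uint64_t uoff = (uint64_t)soff;     /* same bits, unsigned type       */

	/*
	 * On typical compilers, >> on a negative signed value is an
	 * arithmetic shift, so the sign bit is replicated into the result.
	 */
	printf("signed   off >> 12 = 0x%016llx\n",
	       (unsigned long long)(soff >> 12));
	/* Logical shift on the unsigned value gives the expected page offset. */
	printf("unsigned off >> 12 = 0x%016llx\n",
	       (unsigned long long)(uoff >> 12));
	return 0;
}
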
diff --git a/queue-5.0/arm64-save-and-restore-osdlr_el1-across-suspend-resume.patch b/queue-5.0/arm64-save-and-restore-osdlr_el1-across-suspend-resume.patch
new file mode 100644 (file)
index 0000000..71b1eb4
--- /dev/null
@@ -0,0 +1,83 @@
+From 827a108e354db633698f0b4a10c1ffd2b1f8d1d0 Mon Sep 17 00:00:00 2001
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Date: Mon, 8 Apr 2019 18:17:19 +0100
+Subject: arm64: Save and restore OSDLR_EL1 across suspend/resume
+
+From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+
+commit 827a108e354db633698f0b4a10c1ffd2b1f8d1d0 upstream.
+
+When the CPU comes out of suspend, the firmware may have modified the OS
+Double Lock Register. Save it in an unused slot of cpu_suspend_ctx, and
+restore it on resume.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/proc.S |   34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -76,24 +76,25 @@ ENTRY(cpu_do_suspend)
+       mrs     x2, tpidr_el0
+       mrs     x3, tpidrro_el0
+       mrs     x4, contextidr_el1
+-      mrs     x5, cpacr_el1
+-      mrs     x6, tcr_el1
+-      mrs     x7, vbar_el1
+-      mrs     x8, mdscr_el1
+-      mrs     x9, oslsr_el1
+-      mrs     x10, sctlr_el1
++      mrs     x5, osdlr_el1
++      mrs     x6, cpacr_el1
++      mrs     x7, tcr_el1
++      mrs     x8, vbar_el1
++      mrs     x9, mdscr_el1
++      mrs     x10, oslsr_el1
++      mrs     x11, sctlr_el1
+ alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+-      mrs     x11, tpidr_el1
++      mrs     x12, tpidr_el1
+ alternative_else
+-      mrs     x11, tpidr_el2
++      mrs     x12, tpidr_el2
+ alternative_endif
+-      mrs     x12, sp_el0
++      mrs     x13, sp_el0
+       stp     x2, x3, [x0]
+-      stp     x4, xzr, [x0, #16]
+-      stp     x5, x6, [x0, #32]
+-      stp     x7, x8, [x0, #48]
+-      stp     x9, x10, [x0, #64]
+-      stp     x11, x12, [x0, #80]
++      stp     x4, x5, [x0, #16]
++      stp     x6, x7, [x0, #32]
++      stp     x8, x9, [x0, #48]
++      stp     x10, x11, [x0, #64]
++      stp     x12, x13, [x0, #80]
+       ret
+ ENDPROC(cpu_do_suspend)
+@@ -116,8 +117,8 @@ ENTRY(cpu_do_resume)
+       msr     cpacr_el1, x6
+       /* Don't change t0sz here, mask those bits when restoring */
+-      mrs     x5, tcr_el1
+-      bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
++      mrs     x7, tcr_el1
++      bfi     x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+       msr     tcr_el1, x8
+       msr     vbar_el1, x9
+@@ -141,6 +142,7 @@ alternative_endif
+       /*
+        * Restore oslsr_el1 by writing oslar_el1
+        */
++      msr     osdlr_el1, x5
+       ubfx    x11, x11, #1, #1
+       msr     oslar_el1, x11
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
diff --git a/queue-5.0/crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch b/queue-5.0/crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch
new file mode 100644 (file)
index 0000000..6ccdbd2
--- /dev/null
@@ -0,0 +1,42 @@
+From 767f015ea0b7ab9d60432ff6cd06b664fd71f50f Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:31 -0700
+Subject: crypto: arm/aes-neonbs - don't access already-freed walk.iv
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 767f015ea0b7ab9d60432ff6cd06b664fd71f50f upstream.
+
+If the user-provided IV needs to be aligned to the algorithm's
+alignmask, then skcipher_walk_virt() copies the IV into a new aligned
+buffer walk.iv.  But skcipher_walk_virt() can fail afterwards, and then
+if the caller unconditionally accesses walk.iv, it's a use-after-free.
+
+arm32 xts-aes-neonbs doesn't set an alignmask, so currently it isn't
+affected by this despite unconditionally accessing walk.iv.  However
+this is more subtle than desired, and it was actually broken prior to
+the alignmask being removed by commit cc477bf64573 ("crypto: arm/aes -
+replace bit-sliced OpenSSL NEON code").  Thus, update xts-aes-neonbs to
+start checking the return value of skcipher_walk_virt().
+
+Fixes: e4e7f10bfc40 ("ARM: add support for bit sliced AES using NEON instructions")
+Cc: <stable@vger.kernel.org> # v3.13+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/crypto/aes-neonbs-glue.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/crypto/aes-neonbs-glue.c
++++ b/arch/arm/crypto/aes-neonbs-glue.c
+@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_r
+       int err;
+       err = skcipher_walk_virt(&walk, req, true);
++      if (err)
++              return err;
+       crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
diff --git a/queue-5.0/crypto-arm64-aes-neonbs-don-t-access-already-freed-walk.iv.patch b/queue-5.0/crypto-arm64-aes-neonbs-don-t-access-already-freed-walk.iv.patch
new file mode 100644 (file)
index 0000000..17a875d
--- /dev/null
@@ -0,0 +1,41 @@
+From 4a8108b70508df0b6c4ffa4a3974dab93dcbe851 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:32 -0700
+Subject: crypto: arm64/aes-neonbs - don't access already-freed walk.iv
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 4a8108b70508df0b6c4ffa4a3974dab93dcbe851 upstream.
+
+If the user-provided IV needs to be aligned to the algorithm's
+alignmask, then skcipher_walk_virt() copies the IV into a new aligned
+buffer walk.iv.  But skcipher_walk_virt() can fail afterwards, and then
+if the caller unconditionally accesses walk.iv, it's a use-after-free.
+
+xts-aes-neonbs doesn't set an alignmask, so currently it isn't affected
+by this despite unconditionally accessing walk.iv.  However this is more
+subtle than desired, and unconditionally accessing walk.iv has caused a
+real problem in other algorithms.  Thus, update xts-aes-neonbs to start
+checking the return value of skcipher_walk_virt().
+
+Fixes: 1abee99eafab ("crypto: arm64/aes - reimplement bit-sliced ARM/NEON implementation for arm64")
+Cc: <stable@vger.kernel.org> # v4.11+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-neonbs-glue.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/crypto/aes-neonbs-glue.c
++++ b/arch/arm64/crypto/aes-neonbs-glue.c
+@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_r
+       int err;
+       err = skcipher_walk_virt(&walk, req, false);
++      if (err)
++              return err;
+       kernel_neon_begin();
+       neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
diff --git a/queue-5.0/crypto-arm64-gcm-aes-ce-fix-no-neon-fallback-code.patch b/queue-5.0/crypto-arm64-gcm-aes-ce-fix-no-neon-fallback-code.patch
new file mode 100644 (file)
index 0000000..2757719
--- /dev/null
@@ -0,0 +1,68 @@
+From 580e295178402d14bbf598a5702f8e01fc59dbaa Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 12 Mar 2019 22:12:46 -0700
+Subject: crypto: arm64/gcm-aes-ce - fix no-NEON fallback code
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 580e295178402d14bbf598a5702f8e01fc59dbaa upstream.
+
+The arm64 gcm-aes-ce algorithm is failing the extra crypto self-tests
+following my patches to test the !may_use_simd() code paths, which
+previously were untested.  The problem is that in the !may_use_simd()
+case, an odd number of AES blocks can be processed within each step of
+the skcipher_walk.  However, the skcipher_walk is being done with a
+"stride" of 2 blocks and is advanced by an even number of blocks after
+each step.  This causes the encryption to produce the wrong ciphertext
+and authentication tag, and causes the decryption to incorrectly fail.
+
+Fix it by only processing an even number of blocks per step.
+
+Fixes: c2b24c36e0a3 ("crypto: arm64/aes-gcm-ce - fix scatterwalk API violation")
+Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time")
+Cc: <stable@vger.kernel.org> # v4.19+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/ghash-ce-glue.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/crypto/ghash-ce-glue.c
++++ b/arch/arm64/crypto/ghash-ce-glue.c
+@@ -418,9 +418,11 @@ static int gcm_encrypt(struct aead_reque
+               put_unaligned_be32(2, iv + GCM_IV_SIZE);
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
+-                      int blocks = walk.nbytes / AES_BLOCK_SIZE;
++                      const int blocks =
++                              walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+                       u8 *dst = walk.dst.virt.addr;
+                       u8 *src = walk.src.virt.addr;
++                      int remaining = blocks;
+                       do {
+                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
+@@ -430,9 +432,9 @@ static int gcm_encrypt(struct aead_reque
+                               dst += AES_BLOCK_SIZE;
+                               src += AES_BLOCK_SIZE;
+-                      } while (--blocks > 0);
++                      } while (--remaining > 0);
+-                      ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
++                      ghash_do_update(blocks, dg,
+                                       walk.dst.virt.addr, &ctx->ghash_key,
+                                       NULL);
+@@ -553,7 +555,7 @@ static int gcm_decrypt(struct aead_reque
+               put_unaligned_be32(2, iv + GCM_IV_SIZE);
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
+-                      int blocks = walk.nbytes / AES_BLOCK_SIZE;
++                      int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+                       u8 *dst = walk.dst.virt.addr;
+                       u8 *src = walk.src.virt.addr;
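
The fix boils down to rounding the per-step block count down to a multiple of two so it matches the two-block walk stride. A trivial user-space arithmetic sketch of that rounding follows (illustrative only; the byte count is a made-up example of what a walk step might hand back).

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	unsigned int nbytes = 5 * AES_BLOCK_SIZE;  /* e.g. 80 bytes in this walk step */

	int old_blocks = nbytes / AES_BLOCK_SIZE;            /* 5: can be odd  */
	int new_blocks = nbytes / (2 * AES_BLOCK_SIZE) * 2;  /* 4: always even */

	printf("old per-step block count: %d\n", old_blocks);
	printf("new per-step block count: %d\n", new_blocks);
	return 0;
}
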
diff --git a/queue-5.0/crypto-caam-qi2-fix-dma-mapping-of-stack-memory.patch b/queue-5.0/crypto-caam-qi2-fix-dma-mapping-of-stack-memory.patch
new file mode 100644 (file)
index 0000000..bc2cec0
--- /dev/null
@@ -0,0 +1,398 @@
+From 5965dc745287bebf7a2eba91a66f017537fa4c54 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Thu, 25 Apr 2019 17:52:22 +0300
+Subject: crypto: caam/qi2 - fix DMA mapping of stack memory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 5965dc745287bebf7a2eba91a66f017537fa4c54 upstream.
+
+Commits c19650d6ea99 ("crypto: caam - fix DMA mapping of stack memory")
+and 65055e210884 ("crypto: caam - fix hash context DMA unmap size")
+fixed the ahash implementation in caam/jr driver such that req->result
+is not DMA-mapped (since it's not guaranteed to be DMA-able).
+
+Apply a similar fix for ahash implementation in caam/qi2 driver.
+
+Cc: <stable@vger.kernel.org> # v4.20+
+Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi2.c |  111 +++++++++++++++-----------------------
+ drivers/crypto/caam/caamalg_qi2.h |    2 
+ 2 files changed, 45 insertions(+), 68 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -2855,6 +2855,7 @@ struct caam_hash_state {
+       struct caam_request caam_req;
+       dma_addr_t buf_dma;
+       dma_addr_t ctx_dma;
++      int ctx_dma_len;
+       u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+       int buflen_0;
+       u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+@@ -2928,6 +2929,7 @@ static inline int ctx_map_to_qm_sg(struc
+                                  struct caam_hash_state *state, int ctx_len,
+                                  struct dpaa2_sg_entry *qm_sg, u32 flag)
+ {
++      state->ctx_dma_len = ctx_len;
+       state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+       if (dma_mapping_error(dev, state->ctx_dma)) {
+               dev_err(dev, "unable to map ctx\n");
+@@ -3166,14 +3168,12 @@ bad_free_key:
+ }
+ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+-                             struct ahash_request *req, int dst_len)
++                             struct ahash_request *req)
+ {
+       struct caam_hash_state *state = ahash_request_ctx(req);
+       if (edesc->src_nents)
+               dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+-      if (edesc->dst_dma)
+-              dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+       if (edesc->qm_sg_bytes)
+               dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+@@ -3188,18 +3188,15 @@ static inline void ahash_unmap(struct de
+ static inline void ahash_unmap_ctx(struct device *dev,
+                                  struct ahash_edesc *edesc,
+-                                 struct ahash_request *req, int dst_len,
+-                                 u32 flag)
++                                 struct ahash_request *req, u32 flag)
+ {
+-      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+-      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+       struct caam_hash_state *state = ahash_request_ctx(req);
+       if (state->ctx_dma) {
+-              dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
++              dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
+               state->ctx_dma = 0;
+       }
+-      ahash_unmap(dev, edesc, req, dst_len);
++      ahash_unmap(dev, edesc, req);
+ }
+ static void ahash_done(void *cbk_ctx, u32 status)
+@@ -3220,16 +3217,13 @@ static void ahash_done(void *cbk_ctx, u3
+               ecode = -EIO;
+       }
+-      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
++      memcpy(req->result, state->caam_ctx, digestsize);
+       qi_cache_free(edesc);
+       print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+                            ctx->ctx_len, 1);
+-      if (req->result)
+-              print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+-                                   DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+-                                   digestsize, 1);
+       req->base.complete(&req->base, ecode);
+ }
+@@ -3251,7 +3245,7 @@ static void ahash_done_bi(void *cbk_ctx,
+               ecode = -EIO;
+       }
+-      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+       switch_buf(state);
+       qi_cache_free(edesc);
+@@ -3284,16 +3278,13 @@ static void ahash_done_ctx_src(void *cbk
+               ecode = -EIO;
+       }
+-      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
++      memcpy(req->result, state->caam_ctx, digestsize);
+       qi_cache_free(edesc);
+       print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+                            ctx->ctx_len, 1);
+-      if (req->result)
+-              print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+-                                   DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+-                                   digestsize, 1);
+       req->base.complete(&req->base, ecode);
+ }
+@@ -3315,7 +3306,7 @@ static void ahash_done_ctx_dst(void *cbk
+               ecode = -EIO;
+       }
+-      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+       switch_buf(state);
+       qi_cache_free(edesc);
+@@ -3453,7 +3444,7 @@ static int ahash_update_ctx(struct ahash
+       return ret;
+ unmap_ctx:
+-      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3485,7 +3476,7 @@ static int ahash_final_ctx(struct ahash_
+       sg_table = &edesc->sgt[0];
+       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+-                             DMA_TO_DEVICE);
++                             DMA_BIDIRECTIONAL);
+       if (ret)
+               goto unmap_ctx;
+@@ -3504,22 +3495,13 @@ static int ahash_final_ctx(struct ahash_
+       }
+       edesc->qm_sg_bytes = qm_sg_bytes;
+-      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+-                                      DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+-              dev_err(ctx->dev, "unable to map dst\n");
+-              edesc->dst_dma = 0;
+-              ret = -ENOMEM;
+-              goto unmap_ctx;
+-      }
+-
+       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+       dpaa2_fl_set_final(in_fle, true);
+       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       req_ctx->flc = &ctx->flc[FINALIZE];
+@@ -3534,7 +3516,7 @@ static int ahash_final_ctx(struct ahash_
+               return ret;
+ unmap_ctx:
+-      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3587,7 +3569,7 @@ static int ahash_finup_ctx(struct ahash_
+       sg_table = &edesc->sgt[0];
+       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+-                             DMA_TO_DEVICE);
++                             DMA_BIDIRECTIONAL);
+       if (ret)
+               goto unmap_ctx;
+@@ -3606,22 +3588,13 @@ static int ahash_finup_ctx(struct ahash_
+       }
+       edesc->qm_sg_bytes = qm_sg_bytes;
+-      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+-                                      DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+-              dev_err(ctx->dev, "unable to map dst\n");
+-              edesc->dst_dma = 0;
+-              ret = -ENOMEM;
+-              goto unmap_ctx;
+-      }
+-
+       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+       dpaa2_fl_set_final(in_fle, true);
+       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       req_ctx->flc = &ctx->flc[FINALIZE];
+@@ -3636,7 +3609,7 @@ static int ahash_finup_ctx(struct ahash_
+               return ret;
+ unmap_ctx:
+-      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3705,18 +3678,19 @@ static int ahash_digest(struct ahash_req
+               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+       }
+-      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++      state->ctx_dma_len = digestsize;
++      state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+                                       DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+-              dev_err(ctx->dev, "unable to map dst\n");
+-              edesc->dst_dma = 0;
++      if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++              dev_err(ctx->dev, "unable to map ctx\n");
++              state->ctx_dma = 0;
+               goto unmap;
+       }
+       dpaa2_fl_set_final(in_fle, true);
+       dpaa2_fl_set_len(in_fle, req->nbytes);
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       req_ctx->flc = &ctx->flc[DIGEST];
+@@ -3730,7 +3704,7 @@ static int ahash_digest(struct ahash_req
+               return ret;
+ unmap:
+-      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3765,11 +3739,12 @@ static int ahash_final_no_ctx(struct aha
+               }
+       }
+-      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++      state->ctx_dma_len = digestsize;
++      state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+                                       DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+-              dev_err(ctx->dev, "unable to map dst\n");
+-              edesc->dst_dma = 0;
++      if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++              dev_err(ctx->dev, "unable to map ctx\n");
++              state->ctx_dma = 0;
+               goto unmap;
+       }
+@@ -3787,7 +3762,7 @@ static int ahash_final_no_ctx(struct aha
+               dpaa2_fl_set_len(in_fle, buflen);
+       }
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       req_ctx->flc = &ctx->flc[DIGEST];
+@@ -3802,7 +3777,7 @@ static int ahash_final_no_ctx(struct aha
+               return ret;
+ unmap:
+-      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3882,6 +3857,7 @@ static int ahash_update_no_ctx(struct ah
+               }
+               edesc->qm_sg_bytes = qm_sg_bytes;
++              state->ctx_dma_len = ctx->ctx_len;
+               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+                                               ctx->ctx_len, DMA_FROM_DEVICE);
+               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+@@ -3930,7 +3906,7 @@ static int ahash_update_no_ctx(struct ah
+       return ret;
+ unmap_ctx:
+-      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -3995,11 +3971,12 @@ static int ahash_finup_no_ctx(struct aha
+       }
+       edesc->qm_sg_bytes = qm_sg_bytes;
+-      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++      state->ctx_dma_len = digestsize;
++      state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+                                       DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+-              dev_err(ctx->dev, "unable to map dst\n");
+-              edesc->dst_dma = 0;
++      if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++              dev_err(ctx->dev, "unable to map ctx\n");
++              state->ctx_dma = 0;
+               ret = -ENOMEM;
+               goto unmap;
+       }
+@@ -4010,7 +3987,7 @@ static int ahash_finup_no_ctx(struct aha
+       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+       dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       req_ctx->flc = &ctx->flc[DIGEST];
+@@ -4025,7 +4002,7 @@ static int ahash_finup_no_ctx(struct aha
+       return ret;
+ unmap:
+-      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+       qi_cache_free(edesc);
+       return -ENOMEM;
+ }
+@@ -4112,6 +4089,7 @@ static int ahash_update_first(struct aha
+                       scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+                                                *next_buflen, 0);
++              state->ctx_dma_len = ctx->ctx_len;
+               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+                                               ctx->ctx_len, DMA_FROM_DEVICE);
+               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+@@ -4155,7 +4133,7 @@ static int ahash_update_first(struct aha
+       return ret;
+ unmap_ctx:
+-      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++      ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
+       qi_cache_free(edesc);
+       return ret;
+ }
+@@ -4174,6 +4152,7 @@ static int ahash_init(struct ahash_reque
+       state->final = ahash_final_no_ctx;
+       state->ctx_dma = 0;
++      state->ctx_dma_len = 0;
+       state->current_buf = 0;
+       state->buf_dma = 0;
+       state->buflen_0 = 0;
+--- a/drivers/crypto/caam/caamalg_qi2.h
++++ b/drivers/crypto/caam/caamalg_qi2.h
+@@ -160,14 +160,12 @@ struct skcipher_edesc {
+ /*
+  * ahash_edesc - s/w-extended ahash descriptor
+- * @dst_dma: I/O virtual address of req->result
+  * @qm_sg_dma: I/O virtual address of h/w link table
+  * @src_nents: number of segments in input scatterlist
+  * @qm_sg_bytes: length of dma mapped qm_sg space
+  * @sgt: pointer to h/w link table
+  */
+ struct ahash_edesc {
+-      dma_addr_t dst_dma;
+       dma_addr_t qm_sg_dma;
+       int src_nents;
+       int qm_sg_bytes;
diff --git a/queue-5.0/crypto-caam-qi2-fix-zero-length-buffer-dma-mapping.patch b/queue-5.0/crypto-caam-qi2-fix-zero-length-buffer-dma-mapping.patch
new file mode 100644
index 0000000..a073432
--- /dev/null
@@ -0,0 +1,69 @@
+From 07586d3ddf284dd7a1a6579579d8efa7296fe60f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Thu, 25 Apr 2019 17:52:21 +0300
+Subject: crypto: caam/qi2 - fix zero-length buffer DMA mapping
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 07586d3ddf284dd7a1a6579579d8efa7296fe60f upstream.
+
+Commit 04e6d25c5bb2 ("crypto: caam - fix zero-length buffer DMA mapping")
+fixed an issue in the caam/jr driver where the ahash implementation was
+DMA mapping a zero-length buffer.
+
+The current commit applies a similar fix to the caam/qi2 driver.
+
+Cc: <stable@vger.kernel.org> # v4.20+
+Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi2.c |   25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -3756,10 +3756,13 @@ static int ahash_final_no_ctx(struct aha
+       if (!edesc)
+               return ret;
+-      state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
+-      if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+-              dev_err(ctx->dev, "unable to map src\n");
+-              goto unmap;
++      if (buflen) {
++              state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
++                                              DMA_TO_DEVICE);
++              if (dma_mapping_error(ctx->dev, state->buf_dma)) {
++                      dev_err(ctx->dev, "unable to map src\n");
++                      goto unmap;
++              }
+       }
+       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+@@ -3772,9 +3775,17 @@ static int ahash_final_no_ctx(struct aha
+       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+       dpaa2_fl_set_final(in_fle, true);
+-      dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(in_fle, state->buf_dma);
+-      dpaa2_fl_set_len(in_fle, buflen);
++      /*
++       * crypto engine requires the input entry to be present when
++       * "frame list" FD is used.
++       * Since engine does not support FMT=2'b11 (unused entry type), leaving
++       * in_fle zeroized (except for "Final" flag) is the best option.
++       */
++      if (buflen) {
++              dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++              dpaa2_fl_set_addr(in_fle, state->buf_dma);
++              dpaa2_fl_set_len(in_fle, buflen);
++      }
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
diff --git a/queue-5.0/crypto-caam-qi2-generate-hash-keys-in-place.patch b/queue-5.0/crypto-caam-qi2-generate-hash-keys-in-place.patch
new file mode 100644
index 0000000..2a8b9bf
--- /dev/null
@@ -0,0 +1,123 @@
+From 418cd20e4dcdca97e6f6d59e6336228dacf2e45d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Thu, 25 Apr 2019 17:52:23 +0300
+Subject: crypto: caam/qi2 - generate hash keys in-place
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 418cd20e4dcdca97e6f6d59e6336228dacf2e45d upstream.
+
+Commit 307244452d3d ("crypto: caam - generate hash keys in-place")
+fixed the ahash implementation in the caam/jr driver such that the
+user-provided key buffer is not DMA mapped, since it's not guaranteed
+to be DMAable.
+
+Apply a similar fix to the caam/qi2 driver.
+
+Cc: <stable@vger.kernel.org> # v4.20+
+Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi2.c |   41 +++++++++++++-------------------------
+ 1 file changed, 15 insertions(+), 26 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -3021,13 +3021,13 @@ static void split_key_sh_done(void *cbk_
+ }
+ /* Digest hash size if it is too large */
+-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+-                         u32 *keylen, u8 *key_out, u32 digestsize)
++static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
++                         u32 digestsize)
+ {
+       struct caam_request *req_ctx;
+       u32 *desc;
+       struct split_key_sh_result result;
+-      dma_addr_t src_dma, dst_dma;
++      dma_addr_t key_dma;
+       struct caam_flc *flc;
+       dma_addr_t flc_dma;
+       int ret = -ENOMEM;
+@@ -3044,17 +3044,10 @@ static int hash_digest_key(struct caam_h
+       if (!flc)
+               goto err_flc;
+-      src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
+-                               DMA_TO_DEVICE);
+-      if (dma_mapping_error(ctx->dev, src_dma)) {
+-              dev_err(ctx->dev, "unable to map key input memory\n");
+-              goto err_src_dma;
+-      }
+-      dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
+-                               DMA_FROM_DEVICE);
+-      if (dma_mapping_error(ctx->dev, dst_dma)) {
+-              dev_err(ctx->dev, "unable to map key output memory\n");
+-              goto err_dst_dma;
++      key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
++      if (dma_mapping_error(ctx->dev, key_dma)) {
++              dev_err(ctx->dev, "unable to map key memory\n");
++              goto err_key_dma;
+       }
+       desc = flc->sh_desc;
+@@ -3079,14 +3072,14 @@ static int hash_digest_key(struct caam_h
+       dpaa2_fl_set_final(in_fle, true);
+       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(in_fle, src_dma);
++      dpaa2_fl_set_addr(in_fle, key_dma);
+       dpaa2_fl_set_len(in_fle, *keylen);
+       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+-      dpaa2_fl_set_addr(out_fle, dst_dma);
++      dpaa2_fl_set_addr(out_fle, key_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+       print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+-                           DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
++                           DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+       print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+                            1);
+@@ -3106,17 +3099,15 @@ static int hash_digest_key(struct caam_h
+               wait_for_completion(&result.completion);
+               ret = result.err;
+               print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+-                                   DUMP_PREFIX_ADDRESS, 16, 4, key_in,
++                                   DUMP_PREFIX_ADDRESS, 16, 4, key,
+                                    digestsize, 1);
+       }
+       dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+                        DMA_TO_DEVICE);
+ err_flc_dma:
+-      dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
+-err_dst_dma:
+-      dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
+-err_src_dma:
++      dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
++err_key_dma:
+       kfree(flc);
+ err_flc:
+       kfree(req_ctx);
+@@ -3138,12 +3129,10 @@ static int ahash_setkey(struct crypto_ah
+       dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+       if (keylen > blocksize) {
+-              hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
+-                                         GFP_KERNEL | GFP_DMA);
++              hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+               if (!hashed_key)
+                       return -ENOMEM;
+-              ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+-                                    digestsize);
++              ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
+               if (ret)
+                       goto bad_free_key;
+               key = hashed_key;
diff --git a/queue-5.0/crypto-ccp-do-not-free-psp_master-when-platform_init-fails.patch b/queue-5.0/crypto-ccp-do-not-free-psp_master-when-platform_init-fails.patch
new file mode 100644
index 0000000..235104c
--- /dev/null
@@ -0,0 +1,39 @@
+From f5a2aeb8b254c764772729a6e48d4e0c914bb56a Mon Sep 17 00:00:00 2001
+From: "Singh, Brijesh" <brijesh.singh@amd.com>
+Date: Mon, 8 Apr 2019 20:42:55 +0000
+Subject: crypto: ccp - Do not free psp_master when PLATFORM_INIT fails
+
+From: Singh, Brijesh <brijesh.singh@amd.com>
+
+commit f5a2aeb8b254c764772729a6e48d4e0c914bb56a upstream.
+
+Currently, we free the psp_master if the PLATFORM_INIT fails during the
+SEV FW probe. If psp_master is freed, then the driver does not invoke the PSP
+FW. As per the SEV FW spec, there are several commands (PLATFORM_RESET,
+PLATFORM_STATUS, GET_ID, etc.) which can be executed in the UNINIT state,
+so we should not free the psp_master when PLATFORM_INIT fails.
+
+Fixes: 200664d5237f ("crypto: ccp: Add SEV support")
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: stable@vger.kernel.org # 4.19.y
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/psp-dev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -972,7 +972,7 @@ void psp_pci_init(void)
+       rc = sev_platform_init(&error);
+       if (rc) {
+               dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+-              goto err;
++              return;
+       }
+       dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
diff --git a/queue-5.0/crypto-chacha-generic-fix-use-as-arm64-no-neon-fallback.patch b/queue-5.0/crypto-chacha-generic-fix-use-as-arm64-no-neon-fallback.patch
new file mode 100644
index 0000000..e1c8af0
--- /dev/null
@@ -0,0 +1,61 @@
+From 7aceaaef04eaaf6019ca159bc354d800559bba1d Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 12 Mar 2019 22:12:45 -0700
+Subject: crypto: chacha-generic - fix use as arm64 no-NEON fallback
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 7aceaaef04eaaf6019ca159bc354d800559bba1d upstream.
+
+The arm64 implementations of ChaCha and XChaCha are failing the extra
+crypto self-tests following my patches to test the !may_use_simd() code
+paths, which previously were untested.  The problem is as follows:
+
+When !may_use_simd(), the arm64 NEON implementations fall back to the
+generic implementation, which uses the skcipher_walk API to iterate
+through the src/dst scatterlists.  Due to how the skcipher_walk API
+works, walk.stride is set from the skcipher_alg actually being used,
+which in this case is the arm64 NEON algorithm.  Thus walk.stride is
+5*CHACHA_BLOCK_SIZE, not CHACHA_BLOCK_SIZE.
+
+This unnecessarily large stride shouldn't cause an actual problem.
+However, the generic implementation computes round_down(nbytes,
+walk.stride).  round_down() assumes the round amount is a power of 2,
+which 5*CHACHA_BLOCK_SIZE is not, so it gives the wrong result.
+
+This causes the following case in skcipher_walk_done() to be hit,
+causing a WARN() and failing the encryption operation:
+
+       if (WARN_ON(err)) {
+               /* unexpected case; didn't process all bytes */
+               err = -EINVAL;
+               goto finish;
+       }
+
+Fix it by rounding down to CHACHA_BLOCK_SIZE instead of walk.stride.
+
+(Or we could replace round_down() with rounddown(), but that would add a
+slow division operation every time, which I think we should avoid.)
+
+Fixes: 2fe55987b262 ("crypto: arm64/chacha - use combined SIMD/ALU routine for more speed")
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/chacha_generic.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/crypto/chacha_generic.c
++++ b/crypto/chacha_generic.c
+@@ -52,7 +52,7 @@ static int chacha_stream_xor(struct skci
+               unsigned int nbytes = walk.nbytes;
+               if (nbytes < walk.total)
+-                      nbytes = round_down(nbytes, walk.stride);
++                      nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
+               chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+                              nbytes, ctx->nrounds);
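As a side note to the round_down() explanation above, the behaviour is easy to reproduce outside the kernel. The following standalone C sketch is illustrative only: the 500-byte length is an arbitrary example and the macros are simplified userspace copies of the kernel helpers, contrasting the bitmask-based round_down() with a division-based rounddown() for the non-power-of-2 stride 5 * CHACHA_BLOCK_SIZE = 320.

#include <stdio.h>

/* Simplified copy of the kernel's round_down(): only valid when the
 * rounding amount is a power of 2. */
#define round_down(x, y)  ((x) & ~((__typeof__(x))((y) - 1)))
/* Division-based rounddown(): correct for any non-zero amount, but slower. */
#define rounddown(x, y)   ((x) - ((x) % (y)))

int main(void)
{
        unsigned int nbytes = 500;      /* arbitrary example length */
        unsigned int stride = 5 * 64;   /* 5 * CHACHA_BLOCK_SIZE = 320 */

        /* Wrong: prints 192 instead of the expected 320. */
        printf("round_down(%u, %u) = %u\n", nbytes, stride,
               round_down(nbytes, stride));
        /* Correct: prints 320, the largest multiple of 320 not above 500. */
        printf("rounddown(%u, %u)  = %u\n", nbytes, stride,
               rounddown(nbytes, stride));
        return 0;
}

Rounding down to CHACHA_BLOCK_SIZE (a power of 2), as the fix does, keeps the cheap bitmask variant valid.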
diff --git a/queue-5.0/crypto-chacha20poly1305-set-cra_name-correctly.patch b/queue-5.0/crypto-chacha20poly1305-set-cra_name-correctly.patch
new file mode 100644
index 0000000..7a87d26
--- /dev/null
@@ -0,0 +1,46 @@
+From 5e27f38f1f3f45a0c938299c3a34a2d2db77165a Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 31 Mar 2019 13:04:16 -0700
+Subject: crypto: chacha20poly1305 - set cra_name correctly
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 5e27f38f1f3f45a0c938299c3a34a2d2db77165a upstream.
+
+If the rfc7539 template is instantiated with specific implementations,
+e.g. "rfc7539(chacha20-generic,poly1305-generic)" rather than
+"rfc7539(chacha20,poly1305)", then the implementation names end up
+included in the instance's cra_name.  This is incorrect because it then
+prevents all users from allocating "rfc7539(chacha20,poly1305)", if the
+highest priority implementations of chacha20 and poly1305 were selected.
+Also, the self-tests aren't run on an instance allocated in this way.
+
+Fix it by setting the instance's cra_name from the underlying
+algorithms' actual cra_names, rather than from the requested names.
+This matches what other templates do.
+
+Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539")
+Cc: <stable@vger.kernel.org> # v4.2+
+Cc: Martin Willi <martin@strongswan.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Martin Willi <martin@strongswan.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/chacha20poly1305.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -645,8 +645,8 @@ static int chachapoly_create(struct cryp
+       err = -ENAMETOOLONG;
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+-                   "%s(%s,%s)", name, chacha_name,
+-                   poly_name) >= CRYPTO_MAX_ALG_NAME)
++                   "%s(%s,%s)", name, chacha->base.cra_name,
++                   poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
+               goto out_drop_chacha;
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "%s(%s,%s)", name, chacha->base.cra_driver_name,
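To make the naming rule concrete, the sketch below (a standalone illustration with assumed example strings, not kernel code) builds the two names the way the fixed template does: cra_name from the underlying algorithms' cra_names and cra_driver_name from their driver names, so lookups of "rfc7539(chacha20,poly1305)" keep working regardless of which implementations were picked.

#include <stdio.h>

int main(void)
{
        /* Assumed example values; the real ones come from the resolved algorithms. */
        const char *chacha_cra_name = "chacha20";
        const char *chacha_drv_name = "chacha20-generic";
        const char *poly_cra_name   = "poly1305";
        const char *poly_drv_name   = "poly1305-generic";
        char cra_name[128], cra_driver_name[128];

        snprintf(cra_name, sizeof(cra_name), "rfc7539(%s,%s)",
                 chacha_cra_name, poly_cra_name);
        snprintf(cra_driver_name, sizeof(cra_driver_name), "rfc7539(%s,%s)",
                 chacha_drv_name, poly_drv_name);

        printf("cra_name:        %s\n", cra_name);        /* rfc7539(chacha20,poly1305) */
        printf("cra_driver_name: %s\n", cra_driver_name); /* rfc7539(chacha20-generic,poly1305-generic) */
        return 0;
}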
diff --git a/queue-5.0/crypto-crct10dif-generic-fix-use-via-crypto_shash_digest.patch b/queue-5.0/crypto-crct10dif-generic-fix-use-via-crypto_shash_digest.patch
new file mode 100644
index 0000000..d032c1c
--- /dev/null
@@ -0,0 +1,65 @@
+From 307508d1072979f4435416f87936f87eaeb82054 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 31 Mar 2019 13:04:12 -0700
+Subject: crypto: crct10dif-generic - fix use via crypto_shash_digest()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 307508d1072979f4435416f87936f87eaeb82054 upstream.
+
+The ->digest() method of crct10dif-generic reads the current CRC value
+from the shash_desc context.  But this value is uninitialized, causing
+crypto_shash_digest() to compute the wrong result.  Fix it.
+
+Probably this wasn't noticed before because lib/crc-t10dif.c only uses
+crypto_shash_update(), not crypto_shash_digest().  Likewise,
+crypto_shash_digest() is not yet tested by the crypto self-tests because
+those only test the ahash API which only uses shash init/update/final.
+
+This bug was detected by my patches that improve testmgr to fuzz
+algorithms against their generic implementation.
+
+Fixes: 2d31e518a428 ("crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework")
+Cc: <stable@vger.kernel.org> # v3.11+
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/crct10dif_generic.c |   11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/crypto/crct10dif_generic.c
++++ b/crypto/crct10dif_generic.c
+@@ -65,10 +65,9 @@ static int chksum_final(struct shash_des
+       return 0;
+ }
+-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+-                      u8 *out)
++static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
+ {
+-      *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
++      *(__u16 *)out = crc_t10dif_generic(crc, data, len);
+       return 0;
+ }
+@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_des
+ {
+       struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-      return __chksum_finup(&ctx->crc, data, len, out);
++      return __chksum_finup(ctx->crc, data, len, out);
+ }
+ static int chksum_digest(struct shash_desc *desc, const u8 *data,
+                        unsigned int length, u8 *out)
+ {
+-      struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-
+-      return __chksum_finup(&ctx->crc, data, length, out);
++      return __chksum_finup(0, data, length, out);
+ }
+ static struct shash_alg alg = {
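The contract behind this fix is that ->digest() must behave like init() + update() + final(), i.e. start from the well-defined initial CRC (0 for crc-t10dif) rather than from a per-request context that was never initialized. A toy standalone sketch of the difference follows; the checksum is a stand-in for crc_t10dif_generic(), and the "stale" value merely models uninitialized memory.

#include <stdio.h>

/* Stand-in for crc_t10dif_generic(): folds bytes into a running value. */
static unsigned int toy_csum(unsigned int crc, const unsigned char *p, int n)
{
        while (n--)
                crc = crc * 31 + *p++;
        return crc;
}

int main(void)
{
        const unsigned char msg[] = "123456789";
        unsigned int stale = 0xdeadbeef;  /* models the uninitialized desc context */

        /* Old ->digest(): seeded from whatever happened to be in the context. */
        printf("seeded from stale context: %08x\n", toy_csum(stale, msg, 9));
        /* Fixed ->digest(): seeded from the initial value, like init+update+final. */
        printf("seeded from initial value: %08x\n", toy_csum(0, msg, 9));
        return 0;
}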
diff --git a/queue-5.0/crypto-crypto4xx-fix-cfb-and-ofb-overran-dst-buffer-issues.patch b/queue-5.0/crypto-crypto4xx-fix-cfb-and-ofb-overran-dst-buffer-issues.patch
new file mode 100644
index 0000000..7d2a500
--- /dev/null
@@ -0,0 +1,128 @@
+From 7e92e1717e3eaf6b322c252947c696b3059f05be Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Mon, 22 Apr 2019 13:25:59 +0200
+Subject: crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit 7e92e1717e3eaf6b322c252947c696b3059f05be upstream.
+
+Currently, crypto4xx CFB and OFB AES ciphers are
+failing testmgr's test vectors.
+
+|cfb-aes-ppc4xx encryption overran dst buffer on test vector 3, cfg="in-place"
+|ofb-aes-ppc4xx encryption overran dst buffer on test vector 1, cfg="in-place"
+
+This is because of a very subtle "bug" in the hardware that
+gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+of the hardware spec:
+
+the OFB and CFB modes for AES are listed there as operation
+modes for >>> "Block ciphers" <<<, which kind of makes sense,
+but we would like them to be considered as stream ciphers just
+like the CTR mode.
+
+To workaround this issue and stop the hardware from causing
+"overran dst buffer" on crypttexts that are not a multiple
+of 16 (AES_BLOCK_SIZE), we force the driver to use the scatter
+buffers as the go-between.
+
+As a bonus this patch also kills redundant pd_uinfo->num_gd
+and pd_uinfo->num_sd setters, since the values have already been
+set before.
+
+Cc: stable@vger.kernel.org
+Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_core.c |   31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_asy
+       size_t offset_to_sr_ptr;
+       u32 gd_idx = 0;
+       int tmp;
+-      bool is_busy;
++      bool is_busy, force_sd;
++
++      /*
++       * There's a very subtile/disguised "bug" in the hardware that
++       * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
++       * of the hardware spec:
++       * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
++       * operation modes for >>> "Block ciphers" <<<.
++       *
++       * To workaround this issue and stop the hardware from causing
++       * "overran dst buffer" on crypttexts that are not a multiple
++       * of 16 (AES_BLOCK_SIZE), we force the driver to use the
++       * scatter buffers.
++       */
++      force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
++              || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
++              && (datalen % AES_BLOCK_SIZE);
+       /* figure how many gd are needed */
+       tmp = sg_nents_for_len(src, assoclen + datalen);
+@@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_asy
+       }
+       /* figure how many sd are needed */
+-      if (sg_is_last(dst)) {
++      if (sg_is_last(dst) && force_sd == false) {
+               num_sd = 0;
+       } else {
+               if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+@@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_asy
+       pd->sa_len = sa_len;
+       pd_uinfo = &dev->pdr_uinfo[pd_entry];
+-      pd_uinfo->async_req = req;
+       pd_uinfo->num_gd = num_gd;
+       pd_uinfo->num_sd = num_sd;
++      pd_uinfo->dest_va = dst;
++      pd_uinfo->async_req = req;
+       if (iv_len)
+               memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
+@@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_asy
+               /* get first gd we are going to use */
+               gd_idx = fst_gd;
+               pd_uinfo->first_gd = fst_gd;
+-              pd_uinfo->num_gd = num_gd;
+               gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+               pd->src = gd_dma;
+               /* enable gather */
+@@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_asy
+                * Indicate gather array is not used
+                */
+               pd_uinfo->first_gd = 0xffffffff;
+-              pd_uinfo->num_gd = 0;
+       }
+-      if (sg_is_last(dst)) {
++      if (!num_sd) {
+               /*
+                * we know application give us dst a whole piece of memory
+                * no need to use scatter ring.
+                */
+               pd_uinfo->using_sd = 0;
+               pd_uinfo->first_sd = 0xffffffff;
+-              pd_uinfo->num_sd = 0;
+-              pd_uinfo->dest_va = dst;
+               sa->sa_command_0.bf.scatter = 0;
+               pd->dest = (u32)dma_map_page(dev->core_dev->device,
+                                            sg_page(dst), dst->offset,
+@@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_asy
+               nbytes = datalen;
+               sa->sa_command_0.bf.scatter = 1;
+               pd_uinfo->using_sd = 1;
+-              pd_uinfo->dest_va = dst;
+               pd_uinfo->first_sd = fst_sd;
+-              pd_uinfo->num_sd = num_sd;
+               sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+               pd->dest = sd_dma;
+               /* setup scatter descriptor */
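A worked example of the force_sd condition added above, as a standalone sketch (the mode names and lengths are local to this example): only CFB/OFB requests whose length is not a multiple of AES_BLOCK_SIZE get routed through the scatter buffers, while CTR and block-aligned requests are unaffected.

#include <stdio.h>
#include <stdbool.h>

#define AES_BLOCK_SIZE 16

enum toy_mode { TOY_ECB, TOY_CBC, TOY_CFB, TOY_OFB, TOY_CTR };

/* Mirrors the shape of the force_sd test in crypto4xx_build_pd(). */
static bool force_scatter_buffers(enum toy_mode mode, unsigned int datalen)
{
        return (mode == TOY_CFB || mode == TOY_OFB) &&
               (datalen % AES_BLOCK_SIZE);
}

int main(void)
{
        printf("CFB, 17 bytes -> %d\n", force_scatter_buffers(TOY_CFB, 17)); /* 1 */
        printf("OFB, 32 bytes -> %d\n", force_scatter_buffers(TOY_OFB, 32)); /* 0 */
        printf("CTR, 17 bytes -> %d\n", force_scatter_buffers(TOY_CTR, 17)); /* 0 */
        return 0;
}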
diff --git a/queue-5.0/crypto-crypto4xx-fix-ctr-aes-missing-output-iv.patch b/queue-5.0/crypto-crypto4xx-fix-ctr-aes-missing-output-iv.patch
new file mode 100644
index 0000000..987133b
--- /dev/null
@@ -0,0 +1,73 @@
+From 25baaf8e2c93197d063b372ef7b62f2767c7ac0b Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Mon, 22 Apr 2019 13:25:58 +0200
+Subject: crypto: crypto4xx - fix ctr-aes missing output IV
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit 25baaf8e2c93197d063b372ef7b62f2767c7ac0b upstream.
+
+Commit 8efd972ef96a ("crypto: testmgr - support checking skcipher output IV")
+caused the crypto4xx driver to produce the following error:
+
+| ctr-aes-ppc4xx encryption test failed (wrong output IV)
+| on test vector 0, cfg="in-place"
+
+This patch fixes this by reworking the crypto4xx_setkey_aes()
+function to:
+
+ - not save the iv for ECB (as per 18.2.38 CRYP0_SA_CMD_0:
+   "This bit mut be cleared for DES ECB mode or AES ECB mode,
+   when no IV is used.")
+
+ - instruct the hardware to save the generated IV for all
+   other modes of operations that have IV and then supply
+   it back to the callee in pretty much the same way as we
+   do it for cbc-aes already.
+
+ - make it clear that the DIR_(IN|OUT)BOUND is the important
+   bit that tells the hardware to encrypt or decrypt the data.
+   (this is cosmetic - but it hopefully prevents me from
+    getting confused again).
+
+ - don't load any bogus hash when we don't use any hash
+   operation to begin with.
+
+Cc: stable@vger.kernel.org
+Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_alg.c |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct c
+       /* Setup SA */
+       sa = ctx->sa_in;
+-      set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
+-                               SA_SAVE_IV : SA_NOT_SAVE_IV),
+-                               SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
++      set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
++                               SA_NOT_SAVE_IV : SA_SAVE_IV),
++                               SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
++                               SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
+                                SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+                                SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+                                SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
+@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct c
+       memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+       sa = ctx->sa_out;
+       sa->sa_command_0.bf.dir = DIR_OUTBOUND;
++      /*
++       * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
++       * it's the DIR_(IN|OUT)BOUND that matters
++       */
++      sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
+       return 0;
+ }
diff --git a/queue-5.0/crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch b/queue-5.0/crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch
new file mode 100644
index 0000000..fecb8a1
--- /dev/null
@@ -0,0 +1,137 @@
+From f699594d436960160f6d5ba84ed4a222f20d11cd Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 18 Apr 2019 14:43:02 -0700
+Subject: crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit f699594d436960160f6d5ba84ed4a222f20d11cd upstream.
+
+GCM instances can be created by either the "gcm" template, which only
+allows choosing the block cipher, e.g. "gcm(aes)"; or by "gcm_base",
+which allows choosing the ctr and ghash implementations, e.g.
+"gcm_base(ctr(aes-generic),ghash-generic)".
+
+However, a "gcm_base" instance prevents a "gcm" instance from being
+registered using the same implementations.  Nor will the instance be
+found by lookups of "gcm".  This can be used as a denial of service.
+Moreover, "gcm_base" instances are never tested by the crypto
+self-tests, even if there are compatible "gcm" tests.
+
+The root cause of these problems is that instances of the two templates
+use different cra_names.  Therefore, fix these problems by making
+"gcm_base" instances set the same cra_name as "gcm" instances, e.g.
+"gcm(aes)" instead of "gcm_base(ctr(aes-generic),ghash-generic)".
+
+This requires extracting the block cipher name from the name of the ctr
+algorithm.  It also requires starting to verify that the algorithms are
+really ctr and ghash, not something else entirely.  But it would be
+bizarre if anyone were actually using non-gcm-compatible algorithms with
+gcm_base, so this shouldn't break anyone in practice.
+
+Fixes: d00aa19b507b ("[CRYPTO] gcm: Allow block cipher parameter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/gcm.c |   34 +++++++++++-----------------------
+ 1 file changed, 11 insertions(+), 23 deletions(-)
+
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_
+ static int crypto_gcm_create_common(struct crypto_template *tmpl,
+                                   struct rtattr **tb,
+-                                  const char *full_name,
+                                   const char *ctr_name,
+                                   const char *ghash_name)
+ {
+@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(stru
+               goto err_free_inst;
+       err = -EINVAL;
+-      if (ghash->digestsize != 16)
++      if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
++          ghash->digestsize != 16)
+               goto err_drop_ghash;
+       crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
+@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(stru
+       ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
+-      /* We only support 16-byte blocks. */
++      /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
+       err = -EINVAL;
+-      if (crypto_skcipher_alg_ivsize(ctr) != 16)
++      if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
++          crypto_skcipher_alg_ivsize(ctr) != 16 ||
++          ctr->base.cra_blocksize != 1)
+               goto out_put_ctr;
+-      /* Not a stream cipher? */
+-      if (ctr->base.cra_blocksize != 1)
++      err = -ENAMETOOLONG;
++      if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
++                   "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+               goto out_put_ctr;
+-      err = -ENAMETOOLONG;
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "gcm_base(%s,%s)", ctr->base.cra_driver_name,
+                    ghash_alg->cra_driver_name) >=
+           CRYPTO_MAX_ALG_NAME)
+               goto out_put_ctr;
+-      memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+-
+       inst->alg.base.cra_flags = (ghash->base.cra_flags |
+                                   ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = (ghash->base.cra_priority +
+@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct cryp
+ {
+       const char *cipher_name;
+       char ctr_name[CRYPTO_MAX_ALG_NAME];
+-      char full_name[CRYPTO_MAX_ALG_NAME];
+       cipher_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(cipher_name))
+@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct cryp
+           CRYPTO_MAX_ALG_NAME)
+               return -ENAMETOOLONG;
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
+-          CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
+-
+-      return crypto_gcm_create_common(tmpl, tb, full_name,
+-                                      ctr_name, "ghash");
++      return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
+ }
+ static struct crypto_template crypto_gcm_tmpl = {
+@@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct
+ {
+       const char *ctr_name;
+       const char *ghash_name;
+-      char full_name[CRYPTO_MAX_ALG_NAME];
+       ctr_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(ctr_name))
+@@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct
+       if (IS_ERR(ghash_name))
+               return PTR_ERR(ghash_name);
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
+-                   ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
+-
+-      return crypto_gcm_create_common(tmpl, tb, full_name,
+-                                      ctr_name, ghash_name);
++      return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
+ }
+ static struct crypto_template crypto_gcm_base_tmpl = {
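The name derivation in the fix above relies on every CTR algorithm's cra_name having the form "ctr(<cipher>)", so skipping the four-character "ctr(" prefix (and keeping the trailing ')') yields "gcm(<cipher>)". A standalone illustration with an assumed example name:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *ctr_cra_name = "ctr(aes)";   /* assumed example value */
        char gcm_name[64];

        if (strncmp(ctr_cra_name, "ctr(", 4) != 0)
                return 1;                        /* not a CTR algorithm */

        snprintf(gcm_name, sizeof(gcm_name), "gcm(%s", ctr_cra_name + 4);
        printf("%s\n", gcm_name);                /* prints: gcm(aes) */
        return 0;
}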
diff --git a/queue-5.0/crypto-lrw-don-t-access-already-freed-walk.iv.patch b/queue-5.0/crypto-lrw-don-t-access-already-freed-walk.iv.patch
new file mode 100644
index 0000000..e732dc3
--- /dev/null
@@ -0,0 +1,47 @@
+From aec286cd36eacfd797e3d5dab8d5d23c15d1bb5e Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:29 -0700
+Subject: crypto: lrw - don't access already-freed walk.iv
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit aec286cd36eacfd797e3d5dab8d5d23c15d1bb5e upstream.
+
+If the user-provided IV needs to be aligned to the algorithm's
+alignmask, then skcipher_walk_virt() copies the IV into a new aligned
+buffer walk.iv.  But skcipher_walk_virt() can fail afterwards, and then
+if the caller unconditionally accesses walk.iv, it's a use-after-free.
+
+Fix this in the LRW template by checking the return value of
+skcipher_walk_virt().
+
+This bug was detected by my patches that improve testmgr to fuzz
+algorithms against their generic implementation.  When the extra
+self-tests were run on a KASAN-enabled kernel, a KASAN use-after-free
+splat occured during lrw(aes) testing.
+
+Fixes: c778f96bf347 ("crypto: lrw - Optimize tweak computation")
+Cc: <stable@vger.kernel.org> # v4.20+
+Cc: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/lrw.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -162,8 +162,10 @@ static int xor_tweak(struct skcipher_req
+       }
+       err = skcipher_walk_virt(&w, req, false);
+-      iv = (__be32 *)w.iv;
++      if (err)
++              return err;
++      iv = (__be32 *)w.iv;
+       counter[0] = be32_to_cpu(iv[3]);
+       counter[1] = be32_to_cpu(iv[2]);
+       counter[2] = be32_to_cpu(iv[1]);
diff --git a/queue-5.0/crypto-rockchip-update-iv-buffer-to-contain-the-next-iv.patch b/queue-5.0/crypto-rockchip-update-iv-buffer-to-contain-the-next-iv.patch
new file mode 100644
index 0000000..0b61173
--- /dev/null
@@ -0,0 +1,68 @@
+From f0cfd57b43fec65761ca61d3892b983a71515f23 Mon Sep 17 00:00:00 2001
+From: Zhang Zhijie <zhangzj@rock-chips.com>
+Date: Fri, 12 Apr 2019 17:16:33 +0800
+Subject: crypto: rockchip - update IV buffer to contain the next IV
+
+From: Zhang Zhijie <zhangzj@rock-chips.com>
+
+commit f0cfd57b43fec65761ca61d3892b983a71515f23 upstream.
+
+For the CBC implementation, the Kernel Crypto API expects the request
+to output the next IV to the IV buffer. So the last block of the
+ciphertext should be copied into the assigned IV buffer.
+
+Reported-by: Eric Biggers <ebiggers@google.com>
+Fixes: 433cd2c617bf ("crypto: rockchip - add crypto driver for rk3288")
+Cc: <stable@vger.kernel.org> # v4.5+
+Signed-off-by: Zhang Zhijie <zhangzj@rock-chips.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c |   25 +++++++++++++++------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+@@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_c
+       u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+               dev->sg_src->offset + dev->sg_src->length - ivsize;
+-      /* store the iv that need to be updated in chain mode */
+-      if (ctx->mode & RK_CRYPTO_DEC)
++      /* Store the iv that need to be updated in chain mode.
++       * And update the IV buffer to contain the next IV for decryption mode.
++       */
++      if (ctx->mode & RK_CRYPTO_DEC) {
+               memcpy(ctx->iv, src_last_blk, ivsize);
++              sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
++                                 ivsize, dev->total - ivsize);
++      }
+       err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+       if (!err)
+@@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_cry
+       struct ablkcipher_request *req =
+               ablkcipher_request_cast(dev->async_req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++      struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+-      if (ivsize == DES_BLOCK_SIZE)
+-              memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+-                            ivsize);
+-      else if (ivsize == AES_BLOCK_SIZE)
+-              memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
++      /* Update the IV buffer to contain the next IV for encryption mode. */
++      if (!(ctx->mode & RK_CRYPTO_DEC)) {
++              if (dev->aligned) {
++                      memcpy(req->info, sg_virt(dev->sg_dst) +
++                              dev->sg_dst->length - ivsize, ivsize);
++              } else {
++                      memcpy(req->info, dev->addr_vir +
++                              dev->count - ivsize, ivsize);
++              }
++      }
+ }
+ static void rk_update_iv(struct rk_crypto_info *dev)
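For reference, a toy standalone sketch of the CBC chaining rule this patch implements (the buffer contents and sizes are made up for the example): after a request, req->info must hold the last ciphertext block, which sits at the end of the destination for encryption and at the end of the source for decryption, where it has to be saved before an in-place operation overwrites it.

#include <stdio.h>
#include <string.h>

#define IVSIZE 16

static void save_next_iv(unsigned char *iv, const unsigned char *src,
                         const unsigned char *dst, size_t total, int decrypt)
{
        const unsigned char *ciphertext = decrypt ? src : dst;

        memcpy(iv, ciphertext + total - IVSIZE, IVSIZE);
}

int main(void)
{
        unsigned char src[32], dst[32], iv[IVSIZE];

        memset(src, 0xc1, sizeof(src));   /* stands in for the input ciphertext */
        memset(dst, 0xe2, sizeof(dst));   /* stands in for the output ciphertext */

        save_next_iv(iv, src, dst, sizeof(dst), 0);
        printf("next IV after encryption starts with 0x%02x\n", iv[0]);
        save_next_iv(iv, src, dst, sizeof(src), 1);
        printf("next IV after decryption starts with 0x%02x\n", iv[0]);
        return 0;
}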
diff --git a/queue-5.0/crypto-salsa20-don-t-access-already-freed-walk.iv.patch b/queue-5.0/crypto-salsa20-don-t-access-already-freed-walk.iv.patch
new file mode 100644
index 0000000..0ec02fa
--- /dev/null
@@ -0,0 +1,44 @@
+From edaf28e996af69222b2cb40455dbb5459c2b875a Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:30 -0700
+Subject: crypto: salsa20 - don't access already-freed walk.iv
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit edaf28e996af69222b2cb40455dbb5459c2b875a upstream.
+
+If the user-provided IV needs to be aligned to the algorithm's
+alignmask, then skcipher_walk_virt() copies the IV into a new aligned
+buffer walk.iv.  But skcipher_walk_virt() can fail afterwards, and then
+if the caller unconditionally accesses walk.iv, it's a use-after-free.
+
+salsa20-generic doesn't set an alignmask, so currently it isn't affected
+by this despite unconditionally accessing walk.iv.  However this is more
+subtle than desired, and it was actually broken prior to the alignmask
+being removed by commit b62b3db76f73 ("crypto: salsa20-generic - cleanup
+and convert to skcipher API").
+
+Since salsa20-generic does not update the IV and does not need any IV
+alignment, update it to use req->iv instead of walk.iv.
+
+Fixes: 2407d60872dd ("[CRYPTO] salsa20: Salsa20 stream cipher")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/salsa20_generic.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher
+       err = skcipher_walk_virt(&walk, req, false);
+-      salsa20_init(state, ctx, walk.iv);
++      salsa20_init(state, ctx, req->iv);
+       while (walk.nbytes > 0) {
+               unsigned int nbytes = walk.nbytes;
diff --git a/queue-5.0/crypto-skcipher-don-t-warn-on-unprocessed-data-after-slow-walk-step.patch b/queue-5.0/crypto-skcipher-don-t-warn-on-unprocessed-data-after-slow-walk-step.patch
new file mode 100644
index 0000000..11ac8a7
--- /dev/null
@@ -0,0 +1,56 @@
+From dcaca01a42cc2c425154a13412b4124293a6e11e Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 31 Mar 2019 13:04:15 -0700
+Subject: crypto: skcipher - don't WARN on unprocessed data after slow walk step
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit dcaca01a42cc2c425154a13412b4124293a6e11e upstream.
+
+skcipher_walk_done() assumes it's a bug if, after the "slow" path is
+executed where the next chunk of data is processed via a bounce buffer,
+the algorithm says it didn't process all bytes.  Thus it WARNs on this.
+
+However, this can happen legitimately when the message needs to be
+evenly divisible into "blocks" but isn't, and the algorithm has a
+'walksize' greater than the block size.  For example, ecb-aes-neonbs
+sets 'walksize' to 128 bytes and only supports messages evenly divisible
+into 16-byte blocks.  If, say, 17 message bytes remain but they straddle
+scatterlist elements, the skcipher_walk code will take the "slow" path
+and pass the algorithm all 17 bytes in the bounce buffer.  But the
+algorithm will only be able to process 16 bytes, triggering the WARN.
+
+Fix this by just removing the WARN_ON().  Returning -EINVAL, as the code
+already does, is the right behavior.
+
+This bug was detected by my patches that improve testmgr to fuzz
+algorithms against their generic implementation.
+
+Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface")
+Cc: <stable@vger.kernel.org> # v4.10+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/skcipher.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -131,8 +131,13 @@ unmap_src:
+               memcpy(walk->dst.virt.addr, walk->page, n);
+               skcipher_unmap_dst(walk);
+       } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+-              if (WARN_ON(err)) {
+-                      /* unexpected case; didn't process all bytes */
++              if (err) {
++                      /*
++                       * Didn't process all bytes.  Either the algorithm is
++                       * broken, or this was the last step and it turned out
++                       * the message wasn't evenly divisible into blocks but
++                       * the algorithm requires it.
++                       */
+                       err = -EINVAL;
+                       goto finish;
+               }
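A worked version of the 17-byte scenario from the commit message (the numbers come from it, the rest is illustrative): with a 16-byte block size, the algorithm can only consume 16 of the 17 bytes handed over by the slow path, so one unprocessed byte is a legitimate outcome rather than a bug.

#include <stdio.h>

int main(void)
{
        unsigned int handed_over = 17;   /* bytes copied into the bounce buffer */
        unsigned int blocksize = 16;     /* block size the algorithm requires */
        unsigned int processed = (handed_over / blocksize) * blocksize;

        printf("handed over %u, processed %u, left unprocessed %u\n",
               handed_over, processed, handed_over - processed);
        return 0;
}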
diff --git a/queue-5.0/crypto-vmx-fix-copy-paste-error-in-ctr-mode.patch b/queue-5.0/crypto-vmx-fix-copy-paste-error-in-ctr-mode.patch
new file mode 100644
index 0000000..c24581e
--- /dev/null
@@ -0,0 +1,56 @@
+From dcf7b48212c0fab7df69e84fab22d6cb7c8c0fb9 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 15 Mar 2019 13:09:01 +1100
+Subject: crypto: vmx - fix copy-paste error in CTR mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit dcf7b48212c0fab7df69e84fab22d6cb7c8c0fb9 upstream.
+
+The original assembly imported from OpenSSL has two copy-paste
+errors in handling CTR mode. When dealing with a 2 or 3 block tail,
+the code branches to the CBC decryption exit path, rather than to
+the CTR exit path.
+
+This leads to corruption of the IV, which leads to subsequent blocks
+being corrupted.
+
+This can be detected with the libkcapi test suite, which is available at
+https://github.com/smuellerDD/libkcapi
+
+Reported-by: Ondrej Mosnáček <omosnacek@gmail.com>
+Fixes: 5c380d623ed3 ("crypto: vmx - Add support for VMS instructions by ASM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Tested-by: Michael Ellerman <mpe@ellerman.id.au>
+Tested-by: Ondrej Mosnacek <omosnacek@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/aesp8-ppc.pl |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/vmx/aesp8-ppc.pl
++++ b/drivers/crypto/vmx/aesp8-ppc.pl
+@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
+       stvx_u          $out1,$x10,$out
+       stvx_u          $out2,$x20,$out
+       addi            $out,$out,0x30
+-      b               Lcbc_dec8x_done
++      b               Lctr32_enc8x_done
+ .align        5
+ Lctr32_enc8x_two:
+@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
+       stvx_u          $out0,$x00,$out
+       stvx_u          $out1,$x10,$out
+       addi            $out,$out,0x20
+-      b               Lcbc_dec8x_done
++      b               Lctr32_enc8x_done
+ .align        5
+ Lctr32_enc8x_one:
diff --git a/queue-5.0/crypto-x86-crct10dif-pcl-fix-use-via-crypto_shash_digest.patch b/queue-5.0/crypto-x86-crct10dif-pcl-fix-use-via-crypto_shash_digest.patch
new file mode 100644
index 0000000..0c91aeb
--- /dev/null
@@ -0,0 +1,68 @@
+From dec3d0b1071a0f3194e66a83d26ecf4aa8c5910e Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 31 Mar 2019 13:04:13 -0700
+Subject: crypto: x86/crct10dif-pcl - fix use via crypto_shash_digest()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit dec3d0b1071a0f3194e66a83d26ecf4aa8c5910e upstream.
+
+The ->digest() method of crct10dif-pclmul reads the current CRC value
+from the shash_desc context.  But this value is uninitialized, causing
+crypto_shash_digest() to compute the wrong result.  Fix it.
+
+Probably this wasn't noticed before because lib/crc-t10dif.c only uses
+crypto_shash_update(), not crypto_shash_digest().  Likewise,
+crypto_shash_digest() is not yet tested by the crypto self-tests because
+those only test the ahash API which only uses shash init/update/final.
+
+Fixes: 0b95a7f85718 ("crypto: crct10dif - Glue code to cast accelerated CRCT10DIF assembly as a crypto transform")
+Cc: <stable@vger.kernel.org> # v3.11+
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/crct10dif-pclmul_glue.c |   13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
+@@ -76,15 +76,14 @@ static int chksum_final(struct shash_des
+       return 0;
+ }
+-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+-                      u8 *out)
++static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
+ {
+       if (irq_fpu_usable()) {
+               kernel_fpu_begin();
+-              *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
++              *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
+               kernel_fpu_end();
+       } else
+-              *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
++              *(__u16 *)out = crc_t10dif_generic(crc, data, len);
+       return 0;
+ }
+@@ -93,15 +92,13 @@ static int chksum_finup(struct shash_des
+ {
+       struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-      return __chksum_finup(&ctx->crc, data, len, out);
++      return __chksum_finup(ctx->crc, data, len, out);
+ }
+ static int chksum_digest(struct shash_desc *desc, const u8 *data,
+                        unsigned int length, u8 *out)
+ {
+-      struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-
+-      return __chksum_finup(&ctx->crc, data, length, out);
++      return __chksum_finup(0, data, length, out);
+ }
+ static struct shash_alg alg = {
diff --git a/queue-5.0/mmc-sdhci-of-arasan-add-dts-property-to-disable-dcmds.patch b/queue-5.0/mmc-sdhci-of-arasan-add-dts-property-to-disable-dcmds.patch
new file mode 100644
index 0000000..ae5a8f9
--- /dev/null
@@ -0,0 +1,43 @@
+From 7bda9482e7ed4d27d83c1f9cb5cbe3b34ddac3e8 Mon Sep 17 00:00:00 2001
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Date: Fri, 22 Mar 2019 12:38:05 +0100
+Subject: mmc: sdhci-of-arasan: Add DTS property to disable DCMDs.
+
+From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+
+commit 7bda9482e7ed4d27d83c1f9cb5cbe3b34ddac3e8 upstream.
+
+Direct commands (DCMDs) are an optional feature of eMMC 5.1's command
+queue engine (CQE). The Arasan eMMC 5.1 controller uses the CQHCI,
+which exposes a control register bit to enable the feature.
+The current implementation sets this bit unconditionally.
+
+This patch allows suppressing the feature activation
+by specifying the property disable-cqe-dcmd.
+
+Signed-off-by: Christoph Muellner <christoph.muellner@theobroma-systems.com>
+Signed-off-by: Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Fixes: 84362d79f436 ("mmc: sdhci-of-arasan: Add CQHCI support for arasan,sdhci-5.1")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-arasan.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -832,7 +832,10 @@ static int sdhci_arasan_probe(struct pla
+               host->mmc_host_ops.start_signal_voltage_switch =
+                                       sdhci_arasan_voltage_switch;
+               sdhci_arasan->has_cqe = true;
+-              host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
++              host->mmc->caps2 |= MMC_CAP2_CQE;
++
++              if (!of_property_read_bool(np, "disable-cqe-dcmd"))
++                      host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+       }
+       ret = sdhci_arasan_add_host(sdhci_arasan);
diff --git a/queue-5.0/objtool-fix-function-fallthrough-detection.patch b/queue-5.0/objtool-fix-function-fallthrough-detection.patch
new file mode 100644
index 0000000..d181925
--- /dev/null
@@ -0,0 +1,59 @@
+From e6f393bc939d566ce3def71232d8013de9aaadde Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 13 May 2019 12:01:32 -0500
+Subject: objtool: Fix function fallthrough detection
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit e6f393bc939d566ce3def71232d8013de9aaadde upstream.
+
+When a function falls through to the next function due to a compiler
+bug, objtool prints some obscure warnings.  For example:
+
+  drivers/regulator/core.o: warning: objtool: regulator_count_voltages()+0x95: return with modified stack frame
+  drivers/regulator/core.o: warning: objtool: regulator_count_voltages()+0x0: stack state mismatch: cfa1=7+32 cfa2=7+8
+
+Instead it should be printing:
+
+  drivers/regulator/core.o: warning: objtool: regulator_supply_is_couple() falls through to next function regulator_count_voltages()
+
+This used to work, but was broken by the following commit:
+
+  13810435b9a7 ("objtool: Support GCC 8's cold subfunctions")
+
+The padding nops at the end of a function aren't actually part of the
+function, as defined by the symbol table.  So the 'func' variable in
+validate_branch() is getting cleared to NULL when a padding nop is
+encountered, breaking the fallthrough detection.
+
+If the current instruction doesn't have a function associated with it,
+just consider it to be part of the previously detected function by not
+overwriting the previous value of 'func'.
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Fixes: 13810435b9a7 ("objtool: Support GCC 8's cold subfunctions")
+Link: http://lkml.kernel.org/r/546d143820cd08a46624ae8440d093dd6c902cae.1557766718.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/objtool/check.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1832,7 +1832,8 @@ static int validate_branch(struct objtoo
+                       return 1;
+               }
+-              func = insn->func ? insn->func->pfunc : NULL;
++              if (insn->func)
++                      func = insn->func->pfunc;
+               if (func && insn->ignore) {
+                       WARN_FUNC("BUG: why am I validating an ignored function?",
diff --git a/queue-5.0/power-supply-axp288_charger-fix-unchecked-return-value.patch b/queue-5.0/power-supply-axp288_charger-fix-unchecked-return-value.patch
new file mode 100644
index 0000000..35103b7
--- /dev/null
@@ -0,0 +1,42 @@
+From c3422ad5f84a66739ec6a37251ca27638c85b6be Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Mon, 18 Mar 2019 11:14:39 -0500
+Subject: power: supply: axp288_charger: Fix unchecked return value
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit c3422ad5f84a66739ec6a37251ca27638c85b6be upstream.
+
+Currently there is no check on the platform_get_irq() return value in
+case it fails, so errors are never actually reported and passing such a
+negative value as the argument to regmap_irq_get_virq() causes
+unexpected behavior.
+
+Fix this by adding a proper check, a message reporting any errors,
+and returning *pirq*.
+
+Addresses-Coverity-ID: 1443940 ("Improper use of negative value")
+Fixes: 843735b788a4 ("power: axp288_charger: axp288 charger driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/supply/axp288_charger.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -833,6 +833,10 @@ static int axp288_charger_probe(struct p
+       /* Register charger interrupts */
+       for (i = 0; i < CHRG_INTR_END; i++) {
+               pirq = platform_get_irq(info->pdev, i);
++              if (pirq < 0) {
++                      dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
++                      return pirq;
++              }
+               info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
+               if (info->irq[i] < 0) {
+                       dev_warn(&info->pdev->dev,
diff --git a/queue-5.0/power-supply-axp288_fuel_gauge-add-acepc-t8-and-t11-mini-pcs-to-the-blacklist.patch b/queue-5.0/power-supply-axp288_fuel_gauge-add-acepc-t8-and-t11-mini-pcs-to-the-blacklist.patch
new file mode 100644 (file)
index 0000000..5cba7bd
--- /dev/null
@@ -0,0 +1,58 @@
+From 9274c78305e12c5f461bec15f49c38e0f32ca705 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Mon, 22 Apr 2019 22:43:01 +0200
+Subject: power: supply: axp288_fuel_gauge: Add ACEPC T8 and T11 mini PCs to the blacklist
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 9274c78305e12c5f461bec15f49c38e0f32ca705 upstream.
+
+The ACEPC T8 and T11 Cherry Trail Z8350 mini PCs use an AXP288 and, as
+PCs rather than portables, they do not have a battery. Still, for some
+reason the AXP288 not only thinks there is a battery, it actually
+thinks it is discharging while the PC is running, slowly going to
+0% full, causing userspace to shutdown the system due to the battery
+being critically low after a while.
+
+This commit adds the ACEPC T8 and T11 to the axp288 fuel-gauge driver
+blacklist, so that we stop reporting bogus battery readings on these devices.
+
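+For illustration, a self-contained sketch of how such a blacklist match
+works (plain strings instead of the kernel's dmi_system_id machinery;
+the values are taken from the two entries added below, and every listed
+field has to match exactly, as DMI_EXACT_MATCH requires):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  struct dmi_entry {
+          const char *board_vendor, *board_name, *product_sku, *bios_version;
+  };
+
+  static const struct dmi_entry blacklist[] = {
+          { "To be filled by O.E.M.", "Cherry Trail CR", "T8",  "1.000" },
+          { "To be filled by O.E.M.", "Cherry Trail CR", "T11", "1.000" },
+  };
+
+  static bool blacklisted(const struct dmi_entry *sys)
+  {
+          size_t i;
+
+          for (i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++) {
+                  const struct dmi_entry *e = &blacklist[i];
+
+                  if (!strcmp(sys->board_vendor, e->board_vendor) &&
+                      !strcmp(sys->board_name, e->board_name) &&
+                      !strcmp(sys->product_sku, e->product_sku) &&
+                      !strcmp(sys->bios_version, e->bios_version))
+                          return true;
+          }
+          return false;
+  }
+
+  int main(void)
+  {
+          struct dmi_entry t8 = { "To be filled by O.E.M.",
+                                  "Cherry Trail CR", "T8", "1.000" };
+
+          printf("skip fuel gauge: %d\n", blacklisted(&t8));
+          return 0;
+  }
+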
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1690852
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/supply/axp288_fuel_gauge.c |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -696,6 +696,26 @@ intr_failed:
+  */
+ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+       {
++              /* ACEPC T8 Cherry Trail Z8350 mini PC */
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
++                      DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
++                      /* also match on somewhat unique bios-version */
++                      DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
++              },
++      },
++      {
++              /* ACEPC T11 Cherry Trail Z8350 mini PC */
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
++                      DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
++                      /* also match on somewhat unique bios-version */
++                      DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
++              },
++      },
++      {
+               /* Intel Cherry Trail Compute Stick, Windows version */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/queue-5.0/sched-x86-save-flags-on-context-switch.patch b/queue-5.0/sched-x86-save-flags-on-context-switch.patch
new file mode 100644 (file)
index 0000000..74170a1
--- /dev/null
@@ -0,0 +1,128 @@
+From 6690e86be83ac75832e461c141055b5d601c0a6d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 14 Feb 2019 10:30:52 +0100
+Subject: sched/x86: Save [ER]FLAGS on context switch
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 6690e86be83ac75832e461c141055b5d601c0a6d upstream.
+
+Effectively reverts commit:
+
+  2c7577a75837 ("sched/x86_64: Don't save flags on context switch")
+
+Specifically because SMAP uses FLAGS.AC, which invalidates the claim
+that the kernel has clean flags.
+
+In particular, while preemption from interrupt return is fine (the
+IRET frame on the exception stack contains FLAGS), it breaks any code
+that does synchronous scheduling, including preempt_enable().
+
+This has become a significant issue ever since commit:
+
+  5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")
+
+provided a means of having 'normal' C code between STAC / CLAC,
+exposing the FLAGS.AC state. So far this hasn't led to trouble, but
+fix it before it comes apart.
+
+Reported-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@kernel.org
+Fixes: 5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_32.S        |    2 ++
+ arch/x86/entry/entry_64.S        |    2 ++
+ arch/x86/include/asm/switch_to.h |    1 +
+ arch/x86/kernel/process_32.c     |    7 +++++++
+ arch/x86/kernel/process_64.c     |    8 ++++++++
+ 5 files changed, 20 insertions(+)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
+       pushl   %ebx
+       pushl   %edi
+       pushl   %esi
++      pushfl
+       /* switch stack */
+       movl    %esp, TASK_threadsp(%eax)
+@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
+ #endif
+       /* restore callee-saved registers */
++      popfl
+       popl    %esi
+       popl    %edi
+       popl    %ebx
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -291,6 +291,7 @@ ENTRY(__switch_to_asm)
+       pushq   %r13
+       pushq   %r14
+       pushq   %r15
++      pushfq
+       /* switch stack */
+       movq    %rsp, TASK_threadsp(%rdi)
+@@ -313,6 +314,7 @@ ENTRY(__switch_to_asm)
+ #endif
+       /* restore callee-saved registers */
++      popfq
+       popq    %r15
+       popq    %r14
+       popq    %r13
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
+  * order of the fields must match the code in __switch_to_asm().
+  */
+ struct inactive_task_frame {
++      unsigned long flags;
+ #ifdef CONFIG_X86_64
+       unsigned long r15;
+       unsigned long r14;
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_
+       struct task_struct *tsk;
+       int err;
++      /*
++       * For a new task use the RESET flags value since there is no before.
++       * All the status flags are zero; DF and all the system flags must also
++       * be 0, specifically IF must be 0 because we context switch to the new
++       * task with interrupts disabled.
++       */
++      frame->flags = X86_EFLAGS_FIXED;
+       frame->bp = 0;
+       frame->ret_addr = (unsigned long) ret_from_fork;
+       p->thread.sp = (unsigned long) fork_frame;
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -392,6 +392,14 @@ int copy_thread_tls(unsigned long clone_
+       childregs = task_pt_regs(p);
+       fork_frame = container_of(childregs, struct fork_frame, regs);
+       frame = &fork_frame->frame;
++
++      /*
++       * For a new task use the RESET flags value since there is no before.
++       * All the status flags are zero; DF and all the system flags must also
++       * be 0, specifically IF must be 0 because we context switch to the new
++       * task with interrupts disabled.
++       */
++      frame->flags = X86_EFLAGS_FIXED;
+       frame->bp = 0;
+       frame->ret_addr = (unsigned long) ret_from_fork;
+       p->thread.sp = (unsigned long) fork_frame;
diff --git a/queue-5.0/series b/queue-5.0/series
index c18ea0bf5170f88afcb506d1cdf9390f30cce9f4..928c99864c177fb3cc7a21871b90498d0332f204 100644 (file)
@@ -1 +1,43 @@
 locking-rwsem-prevent-decrement-of-reader-count-befo.patch
+x86-speculation-mds-revert-cpu-buffer-clear-on-double-fault-exit.patch
+x86-speculation-mds-improve-cpu-buffer-clear-documentation.patch
+objtool-fix-function-fallthrough-detection.patch
+arm64-dts-rockchip-fix-io-domain-voltage-setting-of-apio5-on-rockpro64.patch
+arm64-dts-rockchip-disable-dcmds-on-rk3399-s-emmc-controller.patch
+arm-dts-qcom-ipq4019-enlarge-pcie-bar-range.patch
+arm-dts-exynos-fix-interrupt-for-shared-eints-on-exynos5260.patch
+arm-dts-exynos-fix-audio-microphone-routing-on-odroid-xu3.patch
+mmc-sdhci-of-arasan-add-dts-property-to-disable-dcmds.patch
+arm-exynos-fix-a-leaked-reference-by-adding-missing-of_node_put.patch
+power-supply-axp288_charger-fix-unchecked-return-value.patch
+power-supply-axp288_fuel_gauge-add-acepc-t8-and-t11-mini-pcs-to-the-blacklist.patch
+arm64-mmap-ensure-file-offset-is-treated-as-unsigned.patch
+arm64-arch_timer-ensure-counter-register-reads-occur-with-seqlock-held.patch
+arm64-compat-reduce-address-limit.patch
+arm64-clear-osdlr_el1-on-cpu-boot.patch
+arm64-save-and-restore-osdlr_el1-across-suspend-resume.patch
+sched-x86-save-flags-on-context-switch.patch
+x86-mce-add-an-mce-record-filtering-function.patch
+x86-mce-amd-turn-off-mc4_misc-thresholding-on-all-family-0x15-models.patch
+x86-mce-amd-carve-out-the-mc4_misc-thresholding-quirk.patch
+x86-mce-group-amd-function-prototypes-in-asm-mce.h.patch
+x86-mce-amd-don-t-report-l1-btb-mca-errors-on-some-family-17h-models.patch
+crypto-crypto4xx-fix-ctr-aes-missing-output-iv.patch
+crypto-crypto4xx-fix-cfb-and-ofb-overran-dst-buffer-issues.patch
+crypto-salsa20-don-t-access-already-freed-walk.iv.patch
+crypto-lrw-don-t-access-already-freed-walk.iv.patch
+crypto-chacha-generic-fix-use-as-arm64-no-neon-fallback.patch
+crypto-chacha20poly1305-set-cra_name-correctly.patch
+crypto-ccp-do-not-free-psp_master-when-platform_init-fails.patch
+crypto-vmx-fix-copy-paste-error-in-ctr-mode.patch
+crypto-skcipher-don-t-warn-on-unprocessed-data-after-slow-walk-step.patch
+crypto-crct10dif-generic-fix-use-via-crypto_shash_digest.patch
+crypto-x86-crct10dif-pcl-fix-use-via-crypto_shash_digest.patch
+crypto-arm64-gcm-aes-ce-fix-no-neon-fallback-code.patch
+crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch
+crypto-rockchip-update-iv-buffer-to-contain-the-next-iv.patch
+crypto-caam-qi2-fix-zero-length-buffer-dma-mapping.patch
+crypto-caam-qi2-fix-dma-mapping-of-stack-memory.patch
+crypto-caam-qi2-generate-hash-keys-in-place.patch
+crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch
+crypto-arm64-aes-neonbs-don-t-access-already-freed-walk.iv.patch
diff --git a/queue-5.0/x86-mce-add-an-mce-record-filtering-function.patch b/queue-5.0/x86-mce-add-an-mce-record-filtering-function.patch
new file mode 100644 (file)
index 0000000..f43cec1
--- /dev/null
@@ -0,0 +1,82 @@
+From 45d4b7b9cb88526f6d5bd4c03efab88d75d10e4f Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Mon, 25 Mar 2019 16:34:22 +0000
+Subject: x86/MCE: Add an MCE-record filtering function
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit 45d4b7b9cb88526f6d5bd4c03efab88d75d10e4f upstream.
+
+Some systems may report spurious MCA errors. In general, spurious MCA
+errors may be disabled by clearing a particular bit in MCA_CTL. However,
+clearing a bit in MCA_CTL may not be recommended for some errors, so the
+only option is to ignore them.
+
+An MCA error is printed and handled after it has been added to the MCE
+event pool. So an MCA error can be ignored by not adding it to that pool
+in the first place.
+
+Add such a filtering function.
+
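+A small userspace sketch of the filter-before-enqueue idea (not the
+kernel code; filter_record() plays the role of the filter_mce() stub
+added below, which simply returns false for now):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct record { int bank; unsigned long long status; };
+
+  /* Default filter: keep everything. A vendor-specific variant can
+   * return true to drop a record before it is ever handled. */
+  static bool filter_record(const struct record *r)
+  {
+          (void)r;
+          return false;
+  }
+
+  /* Records are only printed/handled once they are in the pool, so
+   * filtering at the add step suppresses an error entirely. */
+  static int pool_add(const struct record *r)
+  {
+          if (filter_record(r))
+                  return -1;
+          printf("queued record from bank %d\n", r->bank);
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct record r = { 4, 0 };
+
+          return pool_add(&r) ? 1 : 0;
+  }
+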
+ [ bp: Move function prototype to the internal header and massage. ]
+
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: "clemej@gmail.com" <clemej@gmail.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Pu Wen <puwen@hygon.cn>
+Cc: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Cc: "rafal@milecki.pl" <rafal@milecki.pl>
+Cc: Shirish S <Shirish.S@amd.com>
+Cc: <stable@vger.kernel.org> # 5.0.x
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20190325163410.171021-1-Yazen.Ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/core.c     |    5 +++++
+ arch/x86/kernel/cpu/mce/genpool.c  |    3 +++
+ arch/x86/kernel/cpu/mce/internal.h |    3 +++
+ 3 files changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1801,6 +1801,11 @@ static void __mcheck_cpu_init_timer(void
+       mce_start_timer(t);
+ }
++bool filter_mce(struct mce *m)
++{
++      return false;
++}
++
+ /* Handle unconfigured int18 (should never happen) */
+ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ {
+--- a/arch/x86/kernel/cpu/mce/genpool.c
++++ b/arch/x86/kernel/cpu/mce/genpool.c
+@@ -99,6 +99,9 @@ int mce_gen_pool_add(struct mce *mce)
+ {
+       struct mce_evt_llist *node;
++      if (filter_mce(mce))
++              return -EINVAL;
++
+       if (!mce_evt_pool)
+               return -EINVAL;
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -173,4 +173,7 @@ struct mca_msr_regs {
+ extern struct mca_msr_regs msr_ops;
++/* Decide whether to add MCE record to MCE event pool or filter it out. */
++extern bool filter_mce(struct mce *m);
++
+ #endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/queue-5.0/x86-mce-amd-carve-out-the-mc4_misc-thresholding-quirk.patch b/queue-5.0/x86-mce-amd-carve-out-the-mc4_misc-thresholding-quirk.patch
new file mode 100644 (file)
index 0000000..2268af7
--- /dev/null
@@ -0,0 +1,124 @@
+From 30aa3d26edb0f3d7992757287eec0ca588a5c259 Mon Sep 17 00:00:00 2001
+From: Shirish S <Shirish.S@amd.com>
+Date: Wed, 16 Jan 2019 15:10:40 +0000
+Subject: x86/MCE/AMD: Carve out the MC4_MISC thresholding quirk
+
+From: Shirish S <Shirish.S@amd.com>
+
+commit 30aa3d26edb0f3d7992757287eec0ca588a5c259 upstream.
+
+The MC4_MISC thresholding quirk needs to be applied during S5 -> S0 and
+S3 -> S0 state transitions, which follow different code paths. Carve it
+out into a separate function and call it in mce_amd_feature_init(),
+where the two code paths of the state transitions converge.
+
+ [ bp: massage commit message and the carved out function. ]
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Yazen Ghannam <yazen.ghannam@amd.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1547651417-23583-3-git-send-email-shirish.s@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/amd.c  |   36 ++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/mce/core.c |   29 -----------------------------
+ 2 files changed, 36 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -545,6 +545,40 @@ out:
+       return offset;
+ }
++/*
++ * Turn off MC4_MISC thresholding banks on all family 0x15 models since
++ * they're not supported there.
++ */
++void disable_err_thresholding(struct cpuinfo_x86 *c)
++{
++      int i;
++      u64 hwcr;
++      bool need_toggle;
++      u32 msrs[] = {
++              0x00000413, /* MC4_MISC0 */
++              0xc0000408, /* MC4_MISC1 */
++      };
++
++      if (c->x86 != 0x15)
++              return;
++
++      rdmsrl(MSR_K7_HWCR, hwcr);
++
++      /* McStatusWrEn has to be set */
++      need_toggle = !(hwcr & BIT(18));
++
++      if (need_toggle)
++              wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
++
++      /* Clear CntP bit safely */
++      for (i = 0; i < ARRAY_SIZE(msrs); i++)
++              msr_clear_bit(msrs[i], 62);
++
++      /* restore old settings */
++      if (need_toggle)
++              wrmsrl(MSR_K7_HWCR, hwcr);
++}
++
+ /* cpu init entry point, called from mce.c with preempt off */
+ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+ {
+@@ -552,6 +586,8 @@ void mce_amd_feature_init(struct cpuinfo
+       unsigned int bank, block, cpu = smp_processor_id();
+       int offset = -1;
++      disable_err_thresholding(c);
++
+       for (bank = 0; bank < mca_cfg.banks; ++bank) {
+               if (mce_flags.smca)
+                       smca_configure(bank, cpu);
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1612,35 +1612,6 @@ static int __mcheck_cpu_apply_quirks(str
+               if (c->x86 == 0x15 && c->x86_model <= 0xf)
+                       mce_flags.overflow_recov = 1;
+-              /*
+-               * Turn off MC4_MISC thresholding banks on all models since
+-               * they're not supported there.
+-               */
+-              if (c->x86 == 0x15) {
+-                      int i;
+-                      u64 hwcr;
+-                      bool need_toggle;
+-                      u32 msrs[] = {
+-                              0x00000413, /* MC4_MISC0 */
+-                              0xc0000408, /* MC4_MISC1 */
+-                      };
+-
+-                      rdmsrl(MSR_K7_HWCR, hwcr);
+-
+-                      /* McStatusWrEn has to be set */
+-                      need_toggle = !(hwcr & BIT(18));
+-
+-                      if (need_toggle)
+-                              wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
+-
+-                      /* Clear CntP bit safely */
+-                      for (i = 0; i < ARRAY_SIZE(msrs); i++)
+-                              msr_clear_bit(msrs[i], 62);
+-
+-                      /* restore old settings */
+-                      if (need_toggle)
+-                              wrmsrl(MSR_K7_HWCR, hwcr);
+-              }
+       }
+       if (c->x86_vendor == X86_VENDOR_INTEL) {
diff --git a/queue-5.0/x86-mce-amd-don-t-report-l1-btb-mca-errors-on-some-family-17h-models.patch b/queue-5.0/x86-mce-amd-don-t-report-l1-btb-mca-errors-on-some-family-17h-models.patch
new file mode 100644 (file)
index 0000000..d0d56d9
--- /dev/null
@@ -0,0 +1,198 @@
+From 71a84402b93e5fbd8f817f40059c137e10171788 Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Mon, 25 Mar 2019 16:34:22 +0000
+Subject: x86/MCE/AMD: Don't report L1 BTB MCA errors on some family 17h models
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit 71a84402b93e5fbd8f817f40059c137e10171788 upstream.
+
+AMD family 17h Models 10h-2Fh may report a high number of L1 BTB MCA
+errors under certain conditions. The errors are benign and can safely be
+ignored. However, the high error rate may cause the MCA threshold
+counter to overflow, causing a high rate of thresholding interrupts.
+
+In addition, users may see the errors reported through the AMD MCE
+decoder module, even with the interrupt disabled, due to MCA polling.
+
+Clear the "Counter Present" bit in the Instruction Fetch bank's
+MCA_MISC0 register. This will prevent enabling MCA thresholding on this
+bank, which will prevent the high interrupt rate due to this error.
+
+Define an AMD-specific function to filter these errors from the MCE
+event pool so that they don't get reported during early boot.
+
+While at it, rename the filter function in EDAC/mce_amd to avoid a
+naming conflict.
+
+ [ bp: Move function prototype to the internal header and
+   massage/cleanup, fix typos. ]
+
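+A standalone sketch of the signature test amd_filter_mce() performs in
+this patch: the extended error code sits in bits 16-21 of MCA_STATUS,
+and only IF-bank errors with XEC 10 on Family 17h Models 10h-2Fh are
+dropped (the struct and the bank-type enum below are simplified
+stand-ins for the kernel types):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum bank_type { BANK_IF, BANK_OTHER };
+
+  struct fake_mce {
+          unsigned long long status;
+          enum bank_type bank_type;
+  };
+
+  static bool drop_l1_btb_error(const struct fake_mce *m,
+                                unsigned int family, unsigned int model)
+  {
+          unsigned int xec = (m->status >> 16) & 0x3F; /* extended error code */
+
+          /* Family 17h Models 10h-2Fh, IF bank, XEC 10: Erratum #1114. */
+          return family == 0x17 && model >= 0x10 && model <= 0x2F &&
+                 m->bank_type == BANK_IF && xec == 10;
+  }
+
+  int main(void)
+  {
+          struct fake_mce m = { .status = 10ULL << 16, .bank_type = BANK_IF };
+
+          printf("drop: %d\n", drop_l1_btb_error(&m, 0x17, 0x18));
+          return 0;
+  }
+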
+Reported-by: Rafał Miłecki <rafal@milecki.pl>
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "clemej@gmail.com" <clemej@gmail.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Pu Wen <puwen@hygon.cn>
+Cc: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Cc: Shirish S <Shirish.S@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Cc: <stable@vger.kernel.org> # 5.0.x: c95b323dcd35: x86/MCE/AMD: Turn off MC4_MISC thresholding on all family 0x15 models
+Cc: <stable@vger.kernel.org> # 5.0.x: 30aa3d26edb0: x86/MCE/AMD: Carve out the MC4_MISC thresholding quirk
+Cc: <stable@vger.kernel.org> # 5.0.x: 9308fd407455: x86/MCE: Group AMD function prototypes in <asm/mce.h>
+Cc: <stable@vger.kernel.org> # 5.0.x
+Link: https://lkml.kernel.org/r/20190325163410.171021-2-Yazen.Ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/amd.c      |   52 +++++++++++++++++++++++++++----------
+ arch/x86/kernel/cpu/mce/core.c     |    3 ++
+ arch/x86/kernel/cpu/mce/internal.h |    6 ++++
+ drivers/edac/mce_amd.c             |    4 +-
+ 4 files changed, 50 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -545,33 +545,59 @@ out:
+       return offset;
+ }
++bool amd_filter_mce(struct mce *m)
++{
++      enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
++      struct cpuinfo_x86 *c = &boot_cpu_data;
++      u8 xec = (m->status >> 16) & 0x3F;
++
++      /* See Family 17h Models 10h-2Fh Erratum #1114. */
++      if (c->x86 == 0x17 &&
++          c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
++          bank_type == SMCA_IF && xec == 10)
++              return true;
++
++      return false;
++}
++
+ /*
+- * Turn off MC4_MISC thresholding banks on all family 0x15 models since
+- * they're not supported there.
++ * Turn off thresholding banks for the following conditions:
++ * - MC4_MISC thresholding is not supported on Family 0x15.
++ * - Prevent possible spurious interrupts from the IF bank on Family 0x17
++ *   Models 0x10-0x2F due to Erratum #1114.
+  */
+-void disable_err_thresholding(struct cpuinfo_x86 *c)
++void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
+ {
+-      int i;
++      int i, num_msrs;
+       u64 hwcr;
+       bool need_toggle;
+-      u32 msrs[] = {
+-              0x00000413, /* MC4_MISC0 */
+-              0xc0000408, /* MC4_MISC1 */
+-      };
++      u32 msrs[NR_BLOCKS];
+-      if (c->x86 != 0x15)
++      if (c->x86 == 0x15 && bank == 4) {
++              msrs[0] = 0x00000413; /* MC4_MISC0 */
++              msrs[1] = 0xc0000408; /* MC4_MISC1 */
++              num_msrs = 2;
++      } else if (c->x86 == 0x17 &&
++                 (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {
++
++              if (smca_get_bank_type(bank) != SMCA_IF)
++                      return;
++
++              msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
++              num_msrs = 1;
++      } else {
+               return;
++      }
+       rdmsrl(MSR_K7_HWCR, hwcr);
+       /* McStatusWrEn has to be set */
+       need_toggle = !(hwcr & BIT(18));
+-
+       if (need_toggle)
+               wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
+       /* Clear CntP bit safely */
+-      for (i = 0; i < ARRAY_SIZE(msrs); i++)
++      for (i = 0; i < num_msrs; i++)
+               msr_clear_bit(msrs[i], 62);
+       /* restore old settings */
+@@ -586,12 +612,12 @@ void mce_amd_feature_init(struct cpuinfo
+       unsigned int bank, block, cpu = smp_processor_id();
+       int offset = -1;
+-      disable_err_thresholding(c);
+-
+       for (bank = 0; bank < mca_cfg.banks; ++bank) {
+               if (mce_flags.smca)
+                       smca_configure(bank, cpu);
++              disable_err_thresholding(c, bank);
++
+               for (block = 0; block < NR_BLOCKS; ++block) {
+                       address = get_block_address(address, low, high, bank, block);
+                       if (!address)
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1773,6 +1773,9 @@ static void __mcheck_cpu_init_timer(void
+ bool filter_mce(struct mce *m)
+ {
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++              return amd_filter_mce(m);
++
+       return false;
+ }
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -176,4 +176,10 @@ extern struct mca_msr_regs msr_ops;
+ /* Decide whether to add MCE record to MCE event pool or filter it out. */
+ extern bool filter_mce(struct mce *m);
++#ifdef CONFIG_X86_MCE_AMD
++extern bool amd_filter_mce(struct mce *m);
++#else
++static inline bool amd_filter_mce(struct mce *m)                      { return false; };
++#endif
++
+ #endif /* __X86_MCE_INTERNAL_H__ */
+--- a/drivers/edac/mce_amd.c
++++ b/drivers/edac/mce_amd.c
+@@ -914,7 +914,7 @@ static inline void amd_decode_err_code(u
+ /*
+  * Filter out unwanted MCE signatures here.
+  */
+-static bool amd_filter_mce(struct mce *m)
++static bool ignore_mce(struct mce *m)
+ {
+       /*
+        * NB GART TLB error reporting is disabled by default.
+@@ -948,7 +948,7 @@ amd_decode_mce(struct notifier_block *nb
+       unsigned int fam = x86_family(m->cpuid);
+       int ecc;
+-      if (amd_filter_mce(m))
++      if (ignore_mce(m))
+               return NOTIFY_STOP;
+       pr_emerg(HW_ERR "%s\n", decode_error_status(m));
diff --git a/queue-5.0/x86-mce-amd-turn-off-mc4_misc-thresholding-on-all-family-0x15-models.patch b/queue-5.0/x86-mce-amd-turn-off-mc4_misc-thresholding-on-all-family-0x15-models.patch
new file mode 100644 (file)
index 0000000..577152d
--- /dev/null
@@ -0,0 +1,45 @@
+From c95b323dcd3598dd7ef5005d6723c1ba3b801093 Mon Sep 17 00:00:00 2001
+From: Shirish S <Shirish.S@amd.com>
+Date: Thu, 10 Jan 2019 07:54:40 +0000
+Subject: x86/MCE/AMD: Turn off MC4_MISC thresholding on all family 0x15 models
+
+From: Shirish S <Shirish.S@amd.com>
+
+commit c95b323dcd3598dd7ef5005d6723c1ba3b801093 upstream.
+
+MC4_MISC thresholding is not supported on all family 0x15 processors,
+hence skip the x86_model check when applying the quirk.
+
+ [ bp: massage commit message. ]
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1547106849-3476-2-git-send-email-shirish.s@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/core.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1613,11 +1613,10 @@ static int __mcheck_cpu_apply_quirks(str
+                       mce_flags.overflow_recov = 1;
+               /*
+-               * Turn off MC4_MISC thresholding banks on those models since
++               * Turn off MC4_MISC thresholding banks on all models since
+                * they're not supported there.
+                */
+-              if (c->x86 == 0x15 &&
+-                  (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
++              if (c->x86 == 0x15) {
+                       int i;
+                       u64 hwcr;
+                       bool need_toggle;
diff --git a/queue-5.0/x86-mce-group-amd-function-prototypes-in-asm-mce.h.patch b/queue-5.0/x86-mce-group-amd-function-prototypes-in-asm-mce.h.patch
new file mode 100644 (file)
index 0000000..373efbd
--- /dev/null
@@ -0,0 +1,79 @@
+From 9308fd4074551f222f30322d1ee8c5aff18e9747 Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Fri, 22 Mar 2019 20:29:00 +0000
+Subject: x86/MCE: Group AMD function prototypes in <asm/mce.h>
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit 9308fd4074551f222f30322d1ee8c5aff18e9747 upstream.
+
+There are two groups of "ifdef CONFIG_X86_MCE_AMD" function prototypes
+in <asm/mce.h>. Merge these two groups.
+
+No functional change.
+
+ [ bp: align vertically. ]
+
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: "clemej@gmail.com" <clemej@gmail.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Pu Wen <puwen@hygon.cn>
+Cc: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Cc: "rafal@milecki.pl" <rafal@milecki.pl>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20190322202848.20749-3-Yazen.Ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mce.h |   25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -209,16 +209,6 @@ static inline void cmci_rediscover(void)
+ static inline void cmci_recheck(void) {}
+ #endif
+-#ifdef CONFIG_X86_MCE_AMD
+-void mce_amd_feature_init(struct cpuinfo_x86 *c);
+-int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
+-#else
+-static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+-static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
+-#endif
+-
+-static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+-
+ int mce_available(struct cpuinfo_x86 *c);
+ bool mce_is_memory_error(struct mce *m);
+ bool mce_is_correctable(struct mce *m);
+@@ -338,12 +328,19 @@ extern bool amd_mce_is_memory_error(stru
+ extern int mce_threshold_create_device(unsigned int cpu);
+ extern int mce_threshold_remove_device(unsigned int cpu);
+-#else
++void mce_amd_feature_init(struct cpuinfo_x86 *c);
++int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
+-static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
+-static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
+-static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
++#else
++static inline int mce_threshold_create_device(unsigned int cpu)               { return 0; };
++static inline int mce_threshold_remove_device(unsigned int cpu)               { return 0; };
++static inline bool amd_mce_is_memory_error(struct mce *m)             { return false; };
++static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)                { }
++static inline int
++umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)        { return -EINVAL; };
+ #endif
++static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c)      { return mce_amd_feature_init(c); }
++
+ #endif /* _ASM_X86_MCE_H */
diff --git a/queue-5.0/x86-speculation-mds-improve-cpu-buffer-clear-documentation.patch b/queue-5.0/x86-speculation-mds-improve-cpu-buffer-clear-documentation.patch
new file mode 100644 (file)
index 0000000..996ae76
--- /dev/null
@@ -0,0 +1,80 @@
+From 9d8d0294e78a164d407133dea05caf4b84247d6a Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 14 May 2019 13:24:40 -0700
+Subject: x86/speculation/mds: Improve CPU buffer clear documentation
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 9d8d0294e78a164d407133dea05caf4b84247d6a upstream.
+
+On x86_64, all returns to usermode go through
+prepare_exit_to_usermode(), with the sole exception of do_nmi().
+This even includes machine checks -- this was added several years
+ago to support MCE recovery.  Update the documentation.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Fixes: 04dcbdb80578 ("x86/speculation/mds: Clear CPU buffers on exit to user")
+Link: http://lkml.kernel.org/r/999fa9e126ba6a48e9d214d2f18dbde5c62ac55c.1557865329.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/x86/mds.rst |   39 +++++++--------------------------------
+ 1 file changed, 7 insertions(+), 32 deletions(-)
+
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -142,38 +142,13 @@ Mitigation points
+    mds_user_clear.
+    The mitigation is invoked in prepare_exit_to_usermode() which covers
+-   most of the kernel to user space transitions. There are a few exceptions
+-   which are not invoking prepare_exit_to_usermode() on return to user
+-   space. These exceptions use the paranoid exit code.
+-
+-   - Non Maskable Interrupt (NMI):
+-
+-     Access to sensible data like keys, credentials in the NMI context is
+-     mostly theoretical: The CPU can do prefetching or execute a
+-     misspeculated code path and thereby fetching data which might end up
+-     leaking through a buffer.
+-
+-     But for mounting other attacks the kernel stack address of the task is
+-     already valuable information. So in full mitigation mode, the NMI is
+-     mitigated on the return from do_nmi() to provide almost complete
+-     coverage.
+-
+-   - Machine Check Exception (#MC):
+-
+-     Another corner case is a #MC which hits between the CPU buffer clear
+-     invocation and the actual return to user. As this still is in kernel
+-     space it takes the paranoid exit path which does not clear the CPU
+-     buffers. So the #MC handler repopulates the buffers to some
+-     extent. Machine checks are not reliably controllable and the window is
+-     extremly small so mitigation would just tick a checkbox that this
+-     theoretical corner case is covered. To keep the amount of special
+-     cases small, ignore #MC.
+-
+-   - Debug Exception (#DB):
+-
+-     This takes the paranoid exit path only when the INT1 breakpoint is in
+-     kernel space. #DB on a user space address takes the regular exit path,
+-     so no extra mitigation required.
++   all but one of the kernel to user space transitions.  The exception
++   is when we return from a Non Maskable Interrupt (NMI), which is
++   handled directly in do_nmi().
++
++   (The reason that NMI is special is that prepare_exit_to_usermode() can
++    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
++    enable IRQs with NMIs blocked.)
+ 2. C-State transition
diff --git a/queue-5.0/x86-speculation-mds-revert-cpu-buffer-clear-on-double-fault-exit.patch b/queue-5.0/x86-speculation-mds-revert-cpu-buffer-clear-on-double-fault-exit.patch
new file mode 100644 (file)
index 0000000..80daf08
--- /dev/null
@@ -0,0 +1,73 @@
+From 88640e1dcd089879530a49a8d212d1814678dfe7 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 14 May 2019 13:24:39 -0700
+Subject: x86/speculation/mds: Revert CPU buffer clear on double fault exit
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 88640e1dcd089879530a49a8d212d1814678dfe7 upstream.
+
+The double fault ESPFIX path doesn't return to user mode at all --
+it returns to the kernel by simulating a #GP fault.
+prepare_exit_to_usermode() will run on the way out of
+general_protection before running user code.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Fixes: 04dcbdb80578 ("x86/speculation/mds: Clear CPU buffers on exit to user")
+Link: http://lkml.kernel.org/r/ac97612445c0a44ee10374f6ea79c222fe22a5c4.1557865329.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/x86/mds.rst |    7 -------
+ arch/x86/kernel/traps.c   |    8 --------
+ 2 files changed, 15 deletions(-)
+
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -158,13 +158,6 @@ Mitigation points
+      mitigated on the return from do_nmi() to provide almost complete
+      coverage.
+-   - Double fault (#DF):
+-
+-     A double fault is usually fatal, but the ESPFIX workaround, which can
+-     be triggered from user space through modify_ldt(2) is a recoverable
+-     double fault. #DF uses the paranoid exit path, so explicit mitigation
+-     in the double fault handler is required.
+-
+    - Machine Check Exception (#MC):
+      Another corner case is a #MC which hits between the CPU buffer clear
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -58,7 +58,6 @@
+ #include <asm/alternative.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/trace/mpx.h>
+-#include <asm/nospec-branch.h>
+ #include <asm/mpx.h>
+ #include <asm/vm86.h>
+ #include <asm/umip.h>
+@@ -367,13 +366,6 @@ dotraplinkage void do_double_fault(struc
+               regs->ip = (unsigned long)general_protection;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
+-              /*
+-               * This situation can be triggered by userspace via
+-               * modify_ldt(2) and the return does not take the regular
+-               * user space exit, so a CPU buffer clear is required when
+-               * MDS mitigation is enabled.
+-               */
+-              mds_user_clear_cpu_buffers();
+               return;
+       }
+ #endif