git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 15 Apr 2019 16:43:06 +0000 (18:43 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 15 Apr 2019 16:43:06 +0000 (18:43 +0200)
added patches:
arm-dts-am335x-evm-correct-the-regulators-for-the-audio-codec.patch
arm-dts-am335x-evmsk-correct-the-regulators-for-the-audio-codec.patch
arm-dts-at91-fix-typo-in-isc_d0-on-pc9.patch
arm-dts-rockchip-fix-rk3288-cpu-opp-node-reference.patch
arm-dts-rockchip-fix-sd-card-detection-on-rk3288-tinker.patch
arm-omap1-ams-delta-fix-broken-gpio-id-allocation.patch
arm64-backtrace-don-t-bother-trying-to-unwind-the-userspace-stack.patch
arm64-dts-rockchip-fix-rk3328-rgmii-high-tx-error-rate.patch
arm64-dts-rockchip-fix-vcc_host1_5v-gpio-polarity-on-rk3328-rock64.patch
arm64-ftrace-fix-inadvertent-bug-in-trampoline-check.patch
arm64-futex-fix-futex_wake_op-atomic-ops-with-non-zero-result-value.patch
csky-fix-syscall_get_arguments-and-syscall_set_arguments.patch
dm-disable-discard-if-the-underlying-storage-no-longer-supports-it.patch
dm-integrity-change-memcmp-to-strncmp-in-dm_integrity_ctr.patch
dm-integrity-fix-deadlock-with-overlapping-i-o.patch
dm-revert-8f50e358153d-dm-limit-the-max-bio-size-as-bio_max_pages-page_size.patch
dm-table-propagate-bdi_cap_stable_writes-to-fix-sporadic-checksum-errors.patch
ib-mlx5-reset-access-mask-when-looping-inside-page-fault-handler.patch
pci-add-function-1-dma-alias-quirk-for-marvell-9170-sata-controller.patch
pci-pciehp-ignore-link-state-changes-after-powering-off-a-slot.patch
powerpc-64s-radix-fix-radix-segment-exception-handling.patch
sched-fair-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch
x86-asm-remove-dead-__gnuc__-conditionals.patch
x86-asm-use-stricter-assembly-constraints-in-bitops.patch
x86-perf-amd-remove-need-to-check-running-bit-in-nmi-handler.patch
x86-perf-amd-resolve-nmi-latency-issues-for-active-pmcs.patch
x86-perf-amd-resolve-race-condition-when-disabling-pmc.patch
xen-prevent-buffer-overflow-in-privcmd-ioctl.patch
xprtrdma-fix-helper-that-drains-the-transport.patch
xtensa-fix-return_address.patch

31 files changed:
queue-5.0/arm-dts-am335x-evm-correct-the-regulators-for-the-audio-codec.patch [new file with mode: 0644]
queue-5.0/arm-dts-am335x-evmsk-correct-the-regulators-for-the-audio-codec.patch [new file with mode: 0644]
queue-5.0/arm-dts-at91-fix-typo-in-isc_d0-on-pc9.patch [new file with mode: 0644]
queue-5.0/arm-dts-rockchip-fix-rk3288-cpu-opp-node-reference.patch [new file with mode: 0644]
queue-5.0/arm-dts-rockchip-fix-sd-card-detection-on-rk3288-tinker.patch [new file with mode: 0644]
queue-5.0/arm-omap1-ams-delta-fix-broken-gpio-id-allocation.patch [new file with mode: 0644]
queue-5.0/arm64-backtrace-don-t-bother-trying-to-unwind-the-userspace-stack.patch [new file with mode: 0644]
queue-5.0/arm64-dts-rockchip-fix-rk3328-rgmii-high-tx-error-rate.patch [new file with mode: 0644]
queue-5.0/arm64-dts-rockchip-fix-vcc_host1_5v-gpio-polarity-on-rk3328-rock64.patch [new file with mode: 0644]
queue-5.0/arm64-ftrace-fix-inadvertent-bug-in-trampoline-check.patch [new file with mode: 0644]
queue-5.0/arm64-futex-fix-futex_wake_op-atomic-ops-with-non-zero-result-value.patch [new file with mode: 0644]
queue-5.0/csky-fix-syscall_get_arguments-and-syscall_set_arguments.patch [new file with mode: 0644]
queue-5.0/dm-disable-discard-if-the-underlying-storage-no-longer-supports-it.patch [new file with mode: 0644]
queue-5.0/dm-integrity-change-memcmp-to-strncmp-in-dm_integrity_ctr.patch [new file with mode: 0644]
queue-5.0/dm-integrity-fix-deadlock-with-overlapping-i-o.patch [new file with mode: 0644]
queue-5.0/dm-revert-8f50e358153d-dm-limit-the-max-bio-size-as-bio_max_pages-page_size.patch [new file with mode: 0644]
queue-5.0/dm-table-propagate-bdi_cap_stable_writes-to-fix-sporadic-checksum-errors.patch [new file with mode: 0644]
queue-5.0/ib-mlx5-reset-access-mask-when-looping-inside-page-fault-handler.patch [new file with mode: 0644]
queue-5.0/pci-add-function-1-dma-alias-quirk-for-marvell-9170-sata-controller.patch [new file with mode: 0644]
queue-5.0/pci-pciehp-ignore-link-state-changes-after-powering-off-a-slot.patch [new file with mode: 0644]
queue-5.0/powerpc-64s-radix-fix-radix-segment-exception-handling.patch [new file with mode: 0644]
queue-5.0/sched-fair-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/x86-asm-remove-dead-__gnuc__-conditionals.patch [new file with mode: 0644]
queue-5.0/x86-asm-use-stricter-assembly-constraints-in-bitops.patch [new file with mode: 0644]
queue-5.0/x86-perf-amd-remove-need-to-check-running-bit-in-nmi-handler.patch [new file with mode: 0644]
queue-5.0/x86-perf-amd-resolve-nmi-latency-issues-for-active-pmcs.patch [new file with mode: 0644]
queue-5.0/x86-perf-amd-resolve-race-condition-when-disabling-pmc.patch [new file with mode: 0644]
queue-5.0/xen-prevent-buffer-overflow-in-privcmd-ioctl.patch [new file with mode: 0644]
queue-5.0/xprtrdma-fix-helper-that-drains-the-transport.patch [new file with mode: 0644]
queue-5.0/xtensa-fix-return_address.patch [new file with mode: 0644]

diff --git a/queue-5.0/arm-dts-am335x-evm-correct-the-regulators-for-the-audio-codec.patch b/queue-5.0/arm-dts-am335x-evm-correct-the-regulators-for-the-audio-codec.patch
new file mode 100644 (file)
index 0000000..e722283
--- /dev/null
@@ -0,0 +1,63 @@
+From 4f96dc0a3e79ec257a2b082dab3ee694ff88c317 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Date: Fri, 15 Mar 2019 12:59:09 +0200
+Subject: ARM: dts: am335x-evm: Correct the regulators for the audio codec
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+commit 4f96dc0a3e79ec257a2b082dab3ee694ff88c317 upstream.
+
+Correctly map the regulators used by tlv320aic3106.
+Both 1.8V and 3.3V for the codec are derived from VBAT via fixed regulators.
+
+Cc: <Stable@vger.kernel.org> # v4.14+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/am335x-evm.dts |   26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -57,6 +57,24 @@
+               enable-active-high;
+       };
++      /* TPS79501 */
++      v1_8d_reg: fixedregulator-v1_8d {
++              compatible = "regulator-fixed";
++              regulator-name = "v1_8d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <1800000>;
++              regulator-max-microvolt = <1800000>;
++      };
++
++      /* TPS79501 */
++      v3_3d_reg: fixedregulator-v3_3d {
++              compatible = "regulator-fixed";
++              regulator-name = "v3_3d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <3300000>;
++              regulator-max-microvolt = <3300000>;
++      };
++
+       matrix_keypad: matrix_keypad0 {
+               compatible = "gpio-matrix-keypad";
+               debounce-delay-ms = <5>;
+@@ -499,10 +517,10 @@
+               status = "okay";
+               /* Regulators */
+-              AVDD-supply = <&vaux2_reg>;
+-              IOVDD-supply = <&vaux2_reg>;
+-              DRVDD-supply = <&vaux2_reg>;
+-              DVDD-supply = <&vbat>;
++              AVDD-supply = <&v3_3d_reg>;
++              IOVDD-supply = <&v3_3d_reg>;
++              DRVDD-supply = <&v3_3d_reg>;
++              DVDD-supply = <&v1_8d_reg>;
+       };
+ };
diff --git a/queue-5.0/arm-dts-am335x-evmsk-correct-the-regulators-for-the-audio-codec.patch b/queue-5.0/arm-dts-am335x-evmsk-correct-the-regulators-for-the-audio-codec.patch
new file mode 100644 (file)
index 0000000..aaef28c
--- /dev/null
@@ -0,0 +1,63 @@
+From 6691370646e844be98bb6558c024269791d20bd7 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Date: Fri, 15 Mar 2019 12:59:17 +0200
+Subject: ARM: dts: am335x-evmsk: Correct the regulators for the audio codec
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+commit 6691370646e844be98bb6558c024269791d20bd7 upstream.
+
+Correctly map the regulators used by tlv320aic3106.
+Both 1.8V and 3.3V for the codec are derived from VBAT via fixed regulators.
+
+Cc: <Stable@vger.kernel.org> # v4.14+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/am335x-evmsk.dts |   26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -73,6 +73,24 @@
+               enable-active-high;
+       };
++      /* TPS79518 */
++      v1_8d_reg: fixedregulator-v1_8d {
++              compatible = "regulator-fixed";
++              regulator-name = "v1_8d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <1800000>;
++              regulator-max-microvolt = <1800000>;
++      };
++
++      /* TPS78633 */
++      v3_3d_reg: fixedregulator-v3_3d {
++              compatible = "regulator-fixed";
++              regulator-name = "v3_3d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <3300000>;
++              regulator-max-microvolt = <3300000>;
++      };
++
+       leds {
+               pinctrl-names = "default";
+               pinctrl-0 = <&user_leds_s0>;
+@@ -501,10 +519,10 @@
+               status = "okay";
+               /* Regulators */
+-              AVDD-supply = <&vaux2_reg>;
+-              IOVDD-supply = <&vaux2_reg>;
+-              DRVDD-supply = <&vaux2_reg>;
+-              DVDD-supply = <&vbat>;
++              AVDD-supply = <&v3_3d_reg>;
++              IOVDD-supply = <&v3_3d_reg>;
++              DRVDD-supply = <&v3_3d_reg>;
++              DVDD-supply = <&v1_8d_reg>;
+       };
+ };
diff --git a/queue-5.0/arm-dts-at91-fix-typo-in-isc_d0-on-pc9.patch b/queue-5.0/arm-dts-at91-fix-typo-in-isc_d0-on-pc9.patch
new file mode 100644 (file)
index 0000000..b48374e
--- /dev/null
@@ -0,0 +1,34 @@
+From e7dfb6d04e4715be1f3eb2c60d97b753fd2e4516 Mon Sep 17 00:00:00 2001
+From: David Engraf <david.engraf@sysgo.com>
+Date: Mon, 11 Mar 2019 08:57:42 +0100
+Subject: ARM: dts: at91: Fix typo in ISC_D0 on PC9
+
+From: David Engraf <david.engraf@sysgo.com>
+
+commit e7dfb6d04e4715be1f3eb2c60d97b753fd2e4516 upstream.
+
+The function argument for the ISC_D0 on PC9 was incorrect. According to
+the documentation it should be 'C' aka 3.
+
+Signed-off-by: David Engraf <david.engraf@sysgo.com>
+Reviewed-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Fixes: 7f16cb676c00 ("ARM: at91/dt: add sama5d2 pinmux")
+Cc: <stable@vger.kernel.org> # v4.4+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/sama5d2-pinfunc.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -518,7 +518,7 @@
+ #define PIN_PC9__GPIO                 PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ                  PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP             PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4                        PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10                      74
+ #define PIN_PC10__GPIO                        PINMUX_PIN(PIN_PC10, 0, 0)
diff --git a/queue-5.0/arm-dts-rockchip-fix-rk3288-cpu-opp-node-reference.patch b/queue-5.0/arm-dts-rockchip-fix-rk3288-cpu-opp-node-reference.patch
new file mode 100644 (file)
index 0000000..e75b272
--- /dev/null
@@ -0,0 +1,54 @@
+From 6b2fde3dbfab6ebc45b0cd605e17ca5057ff9a3b Mon Sep 17 00:00:00 2001
+From: Jonas Karlman <jonas@kwiboo.se>
+Date: Sun, 24 Feb 2019 21:51:22 +0000
+Subject: ARM: dts: rockchip: fix rk3288 cpu opp node reference
+
+From: Jonas Karlman <jonas@kwiboo.se>
+
+commit 6b2fde3dbfab6ebc45b0cd605e17ca5057ff9a3b upstream.
+
+The following error can be seen during boot:
+
+  of: /cpus/cpu@501: Couldn't find opp node
+
+Change cpu nodes to use operating-points-v2 in order to fix this.
+
+Fixes: ce76de984649 ("ARM: dts: rockchip: convert rk3288 to operating-points-v2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/rk3288.dtsi |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -70,7 +70,7 @@
+                       compatible = "arm,cortex-a12";
+                       reg = <0x501>;
+                       resets = <&cru SRST_CORE1>;
+-                      operating-points = <&cpu_opp_table>;
++                      operating-points-v2 = <&cpu_opp_table>;
+                       #cooling-cells = <2>; /* min followed by max */
+                       clock-latency = <40000>;
+                       clocks = <&cru ARMCLK>;
+@@ -80,7 +80,7 @@
+                       compatible = "arm,cortex-a12";
+                       reg = <0x502>;
+                       resets = <&cru SRST_CORE2>;
+-                      operating-points = <&cpu_opp_table>;
++                      operating-points-v2 = <&cpu_opp_table>;
+                       #cooling-cells = <2>; /* min followed by max */
+                       clock-latency = <40000>;
+                       clocks = <&cru ARMCLK>;
+@@ -90,7 +90,7 @@
+                       compatible = "arm,cortex-a12";
+                       reg = <0x503>;
+                       resets = <&cru SRST_CORE3>;
+-                      operating-points = <&cpu_opp_table>;
++                      operating-points-v2 = <&cpu_opp_table>;
+                       #cooling-cells = <2>; /* min followed by max */
+                       clock-latency = <40000>;
+                       clocks = <&cru ARMCLK>;
diff --git a/queue-5.0/arm-dts-rockchip-fix-sd-card-detection-on-rk3288-tinker.patch b/queue-5.0/arm-dts-rockchip-fix-sd-card-detection-on-rk3288-tinker.patch
new file mode 100644 (file)
index 0000000..94cbaa9
--- /dev/null
@@ -0,0 +1,79 @@
+From 8dbc4d5ddb59f49cb3e85bccf42a4720b27a6576 Mon Sep 17 00:00:00 2001
+From: David Summers <beagleboard@davidjohnsummers.uk>
+Date: Sat, 9 Mar 2019 15:39:21 +0000
+Subject: ARM: dts: rockchip: Fix SD card detection on rk3288-tinker
+
+From: David Summers <beagleboard@davidjohnsummers.uk>
+
+commit 8dbc4d5ddb59f49cb3e85bccf42a4720b27a6576 upstream.
+
+The Problem:
+
+On the ASUS Tinker Board S, when booting from the eMMC with a card in
+the SD slot, there are constant errors.
+
+Also, after a warm reboot, U-Boot cannot access the SD slot.
+
+Cause:
+
+Identified by Robin Murphy @ ARM: the card detect on rk3288
+devices is pulled up by vccio-sd, so when the regulator powers this
+off, card detect gives spurious errors. A second problem, identified
+by Jonas Karlman, is that vccio-sd appears to be powered down during
+power down, which causes a problem when warm rebooting from the SD card.
+
+History:
+
+A common fault on these rk3288 boards, which implement the reference
+design.
+
+When this arose before:
+
+http://lists.infradead.org/pipermail/linux-arm-kernel/2014-August/281153.html
+
+And Ulf and Jaehoon clearly said this was a broken card detect design,
+which should be solved via polling.
+
+Solution:
+
+Hence broken-cd is set as a property. This cures the errors. The
+powering down of vccio-sd during reboot is cured by adding
+regulator-boot-on.
+
+This solution has been fairly widely reviewed and tested.
+
+Fixes: e58c5e739d6f ("ARM: dts: rockchip: move shared tinker-board nodes to a common dtsi")
+Cc: stable@vger.kernel.org
+[Heiko: slightly inaccurate fixes but tinker is a sbc (aka like a Pi) where
+ we can hopefully expect people not to rely on overly old stable kernels]
+Signed-off-by: David Summers <beagleboard@davidjohnsummers.uk>
+Reviewed-by: Jonas Karlman <jonas@kwiboo.se>
+Tested-by: Jonas Karlman <jonas@kwiboo.se>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/rk3288-tinker.dtsi |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
++++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
+@@ -254,6 +254,7 @@
+                       };
+                       vccio_sd: LDO_REG5 {
++                              regulator-boot-on;
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-name = "vccio_sd";
+@@ -430,7 +431,7 @@
+       bus-width = <4>;
+       cap-mmc-highspeed;
+       cap-sd-highspeed;
+-      card-detect-delay = <200>;
++      broken-cd;
+       disable-wp;                     /* wp not hooked up */
+       pinctrl-names = "default";
+       pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/queue-5.0/arm-omap1-ams-delta-fix-broken-gpio-id-allocation.patch b/queue-5.0/arm-omap1-ams-delta-fix-broken-gpio-id-allocation.patch
new file mode 100644 (file)
index 0000000..6da31ec
--- /dev/null
@@ -0,0 +1,45 @@
+From 3e2cf62efec52fb49daed437cc486c3cb9a0afa2 Mon Sep 17 00:00:00 2001
+From: Janusz Krzysztofik <jmkrzyszt@gmail.com>
+Date: Tue, 19 Mar 2019 21:19:52 +0100
+Subject: ARM: OMAP1: ams-delta: Fix broken GPIO ID allocation
+
+From: Janusz Krzysztofik <jmkrzyszt@gmail.com>
+
+commit 3e2cf62efec52fb49daed437cc486c3cb9a0afa2 upstream.
+
+In order to request dynamic allocation of GPIO IDs, a negative number
+should be passed as a base GPIO ID via platform data.  Unfortunately,
+commit 771e53c4d1a1 ("ARM: OMAP1: ams-delta: Drop board specific global
+GPIO numbers") didn't follow that rule while switching to dynamically
+allocated GPIO IDs for Amstrad Delta latches, making their IDs
+overlapping with those already assigned to OMAP GPIO devices.  Fix it.
+
+Fixes: 771e53c4d1a1 ("ARM: OMAP1: ams-delta: Drop board specific global GPIO numbers")
+Signed-off-by: Janusz Krzysztofik <jmkrzyszt@gmail.com>
+Cc: stable@vger.kernel.org
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-omap1/board-ams-delta.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/mach-omap1/board-ams-delta.c
++++ b/arch/arm/mach-omap1/board-ams-delta.c
+@@ -182,6 +182,7 @@ static struct resource latch1_resources[
+ static struct bgpio_pdata latch1_pdata = {
+       .label  = LATCH1_LABEL,
++      .base   = -1,
+       .ngpio  = LATCH1_NGPIO,
+ };
+@@ -219,6 +220,7 @@ static struct resource latch2_resources[
+ static struct bgpio_pdata latch2_pdata = {
+       .label  = LATCH2_LABEL,
++      .base   = -1,
+       .ngpio  = LATCH2_NGPIO,
+ };
diff --git a/queue-5.0/arm64-backtrace-don-t-bother-trying-to-unwind-the-userspace-stack.patch b/queue-5.0/arm64-backtrace-don-t-bother-trying-to-unwind-the-userspace-stack.patch
new file mode 100644 (file)
index 0000000..023026f
--- /dev/null
@@ -0,0 +1,72 @@
+From 1e6f5440a6814d28c32d347f338bfef68bc3e69d Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 8 Apr 2019 17:56:34 +0100
+Subject: arm64: backtrace: Don't bother trying to unwind the userspace stack
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 1e6f5440a6814d28c32d347f338bfef68bc3e69d upstream.
+
+Calling dump_backtrace() with a pt_regs argument corresponding to
+userspace doesn't make any sense and our unwinder will simply print
+"Call trace:" before unwinding the stack looking for user frames.
+
+Rather than go through this song and dance, just return early if we're
+passed a user register state.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 1149aad10b1e ("arm64: Add dump_backtrace() in show_regs")
+Reported-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/traps.c |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl,
+ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ {
+       struct stackframe frame;
+-      int skip;
++      int skip = 0;
+       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
++      if (regs) {
++              if (user_mode(regs))
++                      return;
++              skip = 1;
++      }
++
+       if (!tsk)
+               tsk = current;
+@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs
+       frame.graph = 0;
+ #endif
+-      skip = !!regs;
+       printk("Call trace:\n");
+       do {
+               /* skip until specified stack frame */
+@@ -176,15 +181,13 @@ static int __die(const char *str, int er
+               return ret;
+       print_modules();
+-      __show_regs(regs);
+       pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+                TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+                end_of_stack(tsk));
++      show_regs(regs);
+-      if (!user_mode(regs)) {
+-              dump_backtrace(regs, tsk);
++      if (!user_mode(regs))
+               dump_instr(KERN_EMERG, regs);
+-      }
+       return ret;
+ }
diff --git a/queue-5.0/arm64-dts-rockchip-fix-rk3328-rgmii-high-tx-error-rate.patch b/queue-5.0/arm64-dts-rockchip-fix-rk3328-rgmii-high-tx-error-rate.patch
new file mode 100644 (file)
index 0000000..07eb83d
--- /dev/null
@@ -0,0 +1,121 @@
+From 6fd8b9780ec1a49ac46e0aaf8775247205e66231 Mon Sep 17 00:00:00 2001
+From: Peter Geis <pgwipeout@gmail.com>
+Date: Wed, 13 Mar 2019 18:45:36 +0000
+Subject: arm64: dts: rockchip: fix rk3328 rgmii high tx error rate
+
+From: Peter Geis <pgwipeout@gmail.com>
+
+commit 6fd8b9780ec1a49ac46e0aaf8775247205e66231 upstream.
+
+Several rk3328 based boards experience high rgmii tx error rates.
+This is due to several pins in the rk3328.dtsi rgmii pinmux that are
+missing a defined pull strength setting.
+This causes the pinmux driver to default to 2ma (bit mask 00).
+
+These pins are only defined in the rk3328.dtsi, and are not listed in
+the rk3328 specification.
+The TRM only lists them as "Reserved"
+(RK3328 TRM V1.1, 3.3.3 Detail Register Description, GRF_GPIO0B_IOMUX,
+GRF_GPIO0C_IOMUX, GRF_GPIO0D_IOMUX).
+However, removal of these pins from the rgmii pinmux definition causes
+the interface to fail to transmit.
+
+Also, the rgmii tx and rx pins defined in the dtsi are not consistent
+with the rk3328 specification, with tx pins currently set to 12ma and
+rx pins set to 2ma.
+
+Fix this by setting tx pins to 8ma and the rx pins to 4ma, consistent
+with the specification.
+Defining the drive strength for the undefined pins eliminated the high
+tx packet error rate observed under heavy data transfers.
+Aligning the drive strength to the TRM values eliminated the occasional
+packet retry errors under iperf3 testing.
+This allows much higher data rates with no recorded tx errors.
+
+Tested on the rk3328-roc-cc board.
+
+Fixes: 52e02d377a72 ("arm64: dts: rockchip: add core dtsi file for RK3328 SoCs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Peter Geis <pgwipeout@gmail.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3328.dtsi |   44 +++++++++++++++----------------
+ 1 file changed, 22 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -1628,50 +1628,50 @@
+                       rgmiim1_pins: rgmiim1-pins {
+                               rockchip,pins =
+                                       /* mac_txclk */
+-                                      <1 RK_PB4 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB4 2 &pcfg_pull_none_8ma>,
+                                       /* mac_rxclk */
+-                                      <1 RK_PB5 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB5 2 &pcfg_pull_none_4ma>,
+                                       /* mac_mdio */
+-                                      <1 RK_PC3 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC3 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txen */
+-                                      <1 RK_PD1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PD1 2 &pcfg_pull_none_8ma>,
+                                       /* mac_clk */
+-                                      <1 RK_PC5 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC5 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxdv */
+-                                      <1 RK_PC6 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC6 2 &pcfg_pull_none_4ma>,
+                                       /* mac_mdc */
+-                                      <1 RK_PC7 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC7 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd1 */
+-                                      <1 RK_PB2 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB2 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd0 */
+-                                      <1 RK_PB3 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB3 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txd1 */
+-                                      <1 RK_PB0 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB0 2 &pcfg_pull_none_8ma>,
+                                       /* mac_txd0 */
+-                                      <1 RK_PB1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB1 2 &pcfg_pull_none_8ma>,
+                                       /* mac_rxd3 */
+-                                      <1 RK_PB6 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB6 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd2 */
+-                                      <1 RK_PB7 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB7 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txd3 */
+-                                      <1 RK_PC0 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PC0 2 &pcfg_pull_none_8ma>,
+                                       /* mac_txd2 */
+-                                      <1 RK_PC1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PC1 2 &pcfg_pull_none_8ma>,
+                                       /* mac_txclk */
+-                                      <0 RK_PB0 1 &pcfg_pull_none>,
++                                      <0 RK_PB0 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txen */
+-                                      <0 RK_PB4 1 &pcfg_pull_none>,
++                                      <0 RK_PB4 1 &pcfg_pull_none_8ma>,
+                                       /* mac_clk */
+-                                      <0 RK_PD0 1 &pcfg_pull_none>,
++                                      <0 RK_PD0 1 &pcfg_pull_none_4ma>,
+                                       /* mac_txd1 */
+-                                      <0 RK_PC0 1 &pcfg_pull_none>,
++                                      <0 RK_PC0 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd0 */
+-                                      <0 RK_PC1 1 &pcfg_pull_none>,
++                                      <0 RK_PC1 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd3 */
+-                                      <0 RK_PC7 1 &pcfg_pull_none>,
++                                      <0 RK_PC7 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd2 */
+-                                      <0 RK_PC6 1 &pcfg_pull_none>;
++                                      <0 RK_PC6 1 &pcfg_pull_none_8ma>;
+                       };
+                       rmiim1_pins: rmiim1-pins {
diff --git a/queue-5.0/arm64-dts-rockchip-fix-vcc_host1_5v-gpio-polarity-on-rk3328-rock64.patch b/queue-5.0/arm64-dts-rockchip-fix-vcc_host1_5v-gpio-polarity-on-rk3328-rock64.patch
new file mode 100644 (file)
index 0000000..b7f9c05
--- /dev/null
@@ -0,0 +1,35 @@
+From a8772e5d826d0f61f8aa9c284b3ab49035d5273d Mon Sep 17 00:00:00 2001
+From: Tomohiro Mayama <parly-gh@iris.mystia.org>
+Date: Sun, 10 Mar 2019 01:10:12 +0900
+Subject: arm64: dts: rockchip: Fix vcc_host1_5v GPIO polarity on rk3328-rock64
+
+From: Tomohiro Mayama <parly-gh@iris.mystia.org>
+
+commit a8772e5d826d0f61f8aa9c284b3ab49035d5273d upstream.
+
+This patch makes the USB ports function again.
+
+Fixes: 955bebde057e ("arm64: dts: rockchip: add rk3328-rock64 board")
+Cc: stable@vger.kernel.org
+Suggested-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Tomohiro Mayama <parly-gh@iris.mystia.org>
+Tested-by: Katsuhiro Suzuki <katsuhiro@katsuster.net>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3328-rock64.dts |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -46,8 +46,7 @@
+       vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
+               compatible = "regulator-fixed";
+-              enable-active-high;
+-              gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
++              gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&usb20_host_drv>;
+               regulator-name = "vcc_host1_5v";
diff --git a/queue-5.0/arm64-ftrace-fix-inadvertent-bug-in-trampoline-check.patch b/queue-5.0/arm64-ftrace-fix-inadvertent-bug-in-trampoline-check.patch
new file mode 100644 (file)
index 0000000..fc474f9
--- /dev/null
@@ -0,0 +1,57 @@
+From 5a3ae7b314a2259b1188b22b392f5eba01e443ee Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Sun, 7 Apr 2019 21:06:16 +0200
+Subject: arm64/ftrace: fix inadvertent BUG() in trampoline check
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 5a3ae7b314a2259b1188b22b392f5eba01e443ee upstream.
+
+The ftrace trampoline code (which deals with modules loaded out of
+BL range of the core kernel) uses plt_entries_equal() to check whether
+the per-module trampoline equals a zero buffer, to decide whether the
+trampoline has already been initialized.
+
+This triggers a BUG() in the opcode manipulation code, since we end
+up checking the ADRP offset of a 0x0 opcode, which is not an ADRP
+instruction.
+
+So instead, add a helper to check whether a PLT is initialized, and
+call that from the ftrace code.
+
+Cc: <stable@vger.kernel.org> # v5.0
+Fixes: bdb85cd1d206 ("arm64/module: switch to ADRP/ADD sequences for PLT entries")
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/module.h |    5 +++++
+ arch/arm64/kernel/ftrace.c      |    3 +--
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/module.h
++++ b/arch/arm64/include/asm/module.h
+@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_f
+ struct plt_entry get_plt_entry(u64 dst, void *pc);
+ bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
++static inline bool plt_entry_is_initialized(const struct plt_entry *e)
++{
++      return e->adrp || e->add || e->br;
++}
++
+ #endif /* __ASM_MODULE_H */
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *
+               trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
+               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                      &trampoline)) {
+-                      if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+-                                             &(struct plt_entry){})) {
++                      if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+                               pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+                               return -EINVAL;
+                       }
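
As a rough stand-alone sketch of the new check (plain C, with a simplified
plt_entry and made-up placeholder opcode values rather than real AArch64
encodings), the idea is simply to test whether any field of the trampoline
entry has been written, instead of comparing against a zeroed entry:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the arm64 plt_entry (adrp/add/br opcodes). */
struct plt_entry { unsigned int adrp, add, br; };

/* Instead of comparing the trampoline against an all-zero plt_entry
 * (which feeds a 0x0 opcode into the ADRP-offset helpers and trips a
 * BUG), just test whether any field has been written. */
static bool plt_entry_is_initialized(const struct plt_entry *e)
{
	return e->adrp || e->add || e->br;
}

int main(void)
{
	struct plt_entry empty = { 0 };
	struct plt_entry used  = { 0x1, 0x2, 0x3 };	/* placeholder nonzero opcodes */

	printf("empty initialized: %d\n", plt_entry_is_initialized(&empty));	/* 0 */
	printf("used  initialized: %d\n", plt_entry_is_initialized(&used));	/* 1 */
	return 0;
}
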
diff --git a/queue-5.0/arm64-futex-fix-futex_wake_op-atomic-ops-with-non-zero-result-value.patch b/queue-5.0/arm64-futex-fix-futex_wake_op-atomic-ops-with-non-zero-result-value.patch
new file mode 100644 (file)
index 0000000..dd098eb
--- /dev/null
@@ -0,0 +1,92 @@
+From 045afc24124d80c6998d9c770844c67912083506 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 8 Apr 2019 12:45:09 +0100
+Subject: arm64: futex: Fix FUTEX_WAKE_OP atomic ops with non-zero result value
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 045afc24124d80c6998d9c770844c67912083506 upstream.
+
+Rather embarrassingly, our futex() FUTEX_WAKE_OP implementation doesn't
+explicitly set the return value on the non-faulting path and instead
+leaves it holding the result of the underlying atomic operation. This
+means that any FUTEX_WAKE_OP atomic operation which computes a non-zero
+value will be reported as having failed. Regrettably, I wrote the buggy
+code back in 2011 and it was upstreamed as part of the initial arm64
+support in 2012.
+
+The reasons we appear to get away with this are:
+
+  1. FUTEX_WAKE_OP is rarely used and therefore doesn't appear to get
+     exercised by futex() test applications
+
+  2. If the result of the atomic operation is zero, the system call
+     behaves correctly
+
+  3. Prior to version 2.25, the only operation used by GLIBC set the
+     futex to zero, and therefore worked as expected. From 2.25 onwards,
+     FUTEX_WAKE_OP is not used by GLIBC at all.
+
+Fix the implementation by ensuring that the return value is either 0
+to indicate that the atomic operation completed successfully, or -EFAULT
+if we encountered a fault when accessing the user mapping.
+
+Cc: <stable@kernel.org>
+Fixes: 6170a97460db ("arm64: Atomic operations")
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/futex.h |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -30,8 +30,8 @@ do {                                                                 \
+ "     prfm    pstl1strm, %2\n"                                        \
+ "1:   ldxr    %w1, %2\n"                                              \
+       insn "\n"                                                       \
+-"2:   stlxr   %w3, %w0, %2\n"                                         \
+-"     cbnz    %w3, 1b\n"                                              \
++"2:   stlxr   %w0, %w3, %2\n"                                         \
++"     cbnz    %w0, 1b\n"                                              \
+ "     dmb     ish\n"                                                  \
+ "3:\n"                                                                        \
+ "     .pushsection .fixup,\"ax\"\n"                                   \
+@@ -50,30 +50,30 @@ do {                                                                       \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+-      int oldval = 0, ret, tmp;
++      int oldval, ret, tmp;
+       u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+       pagefault_disable();
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op("mov  %w0, %w4",
++              __futex_atomic_op("mov  %w3, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op("add  %w0, %w1, %w4",
++              __futex_atomic_op("add  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_OR:
+-              __futex_atomic_op("orr  %w0, %w1, %w4",
++              __futex_atomic_op("orr  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+-              __futex_atomic_op("and  %w0, %w1, %w4",
++              __futex_atomic_op("and  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+-              __futex_atomic_op("eor  %w0, %w1, %w4",
++              __futex_atomic_op("eor  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       default:
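
As a rough user-space illustration of the calling convention the fix
restores (a plain C stand-in with a made-up helper name, not the arm64
assembly): the helper hands the old futex value back through 'oval' and
returns 0 for success (or -EFAULT on a fault), never the arithmetic result.

#include <stdio.h>

/* Sketch of FUTEX_OP_ADD under the fixed convention: the computed sum
 * stays internal, the old value goes to *oval, and the return value only
 * encodes success (0) or failure (-EFAULT in the real code). */
static int demo_futex_atomic_add(int oparg, int *oval, int *uaddr)
{
	int old = *uaddr;	/* load the current futex value       */

	*uaddr = old + oparg;	/* apply the operation                */
	*oval  = old;		/* report the old value to the caller */
	return 0;		/* success is 0, never 'old + oparg'  */
}

int main(void)
{
	int futex = 3, old;
	int ret = demo_futex_atomic_add(4, &old, &futex);

	printf("ret=%d old=%d new=%d\n", ret, old, futex);	/* ret=0 old=3 new=7 */
	return 0;
}
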
diff --git a/queue-5.0/csky-fix-syscall_get_arguments-and-syscall_set_arguments.patch b/queue-5.0/csky-fix-syscall_get_arguments-and-syscall_set_arguments.patch
new file mode 100644 (file)
index 0000000..00ef59f
--- /dev/null
@@ -0,0 +1,65 @@
+From ed3bb007021b9bddb90afae28a19f08ed8890add Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin" <ldv@altlinux.org>
+Date: Fri, 29 Mar 2019 20:12:30 +0300
+Subject: csky: Fix syscall_get_arguments() and syscall_set_arguments()
+
+From: Dmitry V. Levin <ldv@altlinux.org>
+
+commit ed3bb007021b9bddb90afae28a19f08ed8890add upstream.
+
+C-SKY syscall arguments are located in orig_a0,a1,a2,a3,regs[0],regs[1]
+fields of struct pt_regs.
+
+Due to an off-by-one bug and a bug in pointer arithmetic,
+syscall_get_arguments() was reading orig_a0,regs[1..5] fields instead.
+Likewise, syscall_set_arguments() was writing orig_a0,regs[1..5] fields
+instead.
+
+Link: http://lkml.kernel.org/r/20190329171230.GB32456@altlinux.org
+
+Fixes: 4859bfca11c7d ("csky: System Call")
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Will Drewry <wad@chromium.org>
+Cc: stable@vger.kernel.org # v4.20+
+Tested-by: Guo Ren <ren_guo@c-sky.com>
+Acked-by: Guo Ren <ren_guo@c-sky.com>
+Signed-off-by: Dmitry V. Levin <ldv@altlinux.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/csky/include/asm/syscall.h |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/csky/include/asm/syscall.h
++++ b/arch/csky/include/asm/syscall.h
+@@ -49,10 +49,11 @@ syscall_get_arguments(struct task_struct
+       if (i == 0) {
+               args[0] = regs->orig_a0;
+               args++;
+-              i++;
+               n--;
++      } else {
++              i--;
+       }
+-      memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
++      memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+ }
+ static inline void
+@@ -63,10 +64,11 @@ syscall_set_arguments(struct task_struct
+       if (i == 0) {
+               regs->orig_a0 = args[0];
+               args++;
+-              i++;
+               n--;
++      } else {
++              i--;
+       }
+-      memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
++      memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
+ }
+ static inline int
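
The pointer-arithmetic half of the bug is easy to reproduce in plain
user-space C (an illustrative sketch, not the csky code): adding an
integer to a 'long *' already advances by whole elements, so scaling the
index by sizeof() again lands far past the intended argument slot.

#include <stdio.h>

int main(void)
{
	long regs[32];
	long *a1 = &regs[1];
	int i = 2;

	/* correct: plain element index, the compiler scales by sizeof(long) */
	printf("intended slot: regs[%td]\n", (a1 + i) - regs);			/* regs[3]  */
	/* buggy: index scaled twice, as in '&regs->a1 + i * sizeof(regs->a1)' */
	printf("buggy slot:    regs[%td]\n", (a1 + i * sizeof(*a1)) - regs);	/* regs[17] */
	return 0;
}
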
diff --git a/queue-5.0/dm-disable-discard-if-the-underlying-storage-no-longer-supports-it.patch b/queue-5.0/dm-disable-discard-if-the-underlying-storage-no-longer-supports-it.patch
new file mode 100644 (file)
index 0000000..2e8023c
--- /dev/null
@@ -0,0 +1,120 @@
+From bcb44433bba5eaff293888ef22ffa07f1f0347d6 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 3 Apr 2019 12:23:11 -0400
+Subject: dm: disable DISCARD if the underlying storage no longer supports it
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit bcb44433bba5eaff293888ef22ffa07f1f0347d6 upstream.
+
+Some storage devices report supporting discard commands like
+WRITE_SAME_16 with unmap, but reject discard commands actually sent to
+them.  This is a clear storage firmware bug, but it doesn't change the
+fact that should a program cause discards to be sent to a multipath
+device layered on this buggy storage, all paths can end up failed at
+the same time from the discards, causing possible I/O loss.
+
+The first discard to a path will fail with Illegal Request, Invalid
+field in cdb, e.g.:
+ kernel: sd 8:0:8:19: [sdfn] tag#0 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE
+ kernel: sd 8:0:8:19: [sdfn] tag#0 Sense Key : Illegal Request [current]
+ kernel: sd 8:0:8:19: [sdfn] tag#0 Add. Sense: Invalid field in cdb
+ kernel: sd 8:0:8:19: [sdfn] tag#0 CDB: Write same(16) 93 08 00 00 00 00 00 a0 08 00 00 00 80 00 00 00
+ kernel: blk_update_request: critical target error, dev sdfn, sector 10487808
+
+The SCSI layer converts this to the BLK_STS_TARGET error number, the sd
+device disables its support for discard on this path, and because of the
+BLK_STS_TARGET error multipath fails the discard without failing any
+path or retrying down a different path.  But subsequent discards can
+cause path failures.  Any discards sent to the path which already failed
+a discard ends up failing with EIO from blk_cloned_rq_check_limits with
+an "over max size limit" error since the discard limit was set to 0 by
+the sd driver for the path.  As the error is EIO, this now fails the
+path and multipath tries to send the discard down the next path.  This
+cycle continues as discards are sent until all paths fail.
+
+Fix this by training DM core to disable DISCARD if the underlying
+storage already did so.
+
+Also, fix branching in dm_done() and clone_endio() to reflect the
+mutually exclusive nature of the IO operations in question.
+
+Cc: stable@vger.kernel.org
+Reported-by: David Jeffery <djeffery@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-core.h |    1 +
+ drivers/md/dm-rq.c   |   11 +++++++----
+ drivers/md/dm.c      |   20 ++++++++++++++++----
+ 3 files changed, 24 insertions(+), 8 deletions(-)
+
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -115,6 +115,7 @@ struct mapped_device {
+       struct srcu_struct io_barrier;
+ };
++void disable_discard(struct mapped_device *md);
+ void disable_write_same(struct mapped_device *md);
+ void disable_write_zeroes(struct mapped_device *md);
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -206,11 +206,14 @@ static void dm_done(struct request *clon
+       }
+       if (unlikely(error == BLK_STS_TARGET)) {
+-              if (req_op(clone) == REQ_OP_WRITE_SAME &&
+-                  !clone->q->limits.max_write_same_sectors)
++              if (req_op(clone) == REQ_OP_DISCARD &&
++                  !clone->q->limits.max_discard_sectors)
++                      disable_discard(tio->md);
++              else if (req_op(clone) == REQ_OP_WRITE_SAME &&
++                       !clone->q->limits.max_write_same_sectors)
+                       disable_write_same(tio->md);
+-              if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+-                  !clone->q->limits.max_write_zeroes_sectors)
++              else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
++                       !clone->q->limits.max_write_zeroes_sectors)
+                       disable_write_zeroes(tio->md);
+       }
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -963,6 +963,15 @@ static void dec_pending(struct dm_io *io
+       }
+ }
++void disable_discard(struct mapped_device *md)
++{
++      struct queue_limits *limits = dm_get_queue_limits(md);
++
++      /* device doesn't really support DISCARD, disable it */
++      limits->max_discard_sectors = 0;
++      blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
++}
++
+ void disable_write_same(struct mapped_device *md)
+ {
+       struct queue_limits *limits = dm_get_queue_limits(md);
+@@ -988,11 +997,14 @@ static void clone_endio(struct bio *bio)
+       dm_endio_fn endio = tio->ti->type->end_io;
+       if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+-              if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+-                  !bio->bi_disk->queue->limits.max_write_same_sectors)
++              if (bio_op(bio) == REQ_OP_DISCARD &&
++                  !bio->bi_disk->queue->limits.max_discard_sectors)
++                      disable_discard(md);
++              else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
++                       !bio->bi_disk->queue->limits.max_write_same_sectors)
+                       disable_write_same(md);
+-              if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+-                  !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
++              else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
++                       !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+                       disable_write_zeroes(md);
+       }
diff --git a/queue-5.0/dm-integrity-change-memcmp-to-strncmp-in-dm_integrity_ctr.patch b/queue-5.0/dm-integrity-change-memcmp-to-strncmp-in-dm_integrity_ctr.patch
new file mode 100644 (file)
index 0000000..810c2db
--- /dev/null
@@ -0,0 +1,57 @@
+From 0d74e6a3b6421d98eeafbed26f29156d469bc0b5 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 13 Mar 2019 07:56:02 -0400
+Subject: dm integrity: change memcmp to strncmp in dm_integrity_ctr
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 0d74e6a3b6421d98eeafbed26f29156d469bc0b5 upstream.
+
+If the string opt_string is short, the function memcmp can access bytes
+beyond the terminating nul character. In theory, it could cause a
+segfault if opt_string were located just below some unmapped memory.
+
+Change from memcmp to strncmp so that we don't read bytes beyond the end
+of the string.
+
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3185,7 +3185,7 @@ static int dm_integrity_ctr(struct dm_ta
+                       journal_watermark = val;
+               else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
+                       sync_msec = val;
+-              else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
++              else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+                       if (ic->meta_dev) {
+                               dm_put_device(ti, ic->meta_dev);
+                               ic->meta_dev = NULL;
+@@ -3204,17 +3204,17 @@ static int dm_integrity_ctr(struct dm_ta
+                               goto bad;
+                       }
+                       ic->sectors_per_block = val >> SECTOR_SHIFT;
+-              } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
++              } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+                       r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
+                                           "Invalid internal_hash argument");
+                       if (r)
+                               goto bad;
+-              } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
++              } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+                       r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
+                                           "Invalid journal_crypt argument");
+                       if (r)
+                               goto bad;
+-              } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
++              } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+                       r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
+                                           "Invalid journal_mac argument");
+                       if (r)
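
A small user-space sketch of the difference (the option and key strings
here are only illustrative, and the buffer is padded so the demo itself
stays in bounds; in the kernel case the bytes past the NUL may simply be
unmapped):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char opt_string[32] = "foo";		/* short option, padded buffer */
	const char *key = "meta_device:";

	/* memcmp() may read all strlen(key) bytes, even past opt_string's NUL */
	int m = memcmp(opt_string, key, strlen(key));
	/* strncmp() stops at the first NUL, so it never reads beyond "foo" */
	int s = strncmp(opt_string, key, strlen(key));

	printf("memcmp=%d strncmp=%d (both non-zero: no match)\n", m, s);
	return 0;
}
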
diff --git a/queue-5.0/dm-integrity-fix-deadlock-with-overlapping-i-o.patch b/queue-5.0/dm-integrity-fix-deadlock-with-overlapping-i-o.patch
new file mode 100644 (file)
index 0000000..0e39d82
--- /dev/null
@@ -0,0 +1,49 @@
+From 4ed319c6ac08e9a28fca7ac188181ac122f4de84 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 5 Apr 2019 15:26:39 -0400
+Subject: dm integrity: fix deadlock with overlapping I/O
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 4ed319c6ac08e9a28fca7ac188181ac122f4de84 upstream.
+
+dm-integrity will deadlock if overlapping I/O is issued to it, the bug
+was introduced by commit 724376a04d1a ("dm integrity: implement fair
+range locks").  Users rarely use overlapping I/O so this bug went
+undetected until now.
+
+Fix this bug by correcting likely cut-and-paste typos in
+ranges_overlap() and also removing a flawed ranges_overlap() check in
+remove_range_unlocked().  This condition could leave unprocessed bios
+hanging on wait_list forever.
+
+Cc: stable@vger.kernel.org # v4.19+
+Fixes: 724376a04d1a ("dm integrity: implement fair range locks")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_
+ static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
+ {
+       return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
+-             range2->logical_sector + range2->n_sectors > range2->logical_sector;
++             range1->logical_sector + range1->n_sectors > range2->logical_sector;
+ }
+ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
+@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct
+               struct dm_integrity_range *last_range =
+                       list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
+               struct task_struct *last_range_task;
+-              if (!ranges_overlap(range, last_range))
+-                      break;
+               last_range_task = last_range->task;
+               list_del(&last_range->wait_entry);
+               if (!add_new_range(ic, last_range, false)) {
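
For reference, a stand-alone rendering of the overlap test the fix
restores, using a simplified range type rather than the kernel's
dm_integrity_range: two half-open ranges overlap exactly when each one
starts before the other one ends, whereas the typo compared range2
against itself and so was always true for any non-empty range.

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, n; };	/* [start, start + n) in sectors */

static bool ranges_overlap(const struct range *a, const struct range *b)
{
	return a->start < b->start + b->n &&
	       a->start + a->n > b->start;
}

int main(void)
{
	struct range r1 = { 0, 8 }, r2 = { 4, 8 }, r3 = { 8, 8 };

	printf("r1/r2: %d\n", ranges_overlap(&r1, &r2));	/* 1: they share sectors 4-7      */
	printf("r1/r3: %d\n", ranges_overlap(&r1, &r3));	/* 0: they touch but don't overlap */
	return 0;
}
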
diff --git a/queue-5.0/dm-revert-8f50e358153d-dm-limit-the-max-bio-size-as-bio_max_pages-page_size.patch b/queue-5.0/dm-revert-8f50e358153d-dm-limit-the-max-bio-size-as-bio_max_pages-page_size.patch
new file mode 100644 (file)
index 0000000..77379f1
--- /dev/null
@@ -0,0 +1,53 @@
+From 75ae193626de3238ca5fb895868ec91c94e63b1b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 21 Mar 2019 16:46:12 -0400
+Subject: dm: revert 8f50e358153d ("dm: limit the max bio size as BIO_MAX_PAGES * PAGE_SIZE")
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 75ae193626de3238ca5fb895868ec91c94e63b1b upstream.
+
+The limit was already incorporated to dm-crypt with commit 4e870e948fba
+("dm crypt: fix error with too large bios"), so we don't need to apply
+it globally to all targets. The quantity BIO_MAX_PAGES * PAGE_SIZE is
+wrong anyway because the variable ti->max_io_len is supposed to be in
+units of 512-byte sectors, not in bytes.
+
+Reduction of the limit to 1048576 sectors could even cause data
+corruption in rare cases - suppose that we have a dm-striped device with
+stripe size 768MiB. The target will call dm_set_target_max_io_len with
+the value 1572864. The buggy code would reduce it to 1048576. Now, the
+dm-core will erroneously split the bios on a 1048576-sector boundary
+instead of a 1572864-sector boundary and pass these stripe-crossing bios
+to the striped target.
+
+Cc: stable@vger.kernel.org # v4.16+
+Fixes: 8f50e358153d ("dm: limit the max bio size as BIO_MAX_PAGES * PAGE_SIZE")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c |   10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1060,15 +1060,7 @@ int dm_set_target_max_io_len(struct dm_t
+               return -EINVAL;
+       }
+-      /*
+-       * BIO based queue uses its own splitting. When multipage bvecs
+-       * is switched on, size of the incoming bio may be too big to
+-       * be handled in some targets, such as crypt.
+-       *
+-       * When these targets are ready for the big bio, we can remove
+-       * the limit.
+-       */
+-      ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
++      ti->max_io_len = (uint32_t) len;
+       return 0;
+ }
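
The unit mix-up described in the commit message can be reproduced with a
few lines of arithmetic (a sketch assuming 4 KiB pages and a BIO_MAX_PAGES
value of 256):

#include <stdio.h>

#define SECTOR_SIZE	512ul
#define PAGE_SIZE_4K	4096ul		/* assumed page size */
#define BIO_MAX_PAGES	256ul		/* assumed value for this series */

int main(void)
{
	unsigned long stripe_sectors = 768ul * 1024 * 1024 / SECTOR_SIZE;  /* 768 MiB stripe = 1572864 sectors */
	unsigned long byte_limit     = BIO_MAX_PAGES * PAGE_SIZE_4K;       /* 1048576 -- a byte count, not sectors */
	unsigned long clamped        = stripe_sectors < byte_limit ? stripe_sectors : byte_limit;

	/* ti->max_io_len is in 512-byte sectors, so clamping it with a byte
	 * quantity silently moves the split boundary inside the stripe. */
	printf("intended max_io_len: %lu sectors\n", stripe_sectors);  /* 1572864 */
	printf("buggy    max_io_len: %lu sectors\n", clamped);         /* 1048576 */
	return 0;
}
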
diff --git a/queue-5.0/dm-table-propagate-bdi_cap_stable_writes-to-fix-sporadic-checksum-errors.patch b/queue-5.0/dm-table-propagate-bdi_cap_stable_writes-to-fix-sporadic-checksum-errors.patch
new file mode 100644 (file)
index 0000000..c68dbc1
--- /dev/null
@@ -0,0 +1,80 @@
+From eb40c0acdc342b815d4d03ae6abb09e80c0f2988 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Tue, 26 Mar 2019 20:20:58 +0100
+Subject: dm table: propagate BDI_CAP_STABLE_WRITES to fix sporadic checksum errors
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit eb40c0acdc342b815d4d03ae6abb09e80c0f2988 upstream.
+
+Some devices don't use blk_integrity but still want stable pages
+because they do their own checksumming.  Examples include rbd and iSCSI
+when data digests are negotiated.  Stacking DM (and thus LVM) on top of
+these devices results in sporadic checksum errors.
+
+Set BDI_CAP_STABLE_WRITES if any underlying device has it set.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-table.c |   39 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1852,6 +1852,36 @@ static bool dm_table_supports_secure_era
+       return true;
+ }
++static int device_requires_stable_pages(struct dm_target *ti,
++                                      struct dm_dev *dev, sector_t start,
++                                      sector_t len, void *data)
++{
++      struct request_queue *q = bdev_get_queue(dev->bdev);
++
++      return q && bdi_cap_stable_pages_required(q->backing_dev_info);
++}
++
++/*
++ * If any underlying device requires stable pages, a table must require
++ * them as well.  Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc to require stable pages.
++ */
++static bool dm_table_requires_stable_pages(struct dm_table *t)
++{
++      struct dm_target *ti;
++      unsigned i;
++
++      for (i = 0; i < dm_table_get_num_targets(t); i++) {
++              ti = dm_table_get_target(t, i);
++
++              if (ti->type->iterate_devices &&
++                  ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
++                      return true;
++      }
++
++      return false;
++}
++
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+                              struct queue_limits *limits)
+ {
+@@ -1910,6 +1940,15 @@ void dm_table_set_restrictions(struct dm
+       dm_table_verify_integrity(t);
+       /*
++       * Some devices don't use blk_integrity but still want stable pages
++       * because they do their own checksumming.
++       */
++      if (dm_table_requires_stable_pages(t))
++              q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
++      else
++              q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
++
++      /*
+        * Determine whether or not this queue's I/O timings contribute
+        * to the entropy pool, Only request-based targets use this.
+        * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
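
The pattern behind this fix is a simple OR-reduction over all member devices: the stacked device must advertise stable pages as soon as any underlying device does, and must drop the flag again when none do. A minimal standalone C sketch of that reduction (the structures below are hypothetical stand-ins, not the DM/bdi API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the devices involved; not the kernel API. */
    struct member_dev {
    	bool stable_pages_required;  /* what bdi_cap_stable_pages_required() reports */
    };

    struct stacked_dev {
    	const struct member_dev *members;
    	int nmembers;
    	bool stable_pages_required;
    };

    /*
     * Require stable pages on the stacked device if any member requires
     * them, and clear the requirement again if no member does (e.g. after
     * the table is reloaded with different members).
     */
    static void propagate_stable_pages(struct stacked_dev *d)
    {
    	bool required = false;

    	for (int i = 0; i < d->nmembers; i++)
    		required |= d->members[i].stable_pages_required;

    	d->stable_pages_required = required;
    }

    int main(void)
    {
    	struct member_dev members[] = { { false }, { true } };
    	struct stacked_dev dm = { .members = members, .nmembers = 2 };

    	propagate_stable_pages(&dm);
    	printf("stable pages required: %d\n", dm.stable_pages_required);
    	return 0;
    }
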
diff --git a/queue-5.0/ib-mlx5-reset-access-mask-when-looping-inside-page-fault-handler.patch b/queue-5.0/ib-mlx5-reset-access-mask-when-looping-inside-page-fault-handler.patch
new file mode 100644 (file)
index 0000000..1e6654f
--- /dev/null
@@ -0,0 +1,44 @@
+From 1abe186ed8a6593069bc122da55fc684383fdc1c Mon Sep 17 00:00:00 2001
+From: Moni Shoua <monis@mellanox.com>
+Date: Tue, 19 Mar 2019 11:24:36 +0200
+Subject: IB/mlx5: Reset access mask when looping inside page fault handler
+
+From: Moni Shoua <monis@mellanox.com>
+
+commit 1abe186ed8a6593069bc122da55fc684383fdc1c upstream.
+
+If the page-fault handler spans multiple MRs then the access mask needs to
+be reset before handling each MR, otherwise write access will be
+granted to mapped pages instead of read-only access.
+
+Cc: <stable@vger.kernel.org> # 3.19
+Fixes: 7bdf65d411c1 ("IB/mlx5: Handle page faults")
+Reported-by: Jerome Glisse <jglisse@redhat.com>
+Signed-off-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/odp.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -560,7 +560,7 @@ static int pagefault_mr(struct mlx5_ib_d
+       struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+       bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
+       bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
+-      u64 access_mask = ODP_READ_ALLOWED_BIT;
++      u64 access_mask;
+       u64 start_idx, page_mask;
+       struct ib_umem_odp *odp;
+       size_t size;
+@@ -582,6 +582,7 @@ next_mr:
+       page_shift = mr->umem->page_shift;
+       page_mask = ~(BIT(page_shift) - 1);
+       start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
++      access_mask = ODP_READ_ALLOWED_BIT;
+       if (prefetch && !downgrade && !mr->umem->writable) {
+               /* prefetch with write-access must
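
The underlying bug pattern is per-iteration state that is initialized only once before a loop, so later iterations inherit a value widened by an earlier one. A minimal standalone sketch of that pattern (the region and mask names are hypothetical, not the mlx5 ODP code):

    #include <stdio.h>

    #define READ_ALLOWED	0x1u
    #define WRITE_ALLOWED	0x2u

    /* Hypothetical stand-in for one memory region spanned by a page fault. */
    struct region {
    	int writable;
    };

    static void handle_fault(const struct region *regions, int nregions)
    {
    	for (int i = 0; i < nregions; i++) {
    		/*
    		 * Reset the per-region mask on every iteration.  If this
    		 * assignment sat above the loop, a writable region would
    		 * leave WRITE_ALLOWED set for the read-only regions that
    		 * follow it, which is the bug the patch fixes.
    		 */
    		unsigned int access_mask = READ_ALLOWED;

    		if (regions[i].writable)
    			access_mask |= WRITE_ALLOWED;

    		printf("region %d: mask 0x%x\n", i, access_mask);
    	}
    }

    int main(void)
    {
    	struct region r[] = { { .writable = 1 }, { .writable = 0 } };

    	handle_fault(r, 2);
    	return 0;
    }
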
diff --git a/queue-5.0/pci-add-function-1-dma-alias-quirk-for-marvell-9170-sata-controller.patch b/queue-5.0/pci-add-function-1-dma-alias-quirk-for-marvell-9170-sata-controller.patch
new file mode 100644 (file)
index 0000000..eebc713
--- /dev/null
@@ -0,0 +1,37 @@
+From 9cde402a59770a0669d895399c13407f63d7d209 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Fri, 5 Apr 2019 16:20:47 +0100
+Subject: PCI: Add function 1 DMA alias quirk for Marvell 9170 SATA controller
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+commit 9cde402a59770a0669d895399c13407f63d7d209 upstream.
+
+There is a Marvell 88SE9170 PCIe SATA controller I found on a board here.
+Some quick testing with the ARM SMMU enabled reveals that it suffers from
+the same requester ID mixup problems as the other Marvell chips listed
+already.
+
+Add the PCI vendor/device ID to the list of chips which need the
+workaround.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+CC: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/quirks.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_M
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+                        quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
++                       quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+                        quirk_dma_func1_alias);
diff --git a/queue-5.0/pci-pciehp-ignore-link-state-changes-after-powering-off-a-slot.patch b/queue-5.0/pci-pciehp-ignore-link-state-changes-after-powering-off-a-slot.patch
new file mode 100644 (file)
index 0000000..280d374
--- /dev/null
@@ -0,0 +1,50 @@
+From 3943af9d01e94330d0cfac6fccdbc829aad50c92 Mon Sep 17 00:00:00 2001
+From: Sergey Miroshnichenko <s.miroshnichenko@yadro.com>
+Date: Tue, 12 Mar 2019 15:05:48 +0300
+Subject: PCI: pciehp: Ignore Link State Changes after powering off a slot
+
+From: Sergey Miroshnichenko <s.miroshnichenko@yadro.com>
+
+commit 3943af9d01e94330d0cfac6fccdbc829aad50c92 upstream.
+
+During a safe hot remove, the OS powers off the slot, which may cause a
+Data Link Layer State Changed event.  The slot has already been set to
+OFF_STATE, so that event results in re-enabling the device, making it
+impossible to safely remove it.
+
+Clear out the Presence Detect Changed and Data Link Layer State Changed
+events when the disabled slot has settled down.
+
+It is still possible to re-enable the device if it remains in the slot
+after pressing the Attention Button by pressing it again.
+
+Fixes the problem that Micah reported below: an NVMe drive power button may
+not actually turn off the drive.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=203237
+Reported-by: Micah Parrish <micah.parrish@hpe.com>
+Tested-by: Micah Parrish <micah.parrish@hpe.com>
+Signed-off-by: Sergey Miroshnichenko <s.miroshnichenko@yadro.com>
+[bhelgaas: changelog, add bugzilla URL]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org     # v4.19+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/hotplug/pciehp_ctrl.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -115,6 +115,10 @@ static void remove_board(struct controll
+                * removed from the slot/adapter.
+                */
+               msleep(1000);
++
++              /* Ignore link or presence changes caused by power off */
++              atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
++                         &ctrl->pending_events);
+       }
+       /* turn off Green LED */
diff --git a/queue-5.0/powerpc-64s-radix-fix-radix-segment-exception-handling.patch b/queue-5.0/powerpc-64s-radix-fix-radix-segment-exception-handling.patch
new file mode 100644 (file)
index 0000000..ca655b7
--- /dev/null
@@ -0,0 +1,71 @@
+From 7100e8704b61247649c50551b965e71d168df30b Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 29 Mar 2019 17:42:57 +1000
+Subject: powerpc/64s/radix: Fix radix segment exception handling
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 7100e8704b61247649c50551b965e71d168df30b upstream.
+
+Commit 48e7b76957 ("powerpc/64s/hash: Convert SLB miss handlers to C")
+broke the radix-mode segment exception handler. In radix mode, this
+exception is not an SLB miss; rather, it signals that the EA is outside
+the range translated by any page table.
+
+The commit lost the radix feature alternate code patch, which can
+cause faults to some EAs to hit a kernel BUG at arch/powerpc/mm/slb.c:639!
+
+The original radix code would send faults to slb_miss_large_addr,
+which would end up faulting due to slb_addr_limit being 0. This patch
+sends radix directly to do_bad_slb_fault, which is a bit clearer.
+
+Fixes: 48e7b7695745 ("powerpc/64s/hash: Convert SLB miss handlers to C")
+Cc: stable@vger.kernel.org # v4.20+
+Reported-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -612,11 +612,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
+       ld      r4,PACA_EXSLB+EX_DAR(r13)
+       std     r4,_DAR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++BEGIN_MMU_FTR_SECTION
++      /* HPT case, do SLB fault */
+       bl      do_slb_fault
+       cmpdi   r3,0
+       bne-    1f
+       b       fast_exception_return
+ 1:    /* Error case */
++MMU_FTR_SECTION_ELSE
++      /* Radix case, access is outside page table range */
++      li      r3,-EFAULT
++ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+       std     r3,RESULT(r1)
+       bl      save_nvgprs
+       RECONCILE_IRQ_STATE(r10, r11)
+@@ -661,11 +667,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_
+       EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
+       ld      r4,_NIP(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++BEGIN_MMU_FTR_SECTION
++      /* HPT case, do SLB fault */
+       bl      do_slb_fault
+       cmpdi   r3,0
+       bne-    1f
+       b       fast_exception_return
+ 1:    /* Error case */
++MMU_FTR_SECTION_ELSE
++      /* Radix case, access is outside page table range */
++      li      r3,-EFAULT
++ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+       std     r3,RESULT(r1)
+       bl      save_nvgprs
+       RECONCILE_IRQ_STATE(r10, r11)
diff --git a/queue-5.0/sched-fair-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch b/queue-5.0/sched-fair-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch
new file mode 100644 (file)
index 0000000..86c604d
--- /dev/null
@@ -0,0 +1,82 @@
+From 0e9f02450da07fc7b1346c8c32c771555173e397 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Tue, 19 Mar 2019 12:36:10 +0000
+Subject: sched/fair: Do not re-read ->h_load_next during hierarchical load calculation
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 0e9f02450da07fc7b1346c8c32c771555173e397 upstream.
+
+A NULL pointer dereference bug was reported on a distribution kernel but
+the same issue should be present in the mainline kernel. It occurred on s390
+but should not be arch-specific.  A partial oops looks like:
+
+  Unable to handle kernel pointer dereference in virtual kernel address space
+  ...
+  Call Trace:
+    ...
+    try_to_wake_up+0xfc/0x450
+    vhost_poll_wakeup+0x3a/0x50 [vhost]
+    __wake_up_common+0xbc/0x178
+    __wake_up_common_lock+0x9e/0x160
+    __wake_up_sync_key+0x4e/0x60
+    sock_def_readable+0x5e/0x98
+
+The bug hits any time between 1 hour and 3 days. The dereference occurs
+in update_cfs_rq_h_load when accumulating h_load. The problem is that
+cfs_rq->h_load_next is not protected by any locking and can be updated
+by parallel calls to task_h_load. Depending on the compiler, code may be
+generated that re-reads cfs_rq->h_load_next after the check for NULL and
+then oopses when reading se->avg.load_avg. The disassembly showed that it
+was possible to re-read h_load_next after the check for NULL.
+
+While this does not appear to be an issue for later compilers, it's still
+an accident if the correct code is generated. Full locking in this path
+would have high overhead so this patch uses READ_ONCE to read h_load_next
+only once and check for NULL before dereferencing. It was confirmed that
+there were no further oops after 10 days of testing.
+
+As Peter pointed out, it is also necessary to use WRITE_ONCE() to avoid any
+potential problems with store tearing.
+
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Fixes: 685207963be9 ("sched: Move h_load calculation to task_h_load()")
+Link: https://lkml.kernel.org/r/20190319123610.nsivgf3mjbjjesxb@techsingularity.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7713,10 +7713,10 @@ static void update_cfs_rq_h_load(struct
+       if (cfs_rq->last_h_load_update == now)
+               return;
+-      cfs_rq->h_load_next = NULL;
++      WRITE_ONCE(cfs_rq->h_load_next, NULL);
+       for_each_sched_entity(se) {
+               cfs_rq = cfs_rq_of(se);
+-              cfs_rq->h_load_next = se;
++              WRITE_ONCE(cfs_rq->h_load_next, se);
+               if (cfs_rq->last_h_load_update == now)
+                       break;
+       }
+@@ -7726,7 +7726,7 @@ static void update_cfs_rq_h_load(struct
+               cfs_rq->last_h_load_update = now;
+       }
+-      while ((se = cfs_rq->h_load_next) != NULL) {
++      while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
+               load = cfs_rq->h_load;
+               load = div64_ul(load * se->avg.load_avg,
+                       cfs_rq_load_avg(cfs_rq) + 1);
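
The fix depends on READ_ONCE()/WRITE_ONCE() forcing exactly one access, so the compiler cannot re-read the pointer between the NULL check and the dereference. A standalone sketch of that idiom (the macros below are simplified volatile-cast stand-ins for the kernel's versions, and the list is hypothetical):

    #include <stddef.h>

    /* Simplified stand-ins for the kernel macros: force exactly one access. */
    #define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

    struct node {
    	struct node *next;	/* may be rewritten by another CPU */
    	long load;
    };

    /* Publisher side: a single store, no tearing. */
    void publish(struct node *n, struct node *child)
    {
    	WRITE_ONCE(n->next, child);
    }

    /*
     * Reader side: read n->next exactly once per step.  With a plain read
     * the compiler may reload n->next after the NULL check, and a racing
     * publish() can make the reload return a pointer the check never saw.
     */
    long sum_load(struct node *head)
    {
    	long sum = 0;
    	struct node *n = head;
    	struct node *next;

    	while ((next = READ_ONCE(n->next)) != NULL) {
    		sum += next->load;
    		n = next;
    	}
    	return sum;
    }
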
index 5c0b337e24c632aace664c1d975b68355785d0bb..c47acedfb61f86ff488422e281d03a2738c62a70 100644 (file)
@@ -82,3 +82,33 @@ genirq-respect-irqchip_skip_set_wake-in-irq_chip_set_wake_parent.patch
 genirq-initialize-request_mutex-if-config_sparse_irq-n.patch
 virtio-honour-may_reduce_num-in-vring_create_virtqueue.patch
 drm-i915-dp-revert-back-to-max-link-rate-and-lane-count-on-edp.patch
+arm-omap1-ams-delta-fix-broken-gpio-id-allocation.patch
+arm-dts-rockchip-fix-rk3288-cpu-opp-node-reference.patch
+arm-dts-am335x-evmsk-correct-the-regulators-for-the-audio-codec.patch
+arm-dts-am335x-evm-correct-the-regulators-for-the-audio-codec.patch
+arm-dts-rockchip-fix-sd-card-detection-on-rk3288-tinker.patch
+arm-dts-at91-fix-typo-in-isc_d0-on-pc9.patch
+arm64-futex-fix-futex_wake_op-atomic-ops-with-non-zero-result-value.patch
+arm64-dts-rockchip-fix-vcc_host1_5v-gpio-polarity-on-rk3328-rock64.patch
+arm64-dts-rockchip-fix-rk3328-rgmii-high-tx-error-rate.patch
+arm64-backtrace-don-t-bother-trying-to-unwind-the-userspace-stack.patch
+arm64-ftrace-fix-inadvertent-bug-in-trampoline-check.patch
+ib-mlx5-reset-access-mask-when-looping-inside-page-fault-handler.patch
+xen-prevent-buffer-overflow-in-privcmd-ioctl.patch
+sched-fair-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch
+xtensa-fix-return_address.patch
+csky-fix-syscall_get_arguments-and-syscall_set_arguments.patch
+x86-asm-remove-dead-__gnuc__-conditionals.patch
+x86-asm-use-stricter-assembly-constraints-in-bitops.patch
+x86-perf-amd-resolve-race-condition-when-disabling-pmc.patch
+x86-perf-amd-resolve-nmi-latency-issues-for-active-pmcs.patch
+x86-perf-amd-remove-need-to-check-running-bit-in-nmi-handler.patch
+pci-add-function-1-dma-alias-quirk-for-marvell-9170-sata-controller.patch
+pci-pciehp-ignore-link-state-changes-after-powering-off-a-slot.patch
+xprtrdma-fix-helper-that-drains-the-transport.patch
+powerpc-64s-radix-fix-radix-segment-exception-handling.patch
+dm-integrity-change-memcmp-to-strncmp-in-dm_integrity_ctr.patch
+dm-revert-8f50e358153d-dm-limit-the-max-bio-size-as-bio_max_pages-page_size.patch
+dm-table-propagate-bdi_cap_stable_writes-to-fix-sporadic-checksum-errors.patch
+dm-disable-discard-if-the-underlying-storage-no-longer-supports-it.patch
+dm-integrity-fix-deadlock-with-overlapping-i-o.patch
diff --git a/queue-5.0/x86-asm-remove-dead-__gnuc__-conditionals.patch b/queue-5.0/x86-asm-remove-dead-__gnuc__-conditionals.patch
new file mode 100644 (file)
index 0000000..ec382dc
--- /dev/null
@@ -0,0 +1,116 @@
+From 88ca66d8540ca26119b1428cddb96b37925bdf01 Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Date: Fri, 11 Jan 2019 09:49:30 +0100
+Subject: x86/asm: Remove dead __GNUC__ conditionals
+
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+
+commit 88ca66d8540ca26119b1428cddb96b37925bdf01 upstream.
+
+The minimum supported gcc version is >= 4.6, so these can be removed.
+
+Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20190111084931.24601-1-linux@rasmusvillemoes.dk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/bitops.h    |    6 ------
+ arch/x86/include/asm/string_32.h |   20 --------------------
+ arch/x86/include/asm/string_64.h |   15 ---------------
+ 3 files changed, 41 deletions(-)
+
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -36,13 +36,7 @@
+  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+  */
+-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
+-/* Technically wrong, but this avoids compilation errors on some gcc
+-   versions. */
+-#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
+-#else
+ #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+-#endif
+ #define ADDR                          BITOP_ADDR(addr)
+--- a/arch/x86/include/asm/string_32.h
++++ b/arch/x86/include/asm/string_32.h
+@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to,
+  *    No 3D Now!
+  */
+-#if (__GNUC__ >= 4)
+ #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+-#else
+-#define memcpy(t, f, n)                               \
+-      (__builtin_constant_p((n))              \
+-       ? __constant_memcpy((t), (f), (n))     \
+-       : __memcpy((t), (f), (n)))
+-#endif
+ #endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+@@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void
+       {
+               int d0, d1;
+-#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
+-              /* Workaround for broken gcc 4.0 */
+-              register unsigned long eax asm("%eax") = pattern;
+-#else
+               unsigned long eax = pattern;
+-#endif
+               switch (count % 4) {
+               case 0:
+@@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, size_t);
+ #ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ >= 4)
+ #define memset(s, c, count) __builtin_memset(s, c, count)
+-#else
+-#define memset(s, c, count)                                           \
+-      (__builtin_constant_p(c)                                        \
+-       ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+-                               (count))                               \
+-       : __memset((s), (c), (count)))
+-#endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+ #define __HAVE_ARCH_MEMSET16
+--- a/arch/x86/include/asm/string_64.h
++++ b/arch/x86/include/asm/string_64.h
+@@ -14,21 +14,6 @@
+ extern void *memcpy(void *to, const void *from, size_t len);
+ extern void *__memcpy(void *to, const void *from, size_t len);
+-#ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
+-#define memcpy(dst, src, len)                                 \
+-({                                                            \
+-      size_t __len = (len);                                   \
+-      void *__ret;                                            \
+-      if (__builtin_constant_p(len) && __len >= 64)           \
+-              __ret = __memcpy((dst), (src), __len);          \
+-      else                                                    \
+-              __ret = __builtin_memcpy((dst), (src), __len);  \
+-      __ret;                                                  \
+-})
+-#endif
+-#endif /* !CONFIG_FORTIFY_SOURCE */
+-
+ #define __HAVE_ARCH_MEMSET
+ void *memset(void *s, int c, size_t n);
+ void *__memset(void *s, int c, size_t n);
diff --git a/queue-5.0/x86-asm-use-stricter-assembly-constraints-in-bitops.patch b/queue-5.0/x86-asm-use-stricter-assembly-constraints-in-bitops.patch
new file mode 100644 (file)
index 0000000..30dd1d5
--- /dev/null
@@ -0,0 +1,228 @@
+From 5b77e95dd7790ff6c8fbf1cd8d0104ebed818a03 Mon Sep 17 00:00:00 2001
+From: Alexander Potapenko <glider@google.com>
+Date: Tue, 2 Apr 2019 13:28:13 +0200
+Subject: x86/asm: Use stricter assembly constraints in bitops
+
+From: Alexander Potapenko <glider@google.com>
+
+commit 5b77e95dd7790ff6c8fbf1cd8d0104ebed818a03 upstream.
+
+There's a number of problems with how arch/x86/include/asm/bitops.h
+is currently using assembly constraints for the memory region
+bitops are modifying:
+
+1) Use memory clobber in bitops that touch arbitrary memory
+
+Certain bit operations that read/write bits take a base pointer and an
+arbitrarily large offset to address the bit relative to that base.
+Inline assembly constraints aren't expressive enough to tell the
+compiler that the assembly directive is going to touch a specific memory
+location of unknown size, therefore we have to use the "memory" clobber
+to indicate that the assembly is going to access memory locations other
+than those listed in the inputs/outputs.
+
+To indicate that BTR/BTS instructions don't necessarily touch the first
+sizeof(long) bytes of the argument, we also move the address to assembly
+inputs.
+
+This particular change leads to a size increase of 124 kernel functions in
+a defconfig build. For some of them the diff is in NOP operations, others
+end up re-reading values from memory and may potentially slow down the
+execution. But without these clobbers the compiler is free to cache
+the contents of the bitmaps and use them as if they weren't changed by
+the inline assembly.
+
+2) Use byte-sized arguments for operations touching single bytes.
+
+Passing a long value to ANDB/ORB/XORB instructions makes the compiler
+treat sizeof(long) bytes as being clobbered, which isn't the case. This
+may theoretically lead to worse code in the case of heavy optimization.
+
+Practical impact:
+
+I've built a defconfig kernel and looked through some of the functions
+generated by GCC 7.3.0 with and without this clobber, and didn't spot
+any miscompilations.
+
+However there is a (trivial) theoretical case where this code leads to
+miscompilation:
+
+  https://lkml.org/lkml/2019/3/28/393
+
+using just GCC 8.3.0 with -O2.  It isn't hard to imagine someone writing
+such a function in the kernel someday.
+
+So the primary motivation is to fix an existing misuse of the asm
+directive, which happens to work in certain configurations now, but
+isn't guaranteed to work under different circumstances.
+
+[ --mingo: Added -stable tag because defconfig only builds a fraction
+  of the kernel and the trivial testcase looks normal enough to
+  be used in existing or in-development code. ]
+
+Signed-off-by: Alexander Potapenko <glider@google.com>
+Cc: <stable@vger.kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: James Y Knight <jyknight@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20190402112813.193378-1-glider@google.com
+[ Edited the changelog, tidied up one of the defines. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/bitops.h |   41 ++++++++++++++++++-----------------------
+ 1 file changed, 18 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -36,16 +36,17 @@
+  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+  */
+-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
++#define RLONG_ADDR(x)                  "m" (*(volatile long *) (x))
++#define WBYTE_ADDR(x)                 "+m" (*(volatile char *) (x))
+-#define ADDR                          BITOP_ADDR(addr)
++#define ADDR                          RLONG_ADDR(addr)
+ /*
+  * We do the locked ops that don't return the old value as
+  * a mask operation on a byte.
+  */
+ #define IS_IMMEDIATE(nr)              (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr)     BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr)     WBYTE_ADDR((void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr)                        (1 << ((nr) & 7))
+ /**
+@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long
+                       : "memory");
+       } else {
+               asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
+-                      : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
++                      : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+       }
+ }
+@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long
+  */
+ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+ {
+-      asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
++      asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ /**
+@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned lon
+                       : "iq" ((u8)~CONST_MASK(nr)));
+       } else {
+               asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
+-                      : BITOP_ADDR(addr)
+-                      : "Ir" (nr));
++                      : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+       }
+ }
+@@ -131,7 +131,7 @@ static __always_inline void clear_bit_un
+ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+ {
+-      asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
++      asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_un
+       bool negative;
+       asm volatile(LOCK_PREFIX "andb %2,%1"
+               CC_SET(s)
+-              : CC_OUT(s) (negative), ADDR
++              : CC_OUT(s) (negative), WBYTE_ADDR(addr)
+               : "ir" ((char) ~(1 << nr)) : "memory");
+       return negative;
+ }
+@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_un
+  * __clear_bit() is non-atomic and implies release semantics before the memory
+  * operation. It can be used for an unlock if no other CPUs can concurrently
+  * modify other bits in the word.
+- *
+- * No memory barrier is required here, because x86 cannot reorder stores past
+- * older loads. Same principle as spin_unlock.
+  */
+ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+ {
+-      barrier();
+       __clear_bit(nr, addr);
+ }
+@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_
+  */
+ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+ {
+-      asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
++      asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ /**
+@@ -196,8 +192,7 @@ static __always_inline void change_bit(l
+                       : "iq" ((u8)CONST_MASK(nr)));
+       } else {
+               asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
+-                      : BITOP_ADDR(addr)
+-                      : "Ir" (nr));
++                      : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+       }
+ }
+@@ -242,8 +237,8 @@ static __always_inline bool __test_and_s
+       asm(__ASM_SIZE(bts) " %2,%1"
+           CC_SET(c)
+-          : CC_OUT(c) (oldbit), ADDR
+-          : "Ir" (nr));
++          : CC_OUT(c) (oldbit)
++          : ADDR, "Ir" (nr) : "memory");
+       return oldbit;
+ }
+@@ -282,8 +277,8 @@ static __always_inline bool __test_and_c
+       asm volatile(__ASM_SIZE(btr) " %2,%1"
+                    CC_SET(c)
+-                   : CC_OUT(c) (oldbit), ADDR
+-                   : "Ir" (nr));
++                   : CC_OUT(c) (oldbit)
++                   : ADDR, "Ir" (nr) : "memory");
+       return oldbit;
+ }
+@@ -294,8 +289,8 @@ static __always_inline bool __test_and_c
+       asm volatile(__ASM_SIZE(btc) " %2,%1"
+                    CC_SET(c)
+-                   : CC_OUT(c) (oldbit), ADDR
+-                   : "Ir" (nr) : "memory");
++                   : CC_OUT(c) (oldbit)
++                   : ADDR, "Ir" (nr) : "memory");
+       return oldbit;
+ }
+@@ -326,7 +321,7 @@ static __always_inline bool variable_tes
+       asm volatile(__ASM_SIZE(bt) " %2,%1"
+                    CC_SET(c)
+                    : CC_OUT(c) (oldbit)
+-                   : "m" (*(unsigned long *)addr), "Ir" (nr));
++                   : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
+       return oldbit;
+ }
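
The heart of the change is that inline assembly which writes memory not fully described by its operands needs a "memory" clobber; otherwise the compiler may keep the bitmap cached in registers across the asm. A standalone x86-64 sketch of the idiom (GCC/Clang inline asm; the helper name is made up):

    #include <stdio.h>

    /*
     * Set bit 'nr' (0..63 here) in *addr.  The asm only lists *addr as an
     * input, so the "memory" clobber is what tells the compiler that the
     * instruction modifies memory and that cached values must be thrown
     * away, which is exactly the point of the patch.
     */
    static inline void set_bit_sketch(long nr, unsigned long *addr)
    {
    	asm volatile("btsq %1, %0"
    		     : /* no outputs */
    		     : "m" (*addr), "Ir" (nr)
    		     : "memory");
    }

    int main(void)
    {
    	unsigned long word = 0;

    	set_bit_sketch(5, &word);
    	set_bit_sketch(40, &word);
    	printf("word = %#lx\n", word);	/* 0x10000000020 */
    	return 0;
    }
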
diff --git a/queue-5.0/x86-perf-amd-remove-need-to-check-running-bit-in-nmi-handler.patch b/queue-5.0/x86-perf-amd-remove-need-to-check-running-bit-in-nmi-handler.patch
new file mode 100644 (file)
index 0000000..08f58ad
--- /dev/null
@@ -0,0 +1,127 @@
+From 3966c3feca3fd10b2935caa0b4a08c7dd59469e5 Mon Sep 17 00:00:00 2001
+From: "Lendacky, Thomas" <Thomas.Lendacky@amd.com>
+Date: Tue, 2 Apr 2019 15:21:18 +0000
+Subject: x86/perf/amd: Remove need to check "running" bit in NMI handler
+
+From: Lendacky, Thomas <Thomas.Lendacky@amd.com>
+
+commit 3966c3feca3fd10b2935caa0b4a08c7dd59469e5 upstream.
+
+Spurious interrupt support was added to perf in the following commit, almost
+a decade ago:
+
+  63e6be6d98e1 ("perf, x86: Catch spurious interrupts after disabling counters")
+
+The two previous patches (resolving the race condition when disabling a
+PMC and NMI latency mitigation) allow for the removal of this older
+spurious interrupt support.
+
+Currently in x86_pmu_stop(), the bit for the PMC in the active_mask bitmap
+is cleared before disabling the PMC, which sets up a race condition. This
+race condition was mitigated by introducing the running bitmap. That race
+condition can be eliminated by first disabling the PMC, waiting for PMC
+reset on overflow and then clearing the bit for the PMC in the active_mask
+bitmap. The NMI handler will not re-enable a disabled counter.
+
+If x86_pmu_stop() is called from the perf NMI handler, the NMI latency
+mitigation support will guard against any unhandled NMI messages.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org> # 4.14.x-
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: https://lkml.kernel.org/r/Message-ID:
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/core.c |   21 +++++++++++++++++++--
+ arch/x86/events/core.c     |   13 +++----------
+ 2 files changed, 22 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -4,8 +4,8 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+-#include <linux/nmi.h>
+ #include <asm/apicdef.h>
++#include <asm/nmi.h>
+ #include "../perf_event.h"
+@@ -491,6 +491,23 @@ static void amd_pmu_disable_all(void)
+       }
+ }
++static void amd_pmu_disable_event(struct perf_event *event)
++{
++      x86_pmu_disable_event(event);
++
++      /*
++       * This can be called from NMI context (via x86_pmu_stop). The counter
++       * may have overflowed, but either way, we'll never see it get reset
++       * by the NMI if we're already in the NMI. And the NMI latency support
++       * below will take care of any pending NMI that might have been
++       * generated by the overflow.
++       */
++      if (in_nmi())
++              return;
++
++      amd_pmu_wait_on_overflow(event->hw.idx);
++}
++
+ /*
+  * Because of NMI latency, if multiple PMC counters are active or other sources
+  * of NMIs are received, the perf NMI handler can handle one or more overflowed
+@@ -738,7 +755,7 @@ static __initconst const struct x86_pmu
+       .disable_all            = amd_pmu_disable_all,
+       .enable_all             = x86_pmu_enable_all,
+       .enable                 = x86_pmu_enable_event,
+-      .disable                = x86_pmu_disable_event,
++      .disable                = amd_pmu_disable_event,
+       .hw_config              = amd_pmu_hw_config,
+       .schedule_events        = x86_schedule_events,
+       .eventsel               = MSR_K7_EVNTSEL0,
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *eve
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+-      if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
++      if (test_bit(hwc->idx, cpuc->active_mask)) {
+               x86_pmu.disable(event);
++              __clear_bit(hwc->idx, cpuc->active_mask);
+               cpuc->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *r
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+-              if (!test_bit(idx, cpuc->active_mask)) {
+-                      /*
+-                       * Though we deactivated the counter some cpus
+-                       * might still deliver spurious interrupts still
+-                       * in flight. Catch them:
+-                       */
+-                      if (__test_and_clear_bit(idx, cpuc->running))
+-                              handled++;
++              if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+-              }
+               event = cpuc->events[idx];
diff --git a/queue-5.0/x86-perf-amd-resolve-nmi-latency-issues-for-active-pmcs.patch b/queue-5.0/x86-perf-amd-resolve-nmi-latency-issues-for-active-pmcs.patch
new file mode 100644 (file)
index 0000000..49aba7d
--- /dev/null
@@ -0,0 +1,142 @@
+From 6d3edaae16c6c7d238360f2841212c2b26774d5e Mon Sep 17 00:00:00 2001
+From: "Lendacky, Thomas" <Thomas.Lendacky@amd.com>
+Date: Tue, 2 Apr 2019 15:21:16 +0000
+Subject: x86/perf/amd: Resolve NMI latency issues for active PMCs
+
+From: Lendacky, Thomas <Thomas.Lendacky@amd.com>
+
+commit 6d3edaae16c6c7d238360f2841212c2b26774d5e upstream.
+
+On AMD processors, the detection of an overflowed PMC counter in the NMI
+handler relies on the current value of the PMC. So, for example, to check
+for overflow on a 48-bit counter, bit 47 is checked to see if it is 1 (not
+overflowed) or 0 (overflowed).
+
+When the perf NMI handler executes it does not know in advance which PMC
+counters have overflowed. As such, the NMI handler will process all active
+PMC counters that have overflowed. NMI latency in newer AMD processors can
+result in multiple overflowed PMC counters being processed in one NMI and
+then a subsequent NMI, which does not appear to be a back-to-back NMI, not
+finding any PMC counters that have overflowed. This may appear to be an
+unhandled NMI resulting in either a panic or a series of messages,
+depending on how the kernel was configured.
+
+To mitigate this issue, add an AMD handle_irq callback function,
+amd_pmu_handle_irq(), that will invoke the common x86_pmu_handle_irq()
+function and upon return perform some additional processing that will
+indicate if the NMI has been handled or would have been handled had an
+earlier NMI not handled the overflowed PMC. Using a per-CPU variable, a
+minimum value of the number of active PMCs or 2 will be set whenever a
+PMC is active. This is used to indicate the possible number of NMIs that
+can still occur. The value of 2 is used for when an NMI does not arrive
+at the LAPIC in time to be collapsed into an already pending NMI. Each
+time the function is called without having handled an overflowed counter,
+the per-CPU value is checked. If the value is non-zero, it is decremented
+and the NMI indicates that it handled the NMI. If the value is zero, then
+the NMI indicates that it did not handle the NMI.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org> # 4.14.x-
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: https://lkml.kernel.org/r/Message-ID:
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/core.c |   56 ++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 55 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -4,10 +4,13 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/nmi.h>
+ #include <asm/apicdef.h>
+ #include "../perf_event.h"
++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++
+ static __initconst const u64 amd_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -488,6 +491,57 @@ static void amd_pmu_disable_all(void)
+       }
+ }
++/*
++ * Because of NMI latency, if multiple PMC counters are active or other sources
++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
++ * back-to-back NMI support won't be active. This PMC handler needs to take into
++ * account that this can occur, otherwise this could result in unknown NMI
++ * messages being issued. Examples of this is PMC overflow while in the NMI
++ * handler when multiple PMCs are active or PMC overflow while handling some
++ * other source of an NMI.
++ *
++ * Attempt to mitigate this by using the number of active PMCs to determine
++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ */
++static int amd_pmu_handle_irq(struct pt_regs *regs)
++{
++      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      int active, handled;
++
++      /*
++       * Obtain the active count before calling x86_pmu_handle_irq() since
++       * it is possible that x86_pmu_handle_irq() may make a counter
++       * inactive (through x86_pmu_stop).
++       */
++      active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
++
++      /* Process any counter overflows */
++      handled = x86_pmu_handle_irq(regs);
++
++      /*
++       * If a counter was handled, record the number of possible remaining
++       * NMIs that can occur.
++       */
++      if (handled) {
++              this_cpu_write(perf_nmi_counter,
++                             min_t(unsigned int, 2, active));
++
++              return handled;
++      }
++
++      if (!this_cpu_read(perf_nmi_counter))
++              return NMI_DONE;
++
++      this_cpu_dec(perf_nmi_counter);
++
++      return NMI_HANDLED;
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+@@ -680,7 +734,7 @@ static ssize_t amd_event_sysfs_show(char
+ static __initconst const struct x86_pmu amd_pmu = {
+       .name                   = "AMD",
+-      .handle_irq             = x86_pmu_handle_irq,
++      .handle_irq             = amd_pmu_handle_irq,
+       .disable_all            = amd_pmu_disable_all,
+       .enable_all             = x86_pmu_enable_all,
+       .enable                 = x86_pmu_enable_event,
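
Stripped of the perf plumbing, the mitigation is a small counter of NMIs that may still legitimately arrive with nothing left to do: refill it to min(active, 2) whenever an overflow is handled, and spend it instead of reporting an unknown NMI when a later NMI finds no work. A standalone sketch of that bookkeeping (a single counter instead of per-CPU state; names are hypothetical):

    #include <stdio.h>

    enum { NMI_DONE = 0, NMI_HANDLED = 1 };

    /* Per-CPU in the kernel; one counter is enough for this sketch. */
    static unsigned int nmi_credit;

    /*
     * 'handled' is how many overflowed counters this invocation processed,
     * 'active' how many counters were armed when it ran.  After handling
     * real work, up to min(active, 2) follow-up NMIs may still arrive with
     * nothing left to do; claim those instead of reporting an unknown NMI.
     */
    static int pmc_nmi_handler(int handled, int active)
    {
    	if (handled) {
    		nmi_credit = active < 2 ? active : 2;
    		return NMI_HANDLED;
    	}

    	if (!nmi_credit)
    		return NMI_DONE;

    	nmi_credit--;
    	return NMI_HANDLED;
    }

    int main(void)
    {
    	printf("%d\n", pmc_nmi_handler(2, 3));	/* real work, credit = 2 */
    	printf("%d\n", pmc_nmi_handler(0, 3));	/* absorbed,  credit = 1 */
    	printf("%d\n", pmc_nmi_handler(0, 3));	/* absorbed,  credit = 0 */
    	printf("%d\n", pmc_nmi_handler(0, 3));	/* genuinely unhandled */
    	return 0;
    }
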
diff --git a/queue-5.0/x86-perf-amd-resolve-race-condition-when-disabling-pmc.patch b/queue-5.0/x86-perf-amd-resolve-race-condition-when-disabling-pmc.patch
new file mode 100644 (file)
index 0000000..5bd08f8
--- /dev/null
@@ -0,0 +1,152 @@
+From 914123fa39042e651d79eaf86bbf63a1b938dddf Mon Sep 17 00:00:00 2001
+From: "Lendacky, Thomas" <Thomas.Lendacky@amd.com>
+Date: Tue, 2 Apr 2019 15:21:14 +0000
+Subject: x86/perf/amd: Resolve race condition when disabling PMC
+
+From: Lendacky, Thomas <Thomas.Lendacky@amd.com>
+
+commit 914123fa39042e651d79eaf86bbf63a1b938dddf upstream.
+
+On AMD processors, the detection of an overflowed counter in the NMI
+handler relies on the current value of the counter. So, for example, to
+check for overflow on a 48 bit counter, bit 47 is checked to see if it
+is 1 (not overflowed) or 0 (overflowed).
+
+There is currently a race condition present when disabling and then
+updating the PMC. Increased NMI latency in newer AMD processors makes this
+race condition more pronounced. If the counter value has overflowed, it is
+possible to update the PMC value before the NMI handler can run. The
+updated PMC value is not an overflowed value, so when the perf NMI handler
+does run, it will not find an overflowed counter. This may appear as an
+unknown NMI resulting in either a panic or a series of messages, depending
+on how the kernel is configured.
+
+To eliminate this race condition, the PMC value must be checked after
+disabling the counter. Add an AMD function, amd_pmu_disable_all(), that
+will wait for the NMI handler to reset any active and overflowed counter
+after calling x86_pmu_disable_all().
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org> # 4.14.x-
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: https://lkml.kernel.org/r/Message-ID:
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/core.c |   65 ++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 62 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -3,6 +3,7 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include <asm/apicdef.h>
+ #include "../perf_event.h"
+@@ -429,6 +430,64 @@ static void amd_pmu_cpu_dead(int cpu)
+       }
+ }
++/*
++ * When a PMC counter overflows, an NMI is used to process the event and
++ * reset the counter. NMI latency can result in the counter being updated
++ * before the NMI can run, which can result in what appear to be spurious
++ * NMIs. This function is intended to wait for the NMI to run and reset
++ * the counter to avoid possible unhandled NMI messages.
++ */
++#define OVERFLOW_WAIT_COUNT   50
++
++static void amd_pmu_wait_on_overflow(int idx)
++{
++      unsigned int i;
++      u64 counter;
++
++      /*
++       * Wait for the counter to be reset if it has overflowed. This loop
++       * should exit very, very quickly, but just in case, don't wait
++       * forever...
++       */
++      for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
++              rdmsrl(x86_pmu_event_addr(idx), counter);
++              if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
++                      break;
++
++              /* Might be in IRQ context, so can't sleep */
++              udelay(1);
++      }
++}
++
++static void amd_pmu_disable_all(void)
++{
++      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      int idx;
++
++      x86_pmu_disable_all();
++
++      /*
++       * This shouldn't be called from NMI context, but add a safeguard here
++       * to return, since if we're in NMI context we can't wait for an NMI
++       * to reset an overflowed counter value.
++       */
++      if (in_nmi())
++              return;
++
++      /*
++       * Check each counter for overflow and wait for it to be reset by the
++       * NMI if it has overflowed. This relies on the fact that all active
++       * counters are always enabled when this function is called and
++       * ARCH_PERFMON_EVENTSEL_INT is always set.
++       */
++      for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++              if (!test_bit(idx, cpuc->active_mask))
++                      continue;
++
++              amd_pmu_wait_on_overflow(idx);
++      }
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+@@ -622,7 +681,7 @@ static ssize_t amd_event_sysfs_show(char
+ static __initconst const struct x86_pmu amd_pmu = {
+       .name                   = "AMD",
+       .handle_irq             = x86_pmu_handle_irq,
+-      .disable_all            = x86_pmu_disable_all,
++      .disable_all            = amd_pmu_disable_all,
+       .enable_all             = x86_pmu_enable_all,
+       .enable                 = x86_pmu_enable_event,
+       .disable                = x86_pmu_disable_event,
+@@ -732,7 +791,7 @@ void amd_pmu_enable_virt(void)
+       cpuc->perf_ctr_virt_mask = 0;
+       /* Reload all events */
+-      x86_pmu_disable_all();
++      amd_pmu_disable_all();
+       x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+@@ -750,7 +809,7 @@ void amd_pmu_disable_virt(void)
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+       /* Reload all events */
+-      x86_pmu_disable_all();
++      amd_pmu_disable_all();
+       x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
diff --git a/queue-5.0/xen-prevent-buffer-overflow-in-privcmd-ioctl.patch b/queue-5.0/xen-prevent-buffer-overflow-in-privcmd-ioctl.patch
new file mode 100644 (file)
index 0000000..265d3b9
--- /dev/null
@@ -0,0 +1,37 @@
+From 42d8644bd77dd2d747e004e367cb0c895a606f39 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 4 Apr 2019 18:12:17 +0300
+Subject: xen: Prevent buffer overflow in privcmd ioctl
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 42d8644bd77dd2d747e004e367cb0c895a606f39 upstream.
+
+The "call" variable comes from the user in privcmd_ioctl_hypercall().
+It's an offset into the hypercall_page[] which has (PAGE_SIZE / 32)
+elements.  We need to put an upper bound on it to prevent an out of
+bounds access.
+
+Cc: stable@vger.kernel.org
+Fixes: 1246ae0bb992 ("xen: add variable hypercall caller")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/xen/hypercall.h |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
++      if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++              return -EINVAL;
++
+       asm volatile(CALL_NOSPEC
+                    : __HYPERCALL_5PARAM
+                    : [thunk_target] "a" (&hypercall_page[call])
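
The fix is the classic bounds check on a caller-controlled index before it selects an entry from a fixed-size table. A standalone sketch of the pattern (the table and function names are invented, not the Xen hypercall page):

    #include <errno.h>
    #include <stdio.h>

    #define TABLE_ENTRIES 128	/* fixed-size table, like PAGE_SIZE / 32 entries */

    typedef long (*call_fn)(void);

    static call_fn call_table[TABLE_ENTRIES];

    /* 'call' comes from an ioctl argument: validate it before indexing. */
    static long dispatch(unsigned int call)
    {
    	if (call >= TABLE_ENTRIES)
    		return -EINVAL;	/* reject out-of-range indexes up front */

    	if (!call_table[call])
    		return -ENOSYS;

    	return call_table[call]();
    }

    int main(void)
    {
    	printf("%ld\n", dispatch(4096));	/* -EINVAL: out of bounds */
    	printf("%ld\n", dispatch(3));		/* -ENOSYS: empty slot */
    	return 0;
    }
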
diff --git a/queue-5.0/xprtrdma-fix-helper-that-drains-the-transport.patch b/queue-5.0/xprtrdma-fix-helper-that-drains-the-transport.patch
new file mode 100644 (file)
index 0000000..7846bab
--- /dev/null
@@ -0,0 +1,33 @@
+From e1ede312f17e96a9c5cda9aaa1cdcf442c1a5da8 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Tue, 9 Apr 2019 17:04:09 -0400
+Subject: xprtrdma: Fix helper that drains the transport
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit e1ede312f17e96a9c5cda9aaa1cdcf442c1a5da8 upstream.
+
+We want to drain only the RQ first. Otherwise the transport can
+deadlock on ->close if there are outstanding Send completions.
+
+Fixes: 6d2d0ee27c7a ("xprtrdma: Replace rpcrdma_receive_wq ... ")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Cc: stable@vger.kernel.org # v5.0+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/verbs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rp
+       /* Flush Receives, then wait for deferred Reply work
+        * to complete.
+        */
+-      ib_drain_qp(ia->ri_id->qp);
++      ib_drain_rq(ia->ri_id->qp);
+       drain_workqueue(buf->rb_completion_wq);
+       /* Deferred Reply processing might have scheduled
diff --git a/queue-5.0/xtensa-fix-return_address.patch b/queue-5.0/xtensa-fix-return_address.patch
new file mode 100644 (file)
index 0000000..f05f33a
--- /dev/null
@@ -0,0 +1,42 @@
+From ada770b1e74a77fff2d5f539bf6c42c25f4784db Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Thu, 4 Apr 2019 11:08:40 -0700
+Subject: xtensa: fix return_address
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit ada770b1e74a77fff2d5f539bf6c42c25f4784db upstream.
+
+return_address returns the address that is one level higher in the call
+stack than requested in its argument, because level 0 corresponds to its
+caller's return address. Use requested level as the number of stack
+frames to skip.
+
+This fixes the address reported by might_sleep and friends.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/stacktrace.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -253,10 +253,14 @@ static int return_address_cb(struct stac
+       return 1;
+ }
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+       struct return_addr_data r = {
+-              .skip = level + 1,
++              .skip = level,
+       };
+       walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+       return r.addr;