git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.2-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 5 Aug 2019 05:41:04 +0000 (07:41 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 5 Aug 2019 05:41:04 +0000 (07:41 +0200)
added patches:
alsa-hda-fix-1-minute-detection-delay-when-i915-module-is-not-available.patch
arm64-compat-allow-single-byte-watchpoints-on-all-addresses.patch
arm64-cpufeature-fix-feature-comparison-for-ctr_el0.-cwg-erg.patch
cgroup-kselftest-relax-fs_spec-checks.patch
clk-mediatek-mt8183-register-13mhz-clock-earlier-for-clocksource.patch
drivers-perf-arm_pmu-fix-failure-path-in-pm-notifier.patch
eeprom-at24-make-spd-world-readable-again.patch
i2c-at91-disable-txrdy-interrupt-after-sending-data.patch
i2c-at91-fix-clk_offset-for-sama5d2.patch
i2c-iproc-fix-i2c-master-read-more-than-63-bytes.patch
ib-hfi1-check-for-error-on-call-to-alloc_rsm_map_table.patch
ib-hfi1-drop-all-tid-rdma-read-resp-packets-after-r_next_psn.patch
ib-hfi1-field-not-zero-ed-when-allocating-tid-flow-memory.patch
ib-mlx5-fix-clean_mr-to-work-in-the-expected-order.patch
ib-mlx5-fix-rss-toeplitz-setup-to-be-aligned-with-the-hw-specification.patch
ib-mlx5-fix-unreg_umr-to-ignore-the-mkey-state.patch
ib-mlx5-move-mrs-to-a-kernel-pd-when-freeing-them-to-the-mr-cache.patch
ib-mlx5-use-direct-mkey-destroy-command-upon-umr-unreg-failure.patch
io_uring-fix-kasan-use-after-free-in-io_sq_wq_submit_work.patch
loop-fix-mount-2-failure-due-to-race-with-loop_set_fd.patch
mm-compaction-avoid-100-cpu-usage-during-compaction-when-a-task-is-killed.patch
mm-migrate-fix-reference-check-race-between-__find_get_block-and-migration.patch
mm-migrate.c-initialize-pud_entry-in-migrate_vma.patch
mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch
mtd-rawnand-micron-handle-on-die-ecc-off-devices-correctly.patch
nbd-replace-kill_bdev-with-__invalidate_device-again.patch
parisc-add-archclean-makefile-target.patch
parisc-fix-build-of-compressed-kernel-even-with-debug-enabled.patch
parisc-strip-debug-info-from-kernel-before-creating-compressed-vmlinuz.patch
powerpc-kasan-fix-early-boot-failure-on-ppc32.patch
rdma-bnxt_re-honor-vlan_id-in-gid-entry-comparison.patch
rdma-devices-do-not-deadlock-during-client-removal.patch
s390-dasd-fix-endless-loop-after-read-unit-address-configuration.patch
scsi-mpt3sas-use-63-bit-dma-addressing-on-sas35-hba.patch
selinux-fix-memory-leak-in-policydb_init.patch
ubsan-build-ubsan.c-more-conservatively.patch
xen-gntdev.c-replace-vm_map_pages-with-vm_map_pages_zero.patch
xen-swiotlb-fix-condition-for-calling-xen_destroy_contiguous_region.patch

39 files changed:
queue-5.2/alsa-hda-fix-1-minute-detection-delay-when-i915-module-is-not-available.patch [new file with mode: 0644]
queue-5.2/arm64-compat-allow-single-byte-watchpoints-on-all-addresses.patch [new file with mode: 0644]
queue-5.2/arm64-cpufeature-fix-feature-comparison-for-ctr_el0.-cwg-erg.patch [new file with mode: 0644]
queue-5.2/cgroup-kselftest-relax-fs_spec-checks.patch [new file with mode: 0644]
queue-5.2/clk-mediatek-mt8183-register-13mhz-clock-earlier-for-clocksource.patch [new file with mode: 0644]
queue-5.2/drivers-perf-arm_pmu-fix-failure-path-in-pm-notifier.patch [new file with mode: 0644]
queue-5.2/eeprom-at24-make-spd-world-readable-again.patch [new file with mode: 0644]
queue-5.2/i2c-at91-disable-txrdy-interrupt-after-sending-data.patch [new file with mode: 0644]
queue-5.2/i2c-at91-fix-clk_offset-for-sama5d2.patch [new file with mode: 0644]
queue-5.2/i2c-iproc-fix-i2c-master-read-more-than-63-bytes.patch [new file with mode: 0644]
queue-5.2/ib-hfi1-check-for-error-on-call-to-alloc_rsm_map_table.patch [new file with mode: 0644]
queue-5.2/ib-hfi1-drop-all-tid-rdma-read-resp-packets-after-r_next_psn.patch [new file with mode: 0644]
queue-5.2/ib-hfi1-field-not-zero-ed-when-allocating-tid-flow-memory.patch [new file with mode: 0644]
queue-5.2/ib-mlx5-fix-clean_mr-to-work-in-the-expected-order.patch [new file with mode: 0644]
queue-5.2/ib-mlx5-fix-rss-toeplitz-setup-to-be-aligned-with-the-hw-specification.patch [new file with mode: 0644]
queue-5.2/ib-mlx5-fix-unreg_umr-to-ignore-the-mkey-state.patch [new file with mode: 0644]
queue-5.2/ib-mlx5-move-mrs-to-a-kernel-pd-when-freeing-them-to-the-mr-cache.patch [new file with mode: 0644]
queue-5.2/ib-mlx5-use-direct-mkey-destroy-command-upon-umr-unreg-failure.patch [new file with mode: 0644]
queue-5.2/io_uring-fix-kasan-use-after-free-in-io_sq_wq_submit_work.patch [new file with mode: 0644]
queue-5.2/loop-fix-mount-2-failure-due-to-race-with-loop_set_fd.patch [new file with mode: 0644]
queue-5.2/mm-compaction-avoid-100-cpu-usage-during-compaction-when-a-task-is-killed.patch [new file with mode: 0644]
queue-5.2/mm-migrate-fix-reference-check-race-between-__find_get_block-and-migration.patch [new file with mode: 0644]
queue-5.2/mm-migrate.c-initialize-pud_entry-in-migrate_vma.patch [new file with mode: 0644]
queue-5.2/mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch [new file with mode: 0644]
queue-5.2/mtd-rawnand-micron-handle-on-die-ecc-off-devices-correctly.patch [new file with mode: 0644]
queue-5.2/nbd-replace-kill_bdev-with-__invalidate_device-again.patch [new file with mode: 0644]
queue-5.2/parisc-add-archclean-makefile-target.patch [new file with mode: 0644]
queue-5.2/parisc-fix-build-of-compressed-kernel-even-with-debug-enabled.patch [new file with mode: 0644]
queue-5.2/parisc-strip-debug-info-from-kernel-before-creating-compressed-vmlinuz.patch [new file with mode: 0644]
queue-5.2/powerpc-kasan-fix-early-boot-failure-on-ppc32.patch [new file with mode: 0644]
queue-5.2/rdma-bnxt_re-honor-vlan_id-in-gid-entry-comparison.patch [new file with mode: 0644]
queue-5.2/rdma-devices-do-not-deadlock-during-client-removal.patch [new file with mode: 0644]
queue-5.2/s390-dasd-fix-endless-loop-after-read-unit-address-configuration.patch [new file with mode: 0644]
queue-5.2/scsi-mpt3sas-use-63-bit-dma-addressing-on-sas35-hba.patch [new file with mode: 0644]
queue-5.2/selinux-fix-memory-leak-in-policydb_init.patch [new file with mode: 0644]
queue-5.2/series
queue-5.2/ubsan-build-ubsan.c-more-conservatively.patch [new file with mode: 0644]
queue-5.2/xen-gntdev.c-replace-vm_map_pages-with-vm_map_pages_zero.patch [new file with mode: 0644]
queue-5.2/xen-swiotlb-fix-condition-for-calling-xen_destroy_contiguous_region.patch [new file with mode: 0644]

diff --git a/queue-5.2/alsa-hda-fix-1-minute-detection-delay-when-i915-module-is-not-available.patch b/queue-5.2/alsa-hda-fix-1-minute-detection-delay-when-i915-module-is-not-available.patch
new file mode 100644 (file)
index 0000000..b0b1fe8
--- /dev/null
@@ -0,0 +1,52 @@
+From 74bf71ed792ab0f64631cc65ccdb54c356c36d45 Mon Sep 17 00:00:00 2001
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Date: Fri, 26 Jul 2019 23:47:02 +0200
+Subject: ALSA: hda: Fix 1-minute detection delay when i915 module is not available
+
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+
+commit 74bf71ed792ab0f64631cc65ccdb54c356c36d45 upstream.
+
+Distribution installation images such as Debian include different sets
+of modules which can be downloaded dynamically.  Such images may notably
+include the hda sound modules but not the i915 DRM module, even if the
+latter was enabled at build time, as reported on
+https://bugs.debian.org/931507
+
+In such a case hdac_i915 would be linked in and try to load the i915
+module, fail since it is not there, but still wait for a whole minute
+before giving up binding with it.
+
+This fixes such a case by only waiting for the binding if the module
+was properly loaded (or module support is disabled, in which case i915
+is already compiled-in anyway).
+
+Fixes: f9b54e1961c7 ("ALSA: hda/i915: Allow delayed i915 audio component binding")
+Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/hda/hdac_i915.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *
+       if (!acomp)
+               return -ENODEV;
+       if (!acomp->ops) {
+-              request_module("i915");
+-              /* 60s timeout */
+-              wait_for_completion_timeout(&bind_complete,
+-                                          msecs_to_jiffies(60 * 1000));
++              if (!IS_ENABLED(CONFIG_MODULES) ||
++                  !request_module("i915")) {
++                      /* 60s timeout */
++                      wait_for_completion_timeout(&bind_complete,
++                                                 msecs_to_jiffies(60 * 1000));
++              }
+       }
+       if (!acomp->ops) {
+               dev_info(bus->dev, "couldn't bind with audio component\n");
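
The new condition only waits when the i915 code is guaranteed to be reachable: either module support is compiled out (so i915, if enabled, is built in) or request_module() returned 0, i.e. the module was actually loaded. A small userspace analogue of that short-circuit, with stubbed helpers (purely illustrative, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for IS_ENABLED(CONFIG_MODULES) and request_module("i915") */
    static const bool modules_enabled = true;

    static int request_module_stub(const char *name)
    {
            printf("modprobe %s -> not found\n", name);
            return -1;                      /* non-zero means the load failed */
    }

    int main(void)
    {
            if (!modules_enabled || !request_module_stub("i915"))
                    puts("wait up to 60s for the audio component to bind");
            else
                    puts("module missing: skip the pointless 60s wait");
            return 0;
    }
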
diff --git a/queue-5.2/arm64-compat-allow-single-byte-watchpoints-on-all-addresses.patch b/queue-5.2/arm64-compat-allow-single-byte-watchpoints-on-all-addresses.patch
new file mode 100644 (file)
index 0000000..fb593bd
--- /dev/null
@@ -0,0 +1,42 @@
+From 849adec41203ac5837c40c2d7e08490ffdef3c2c Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Mon, 29 Jul 2019 11:06:17 +0100
+Subject: arm64: compat: Allow single-byte watchpoints on all addresses
+
+From: Will Deacon <will@kernel.org>
+
+commit 849adec41203ac5837c40c2d7e08490ffdef3c2c upstream.
+
+Commit d968d2b801d8 ("ARM: 7497/1: hw_breakpoint: allow single-byte
+watchpoints on all addresses") changed the validation requirements for
+hardware watchpoints on arch/arm/. Update our compat layer to implement
+the same relaxation.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/hw_breakpoint.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -536,13 +536,14 @@ int hw_breakpoint_arch_parse(struct perf
+                       /* Aligned */
+                       break;
+               case 1:
+-                      /* Allow single byte watchpoint. */
+-                      if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+-                              break;
+               case 2:
+                       /* Allow halfword watchpoints and breakpoints. */
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
+                               break;
++              case 3:
++                      /* Allow single byte watchpoint. */
++                      if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
++                              break;
+               default:
+                       return -EINVAL;
+               }
diff --git a/queue-5.2/arm64-cpufeature-fix-feature-comparison-for-ctr_el0.-cwg-erg.patch b/queue-5.2/arm64-cpufeature-fix-feature-comparison-for-ctr_el0.-cwg-erg.patch
new file mode 100644 (file)
index 0000000..4f43265
--- /dev/null
@@ -0,0 +1,70 @@
+From 147b9635e6347104b91f48ca9dca61eb0fbf2a54 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Tue, 30 Jul 2019 15:40:20 +0100
+Subject: arm64: cpufeature: Fix feature comparison for CTR_EL0.{CWG,ERG}
+
+From: Will Deacon <will@kernel.org>
+
+commit 147b9635e6347104b91f48ca9dca61eb0fbf2a54 upstream.
+
+If CTR_EL0.{CWG,ERG} are 0b0000 then they must be interpreted to have
+their architecturally maximum values, which defeats the use of
+FTR_HIGHER_SAFE when sanitising CPU ID registers on heterogeneous
+machines.
+
+Introduce FTR_HIGHER_OR_ZERO_SAFE so that these fields effectively
+saturate at zero.
+
+Fixes: 3c739b571084 ("arm64: Keep track of CPU feature registers")
+Cc: <stable@vger.kernel.org> # 4.4.x-
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpufeature.h |    7 ++++---
+ arch/arm64/kernel/cpufeature.c      |    8 ++++++--
+ 2 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -35,9 +35,10 @@
+  */
+ enum ftr_type {
+-      FTR_EXACT,      /* Use a predefined safe value */
+-      FTR_LOWER_SAFE, /* Smaller value is safe */
+-      FTR_HIGHER_SAFE,/* Bigger value is safe */
++      FTR_EXACT,                      /* Use a predefined safe value */
++      FTR_LOWER_SAFE,                 /* Smaller value is safe */
++      FTR_HIGHER_SAFE,                /* Bigger value is safe */
++      FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
+ };
+ #define FTR_STRICT    true    /* SANITY check strict matching required */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_c
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+       /*
+        * Linux can handle differing I-cache policies. Userspace JITs will
+@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const st
+       case FTR_LOWER_SAFE:
+               ret = new < cur ? new : cur;
+               break;
++      case FTR_HIGHER_OR_ZERO_SAFE:
++              if (!cur || !new)
++                      break;
++              /* Fallthrough */
+       case FTR_HIGHER_SAFE:
+               ret = new > cur ? new : cur;
+               break;
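
The saturation rule above can be read as: for CTR_EL0.{CWG,ERG} the bigger value is normally safer, but 0 means "architecturally maximum" and therefore must win over any non-zero value. A minimal standalone sketch of that selection rule (hypothetical helper, not the kernel function itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the sanitised value for a "higher or zero is safe" field:
     * 0 stands for the architectural maximum, so it saturates the result;
     * otherwise the bigger of the two values is the safe one. */
    static int64_t higher_or_zero_safe(int64_t cur, int64_t new)
    {
            if (!cur || !new)
                    return 0;
            return new > cur ? new : cur;
    }

    int main(void)
    {
            printf("%lld\n", (long long)higher_or_zero_safe(4, 5)); /* 5 */
            printf("%lld\n", (long long)higher_or_zero_safe(4, 0)); /* 0 */
            return 0;
    }
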
diff --git a/queue-5.2/cgroup-kselftest-relax-fs_spec-checks.patch b/queue-5.2/cgroup-kselftest-relax-fs_spec-checks.patch
new file mode 100644 (file)
index 0000000..06895b6
--- /dev/null
@@ -0,0 +1,49 @@
+From b59b1baab789eacdde809135542e3d4f256f6878 Mon Sep 17 00:00:00 2001
+From: Chris Down <chris@chrisdown.name>
+Date: Fri, 2 Aug 2019 21:49:15 -0700
+Subject: cgroup: kselftest: relax fs_spec checks
+
+From: Chris Down <chris@chrisdown.name>
+
+commit b59b1baab789eacdde809135542e3d4f256f6878 upstream.
+
+On my laptop most memcg kselftests were being skipped because it claimed
+cgroup v2 hierarchy wasn't mounted, but this isn't correct.  Instead, it
+seems current systemd HEAD mounts it with the name "cgroup2" instead of
+"cgroup":
+
+    % grep cgroup /proc/mounts
+    cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0
+
+I can't think of a reason to need to check fs_spec explicitly
+since it's arbitrary, so we can just rely on fs_vfstype.
+
+After these changes, `make TARGETS=cgroup kselftest` actually runs the
+cgroup v2 tests in more cases.
+
+Link: http://lkml.kernel.org/r/20190723210737.GA487@chrisdown.name
+Signed-off-by: Chris Down <chris@chrisdown.name>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/cgroup/cgroup_util.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, siz
+               strtok(NULL, delim);
+               strtok(NULL, delim);
+-              if (strcmp(fs, "cgroup") == 0 &&
+-                  strcmp(type, "cgroup2") == 0) {
++              if (strcmp(type, "cgroup2") == 0) {
+                       strncpy(root, mount, len);
+                       return 0;
+               }
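
The relaxed check amounts to keying only on the third field of each /proc/mounts line (fs_vfstype) and ignoring the first one (fs_spec), which systemd may set to either "cgroup" or "cgroup2". A rough standalone sketch of that parsing, assuming the usual space-separated /proc/mounts layout (hypothetical helper, not the selftest code):

    #include <stdio.h>
    #include <string.h>

    /* Copy the mount point of the first cgroup2 filesystem into root. */
    static int find_unified_root(char *root, size_t len)
    {
            char line[512];
            FILE *f = fopen("/proc/mounts", "r");

            if (!f)
                    return -1;

            while (fgets(line, sizeof(line), f)) {
                    /* fields: fs_spec fs_file fs_vfstype fs_mntops ... */
                    char *spec  = strtok(line, " ");
                    char *mount = strtok(NULL, " ");
                    char *type  = strtok(NULL, " ");

                    (void)spec;     /* arbitrary, so not checked any more */
                    if (mount && type && strcmp(type, "cgroup2") == 0) {
                            snprintf(root, len, "%s", mount);
                            fclose(f);
                            return 0;
                    }
            }
            fclose(f);
            return -1;
    }

    int main(void)
    {
            char root[256];

            if (!find_unified_root(root, sizeof(root)))
                    printf("cgroup2 mounted at %s\n", root);
            return 0;
    }
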
diff --git a/queue-5.2/clk-mediatek-mt8183-register-13mhz-clock-earlier-for-clocksource.patch b/queue-5.2/clk-mediatek-mt8183-register-13mhz-clock-earlier-for-clocksource.patch
new file mode 100644 (file)
index 0000000..953c86b
--- /dev/null
@@ -0,0 +1,106 @@
+From c93d059a80450af99dd6c0e8c36790579343675a Mon Sep 17 00:00:00 2001
+From: Weiyi Lu <weiyi.lu@mediatek.com>
+Date: Fri, 28 Jun 2019 15:22:34 +0800
+Subject: clk: mediatek: mt8183: Register 13MHz clock earlier for clocksource
+
+From: Weiyi Lu <weiyi.lu@mediatek.com>
+
+commit c93d059a80450af99dd6c0e8c36790579343675a upstream.
+
+The 13MHz clock should be registered before the clocksource driver is
+initialized. Use CLK_OF_DECLARE_DRIVER() to guarantee this.
+
+Fixes: acddfc2c261b ("clk: mediatek: Add MT8183 clock support")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Weiyi Lu <weiyi.lu@mediatek.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/mediatek/clk-mt8183.c |   46 ++++++++++++++++++++++++++++----------
+ 1 file changed, 34 insertions(+), 12 deletions(-)
+
+--- a/drivers/clk/mediatek/clk-mt8183.c
++++ b/drivers/clk/mediatek/clk-mt8183.c
+@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fi
+       FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
+ };
++static const struct mtk_fixed_factor top_early_divs[] = {
++      FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
++};
++
+ static const struct mtk_fixed_factor top_divs[] = {
+-      FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
+-              2),
+       FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
+@@ -1167,37 +1169,57 @@ static int clk_mt8183_apmixed_probe(stru
+       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ }
++static struct clk_onecell_data *top_clk_data;
++
++static void clk_mt8183_top_init_early(struct device_node *node)
++{
++      int i;
++
++      top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++
++      for (i = 0; i < CLK_TOP_NR_CLK; i++)
++              top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
++
++      mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
++                      top_clk_data);
++
++      of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
++}
++
++CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
++                      clk_mt8183_top_init_early);
++
+ static int clk_mt8183_top_probe(struct platform_device *pdev)
+ {
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       void __iomem *base;
+-      struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+-      clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+-
+       mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+-              clk_data);
++              top_clk_data);
++
++      mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
++              top_clk_data);
+-      mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
++      mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+       mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+-              node, &mt8183_clk_lock, clk_data);
++              node, &mt8183_clk_lock, top_clk_data);
+       mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+-              base, &mt8183_clk_lock, clk_data);
++              base, &mt8183_clk_lock, top_clk_data);
+       mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+-              base, &mt8183_clk_lock, clk_data);
++              base, &mt8183_clk_lock, top_clk_data);
+       mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-              clk_data);
++              top_clk_data);
+-      return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++      return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ }
+ static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/queue-5.2/drivers-perf-arm_pmu-fix-failure-path-in-pm-notifier.patch b/queue-5.2/drivers-perf-arm_pmu-fix-failure-path-in-pm-notifier.patch
new file mode 100644 (file)
index 0000000..bd1f5aa
--- /dev/null
@@ -0,0 +1,36 @@
+From 0d7fd70f26039bd4b33444ca47f0e69ce3ae0354 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Mon, 29 Jul 2019 11:43:48 +0100
+Subject: drivers/perf: arm_pmu: Fix failure path in PM notifier
+
+From: Will Deacon <will@kernel.org>
+
+commit 0d7fd70f26039bd4b33444ca47f0e69ce3ae0354 upstream.
+
+Handling of the CPU_PM_ENTER_FAILED transition in the Arm PMU PM
+notifier code incorrectly skips restoration of the counters. Fix the
+logic so that CPU_PM_ENTER_FAILED follows the same path as CPU_PM_EXIT.
+
+Cc: <stable@vger.kernel.org>
+Fixes: da4e4f18afe0f372 ("drivers/perf: arm_pmu: implement CPU_PM notifier")
+Reported-by: Anders Roxell <anders.roxell@linaro.org>
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/perf/arm_pmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct noti
+               cpu_pm_pmu_setup(armpmu, cmd);
+               break;
+       case CPU_PM_EXIT:
+-              cpu_pm_pmu_setup(armpmu, cmd);
+       case CPU_PM_ENTER_FAILED:
++              cpu_pm_pmu_setup(armpmu, cmd);
+               armpmu->start(armpmu);
+               break;
+       default:
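
The one-line move makes CPU_PM_ENTER_FAILED fall through into the same restore path as CPU_PM_EXIT, since a failed low-power entry leaves the PMU in the same state as a completed exit. A tiny standalone analogue of the intended control flow (illustrative names, not the driver code):

    #include <stdio.h>

    enum pm_event { PM_ENTER, PM_EXIT, PM_ENTER_FAILED };

    static void save_counters(void)    { puts("save counters");    }
    static void restore_counters(void) { puts("restore counters"); }
    static void start_pmu(void)        { puts("start pmu");        }

    static void pm_notify(enum pm_event cmd)
    {
            switch (cmd) {
            case PM_ENTER:
                    save_counters();
                    break;
            case PM_EXIT:
            case PM_ENTER_FAILED:
                    /* both events must restore state before restarting the PMU */
                    restore_counters();
                    start_pmu();
                    break;
            }
    }

    int main(void)
    {
            pm_notify(PM_ENTER_FAILED);   /* restores, then starts */
            return 0;
    }
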
diff --git a/queue-5.2/eeprom-at24-make-spd-world-readable-again.patch b/queue-5.2/eeprom-at24-make-spd-world-readable-again.patch
new file mode 100644 (file)
index 0000000..8198192
--- /dev/null
@@ -0,0 +1,38 @@
+From 25e5ef302c24a6fead369c0cfe88c073d7b97ca8 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Sun, 28 Jul 2019 18:41:38 +0200
+Subject: eeprom: at24: make spd world-readable again
+
+From: Jean Delvare <jdelvare@suse.de>
+
+commit 25e5ef302c24a6fead369c0cfe88c073d7b97ca8 upstream.
+
+The integration of the at24 driver into the nvmem framework broke the
+world-readability of spd EEPROMs. Fix it.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Cc: stable@vger.kernel.org
+Fixes: 57d155506dd5 ("eeprom: at24: extend driver to plug into the NVMEM framework")
+Cc: Andrew Lunn <andrew@lunn.ch>
+Cc: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Bartosz Golaszewski <brgl@bgdev.pl>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/eeprom/at24.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -719,7 +719,7 @@ static int at24_probe(struct i2c_client
+       nvmem_config.name = dev_name(dev);
+       nvmem_config.dev = dev;
+       nvmem_config.read_only = !writable;
+-      nvmem_config.root_only = true;
++      nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
+       nvmem_config.owner = THIS_MODULE;
+       nvmem_config.compat = true;
+       nvmem_config.base_dev = dev;
diff --git a/queue-5.2/i2c-at91-disable-txrdy-interrupt-after-sending-data.patch b/queue-5.2/i2c-at91-disable-txrdy-interrupt-after-sending-data.patch
new file mode 100644 (file)
index 0000000..6cfe060
--- /dev/null
@@ -0,0 +1,57 @@
+From d12e3aae160fb26b534c4496b211d6e60a5179ed Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= <mirq-linux@rere.qmqm.pl>
+Date: Mon, 22 Jul 2019 20:55:27 +0200
+Subject: i2c: at91: disable TXRDY interrupt after sending data
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+
+commit d12e3aae160fb26b534c4496b211d6e60a5179ed upstream.
+
+Driver was not disabling TXRDY interrupt after last TX byte.
+This caused interrupt storm until transfer timeouts for slow
+or broken device on the bus. The patch fixes the interrupt storm
+on my SAMA5D2-based board.
+
+Cc: stable@vger.kernel.org # 5.2.x
+[v5.2 introduced file split; the patch should apply to i2c-at91.c before the split]
+Fixes: fac368a04048 ("i2c: at91: add new driver")
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Tested-by: Raag Jadav <raagjadav@gmail.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-at91-master.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-at91-master.c
++++ b/drivers/i2c/busses/i2c-at91-master.c
+@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(str
+       writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
+       /* send stop when last byte has been written */
+-      if (--dev->buf_len == 0)
++      if (--dev->buf_len == 0) {
+               if (!dev->use_alt_cmd)
+                       at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
++              at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
++      }
+       dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
+@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct a
+               } else {
+                       at91_twi_write_next_byte(dev);
+                       at91_twi_write(dev, AT91_TWI_IER,
+-                                     AT91_TWI_TXCOMP |
+-                                     AT91_TWI_NACK |
+-                                     AT91_TWI_TXRDY);
++                                     AT91_TWI_TXCOMP | AT91_TWI_NACK |
++                                     (dev->buf_len ? AT91_TWI_TXRDY : 0));
+               }
+       }
diff --git a/queue-5.2/i2c-at91-fix-clk_offset-for-sama5d2.patch b/queue-5.2/i2c-at91-fix-clk_offset-for-sama5d2.patch
new file mode 100644 (file)
index 0000000..6b7ed78
--- /dev/null
@@ -0,0 +1,38 @@
+From b1ac6704493fa14b5dc19eb6b69a73932361a131 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= <mirq-linux@rere.qmqm.pl>
+Date: Mon, 22 Jul 2019 21:05:56 +0200
+Subject: i2c: at91: fix clk_offset for sama5d2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+
+commit b1ac6704493fa14b5dc19eb6b69a73932361a131 upstream.
+
+In the SAMA5D2 datasheet, the TWIHS_CWGR register description mentions a
+clock offset of 3 cycles (compared to 4 in e.g. SAMA5D3).
+
+Cc: stable@vger.kernel.org # 5.2.x
+[needs applying to i2c-at91.c instead for earlier kernels]
+Fixes: 0ef6f3213dac ("i2c: at91: add support for new alternative command mode")
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-at91-core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-at91-core.c
++++ b/drivers/i2c/busses/i2c-at91-core.c
+@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_con
+ static struct at91_twi_pdata sama5d2_config = {
+       .clk_max_div = 7,
+-      .clk_offset = 4,
++      .clk_offset = 3,
+       .has_unre_flag = true,
+       .has_alt_cmd = true,
+       .has_hold_field = true,
diff --git a/queue-5.2/i2c-iproc-fix-i2c-master-read-more-than-63-bytes.patch b/queue-5.2/i2c-iproc-fix-i2c-master-read-more-than-63-bytes.patch
new file mode 100644 (file)
index 0000000..02a9773
--- /dev/null
@@ -0,0 +1,50 @@
+From fd01eecdf9591453177d7b06faaabef8c300114a Mon Sep 17 00:00:00 2001
+From: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+Date: Wed, 24 Jul 2019 13:58:27 +0530
+Subject: i2c: iproc: Fix i2c master read more than 63 bytes
+
+From: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+
+commit fd01eecdf9591453177d7b06faaabef8c300114a upstream.
+
+Use SMBUS_MASTER_DATA_READ.MASTER_RD_STATUS bit to check for RX
+FIFO empty condition because SMBUS_MASTER_FIFO_CONTROL.MASTER_RX_PKT_COUNT
+is not updated for reads >= 64 bytes. This fixes the issue when trying to
+read more than 63 bytes from the I2C slave.
+
+Fixes: c24b8d574b7c ("i2c: iproc: Extend I2C read up to 255 bytes")
+Cc: stable@kernel.org
+Signed-off-by: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+Reviewed-by: Ray Jui <ray.jui@broadcom.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-bcm-iproc.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -403,16 +403,18 @@ static bool bcm_iproc_i2c_slave_isr(stru
+ static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
+ {
+       struct i2c_msg *msg = iproc_i2c->msg;
++      uint32_t val;
+       /* Read valid data from RX FIFO */
+       while (iproc_i2c->rx_bytes < msg->len) {
+-              if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
+-                    & M_FIFO_RX_CNT_MASK))
++              val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
++
++              /* rx fifo empty */
++              if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
+                       break;
+               msg->buf[iproc_i2c->rx_bytes] =
+-                      (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
+-                      M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
++                      (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+               iproc_i2c->rx_bytes++;
+       }
+ }
diff --git a/queue-5.2/ib-hfi1-check-for-error-on-call-to-alloc_rsm_map_table.patch b/queue-5.2/ib-hfi1-check-for-error-on-call-to-alloc_rsm_map_table.patch
new file mode 100644 (file)
index 0000000..99e8dc8
--- /dev/null
@@ -0,0 +1,66 @@
+From cd48a82087231fdba0e77521102386c6ed0168d6 Mon Sep 17 00:00:00 2001
+From: John Fleck <john.fleck@intel.com>
+Date: Mon, 15 Jul 2019 12:45:21 -0400
+Subject: IB/hfi1: Check for error on call to alloc_rsm_map_table
+
+From: John Fleck <john.fleck@intel.com>
+
+commit cd48a82087231fdba0e77521102386c6ed0168d6 upstream.
+
+The call to alloc_rsm_map_table does not check if the kmalloc fails.
+Check for a NULL on alloc, and bail if it fails.
+
+Fixes: 372cc85a13c9 ("IB/hfi1: Extract RSM map table init from QOS")
+Link: https://lore.kernel.org/r/20190715164521.74174.27047.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: John Fleck <john.fleck@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/chip.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_de
+               clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+ }
+-static void init_rxe(struct hfi1_devdata *dd)
++static int init_rxe(struct hfi1_devdata *dd)
+ {
+       struct rsm_map_table *rmt;
+       u64 val;
+@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata
+       write_csr(dd, RCV_ERR_MASK, ~0ull);
+       rmt = alloc_rsm_map_table(dd);
++      if (!rmt)
++              return -ENOMEM;
++
+       /* set up QOS, including the QPN map table */
+       init_qos(dd, rmt);
+       init_fecn_handling(dd, rmt);
+@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata
+       val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
+               RCV_BYPASS_HDR_SIZE_SHIFT);
+       write_csr(dd, RCV_BYPASS, val);
++      return 0;
+ }
+ static void init_other(struct hfi1_devdata *dd)
+@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd
+               goto bail_cleanup;
+       /* set initial RXE CSRs */
+-      init_rxe(dd);
++      ret = init_rxe(dd);
++      if (ret)
++              goto bail_cleanup;
++
+       /* set initial TXE CSRs */
+       init_txe(dd);
+       /* set initial non-RXE, non-TXE CSRs */
diff --git a/queue-5.2/ib-hfi1-drop-all-tid-rdma-read-resp-packets-after-r_next_psn.patch b/queue-5.2/ib-hfi1-drop-all-tid-rdma-read-resp-packets-after-r_next_psn.patch
new file mode 100644 (file)
index 0000000..1696fde
--- /dev/null
@@ -0,0 +1,93 @@
+From f4d46119f214f9a7620b0d18b153d7e0e8c90b4f Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 15 Jul 2019 12:45:40 -0400
+Subject: IB/hfi1: Drop all TID RDMA READ RESP packets after r_next_psn
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit f4d46119f214f9a7620b0d18b153d7e0e8c90b4f upstream.
+
+When a TID sequence error occurs while receiving TID RDMA READ RESP
+packets, all packets after flow->flow_state.r_next_psn should be dropped,
+including those response packets for subsequent segments.
+
+The current implementation will drop the subsequent response packets for
+the segment to complete next, but may accept packets for subsequent
+segments and therefore mistakenly advance the r_next_psn fields for the
+corresponding software flows. This may result in failures to complete
+subsequent segments after the current segment is completed.
+
+The fix is to only use the flow pointed by req->clear_tail for checking
+KDETH PSN instead of finding a flow from the request's flow array.
+
+Fixes: b885d5be9ca1 ("IB/hfi1: Unify the software PSN check for TID RDMA READ/WRITE")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190715164540.74174.54702.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c |   42 ----------------------------------
+ 1 file changed, 1 insertion(+), 41 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -1673,34 +1673,6 @@ static struct tid_rdma_flow *find_flow_i
+       return NULL;
+ }
+-static struct tid_rdma_flow *
+-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
+-                 u32 psn, u16 *fidx)
+-{
+-      for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
+-            tail = CIRC_NEXT(tail, MAX_FLOWS)) {
+-              struct tid_rdma_flow *flow = &req->flows[tail];
+-              u32 spsn, lpsn;
+-
+-              spsn = full_flow_psn(flow, flow->flow_state.spsn);
+-              lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+-
+-              if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
+-                      if (fidx)
+-                              *fidx = tail;
+-                      return flow;
+-              }
+-      }
+-      return NULL;
+-}
+-
+-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
+-                                     u32 psn, u16 *fidx)
+-{
+-      return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
+-                                fidx);
+-}
+-
+ /* TID RDMA READ functions */
+ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+                                   struct ib_other_headers *ohdr, u32 *bth1,
+@@ -2790,19 +2762,7 @@ static bool handle_read_kdeth_eflags(str
+                        * to prevent continuous Flow Sequence errors for any
+                        * packets that could be still in the fabric.
+                        */
+-                      flow = find_flow(req, psn, NULL);
+-                      if (!flow) {
+-                              /*
+-                               * We can't find the IB PSN matching the
+-                               * received KDETH PSN. The only thing we can
+-                               * do at this point is report the error to
+-                               * the QP.
+-                               */
+-                              hfi1_kern_read_tid_flow_free(qp);
+-                              spin_unlock(&qp->s_lock);
+-                              rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+-                              return ret;
+-                      }
++                      flow = &req->flows[req->clear_tail];
+                       if (priv->s_flags & HFI1_R_TID_SW_PSN) {
+                               diff = cmp_psn(psn,
+                                              flow->flow_state.r_next_psn);
diff --git a/queue-5.2/ib-hfi1-field-not-zero-ed-when-allocating-tid-flow-memory.patch b/queue-5.2/ib-hfi1-field-not-zero-ed-when-allocating-tid-flow-memory.patch
new file mode 100644 (file)
index 0000000..69247b2
--- /dev/null
@@ -0,0 +1,43 @@
+From dc25b239ebeaa3c58e5ceaa732140427d386aa16 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 15 Jul 2019 12:45:34 -0400
+Subject: IB/hfi1: Field not zero-ed when allocating TID flow memory
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit dc25b239ebeaa3c58e5ceaa732140427d386aa16 upstream.
+
+The field flow->resync_npkts is added for TID RDMA WRITE request and
+zero-ed when a TID RDMA WRITE RESP packet is received by the requester.
+This field is used to rewind a request during retry in the function
+hfi1_tid_rdma_restart_req() shared by both TID RDMA WRITE and TID RDMA
+READ requests. Therefore, when a TID RDMA READ request is retried, this
+field may not be initialized at all, which causes the retry to start at an
+incorrect psn, leading to the drop of the retry request by the responder.
+
+This patch fixes the problem by zeroing out the field when the flow memory
+is allocated.
+
+Fixes: 838b6fd2d9ca ("IB/hfi1: TID RDMA RcvArray programming and TID allocation")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190715164534.74174.6177.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows
+               flows[i].req = req;
+               flows[i].npagesets = 0;
+               flows[i].pagesets[0].mapped =  0;
++              flows[i].resync_npkts = 0;
+       }
+       req->flows = flows;
+       return 0;
diff --git a/queue-5.2/ib-mlx5-fix-clean_mr-to-work-in-the-expected-order.patch b/queue-5.2/ib-mlx5-fix-clean_mr-to-work-in-the-expected-order.patch
new file mode 100644 (file)
index 0000000..bb7b8c9
--- /dev/null
@@ -0,0 +1,45 @@
+From b9332dad987018745a0c0bb718d12dacfa760489 Mon Sep 17 00:00:00 2001
+From: Yishai Hadas <yishaih@mellanox.com>
+Date: Tue, 23 Jul 2019 09:57:28 +0300
+Subject: IB/mlx5: Fix clean_mr() to work in the expected order
+
+From: Yishai Hadas <yishaih@mellanox.com>
+
+commit b9332dad987018745a0c0bb718d12dacfa760489 upstream.
+
+Any dma map underlying the MR should only be freed once the MR is fenced
+at the hardware.
+
+As of the above we first destroy the MKEY and just after that can safely
+call to dma_unmap_single().
+
+Link: https://lore.kernel.org/r/20190723065733.4899-6-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 4.3
+Fixes: 8a187ee52b04 ("IB/mlx5: Support the new memory registration API")
+Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mr.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1583,10 +1583,10 @@ static void clean_mr(struct mlx5_ib_dev
+               mr->sig = NULL;
+       }
+-      mlx5_free_priv_descs(mr);
+-
+-      if (!allocated_from_cache)
++      if (!allocated_from_cache) {
+               destroy_mkey(dev, mr);
++              mlx5_free_priv_descs(mr);
++      }
+ }
+ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
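
The ordering argument generalises: memory that a still-live agent can reach must only be unmapped or freed after that agent has been fenced. A rough userspace analogue, with a worker thread standing in for the HCA (illustrative only, assumes POSIX threads):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *hca_stand_in(void *buf)
    {
            /* the "hardware" may still read the mapped region */
            printf("worker reads: %s\n", (char *)buf);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            char *dma_buf = strdup("descriptor memory");

            pthread_create(&t, NULL, hca_stand_in, dma_buf);

            /* step 1: fence/destroy the consumer (the mkey in the patch) ... */
            pthread_join(t, NULL);

            /* step 2: ... only now is it safe to unmap/free the memory */
            free(dma_buf);
            return 0;
    }
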
diff --git a/queue-5.2/ib-mlx5-fix-rss-toeplitz-setup-to-be-aligned-with-the-hw-specification.patch b/queue-5.2/ib-mlx5-fix-rss-toeplitz-setup-to-be-aligned-with-the-hw-specification.patch
new file mode 100644 (file)
index 0000000..e3eda24
--- /dev/null
@@ -0,0 +1,39 @@
+From b7165bd0d6cbb93732559be6ea8774653b204480 Mon Sep 17 00:00:00 2001
+From: Yishai Hadas <yishaih@mellanox.com>
+Date: Tue, 23 Jul 2019 09:57:29 +0300
+Subject: IB/mlx5: Fix RSS Toeplitz setup to be aligned with the HW specification
+
+From: Yishai Hadas <yishaih@mellanox.com>
+
+commit b7165bd0d6cbb93732559be6ea8774653b204480 upstream.
+
+The specification for the Toeplitz function doesn't require the key to be
+set explicitly as symmetric. In case symmetric functionality is required,
+a symmetric key can simply be used.
+
+Wrongly forcing the algorithm to symmetric causes the wrong packet
+distribution and a performance degradation.
+
+Link: https://lore.kernel.org/r/20190723065733.4899-7-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 4.7
+Fixes: 28d6137008b2 ("IB/mlx5: Add RSS QP support")
+Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Alex Vainman <alexv@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/qp.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1718,7 +1718,6 @@ static int create_rss_raw_qp_tir(struct
+               }
+               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+-              MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               memcpy(rss_key, ucmd.rx_hash_key, len);
+               break;
+       }
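
The point of the commit message above is that symmetry belongs in the key, not in a forced hardware flag. One commonly cited way to get a symmetric Toeplitz hash is a key built from a repeated 16-bit pattern, sketched below (purely illustrative; neither the patch nor the driver mandates this particular key):

    #include <stdint.h>
    #include <stdio.h>

    #define RSS_KEY_LEN 40

    /* Repeating the same two bytes across the key makes the Toeplitz hash
     * invariant under swapping source/destination addresses and ports. */
    static void build_symmetric_rss_key(uint8_t key[RSS_KEY_LEN])
    {
            for (int i = 0; i < RSS_KEY_LEN; i += 2) {
                    key[i]     = 0x6d;
                    key[i + 1] = 0x5a;
            }
    }

    int main(void)
    {
            uint8_t key[RSS_KEY_LEN];

            build_symmetric_rss_key(key);
            for (int i = 0; i < RSS_KEY_LEN; i++)
                    printf("%02x%c", key[i], i % 20 == 19 ? '\n' : ' ');
            return 0;
    }
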
diff --git a/queue-5.2/ib-mlx5-fix-unreg_umr-to-ignore-the-mkey-state.patch b/queue-5.2/ib-mlx5-fix-unreg_umr-to-ignore-the-mkey-state.patch
new file mode 100644 (file)
index 0000000..1e84230
--- /dev/null
@@ -0,0 +1,76 @@
+From 6a053953739d23694474a5f9c81d1a30093da81a Mon Sep 17 00:00:00 2001
+From: Yishai Hadas <yishaih@mellanox.com>
+Date: Tue, 23 Jul 2019 09:57:25 +0300
+Subject: IB/mlx5: Fix unreg_umr to ignore the mkey state
+
+From: Yishai Hadas <yishaih@mellanox.com>
+
+commit 6a053953739d23694474a5f9c81d1a30093da81a upstream.
+
+Fix unreg_umr to ignore the mkey state and not fail if it was freed.  This
+prevents a case where a user space application has already changed the mkey
+state to free and the UMR operation would then fail, leaving the mkey in an
+inappropriate state.
+
+Link: https://lore.kernel.org/r/20190723065733.4899-3-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 3.19
+Fixes: 968e78dd9644 ("IB/mlx5: Enhance UMR support to allow partial page table update")
+Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mlx5_ib.h |    1 +
+ drivers/infiniband/hw/mlx5/mr.c      |    4 ++--
+ drivers/infiniband/hw/mlx5/qp.c      |   12 ++++++++----
+ 3 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -480,6 +480,7 @@ struct mlx5_umr_wr {
+       u64                             length;
+       int                             access_flags;
+       u32                             mkey;
++      u8                              ignore_free_state:1;
+ };
+ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1372,10 +1372,10 @@ static int unreg_umr(struct mlx5_ib_dev
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return 0;
+-      umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
+-                            MLX5_IB_SEND_UMR_FAIL_IF_FREE;
++      umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR;
+       umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.mkey = mr->mmkey.key;
++      umrwr.ignore_free_state = 1;
+       return mlx5_ib_post_send_wait(dev, &umrwr);
+ }
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4262,10 +4262,14 @@ static int set_reg_umr_segment(struct ml
+       memset(umr, 0, sizeof(*umr));
+-      if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+-              umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
+-      else
+-              umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
++      if (!umrwr->ignore_free_state) {
++              if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
++                       /* fail if free */
++                      umr->flags = MLX5_UMR_CHECK_FREE;
++              else
++                      /* fail if not free */
++                      umr->flags = MLX5_UMR_CHECK_NOT_FREE;
++      }
+       umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+       if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
diff --git a/queue-5.2/ib-mlx5-move-mrs-to-a-kernel-pd-when-freeing-them-to-the-mr-cache.patch b/queue-5.2/ib-mlx5-move-mrs-to-a-kernel-pd-when-freeing-them-to-the-mr-cache.patch
new file mode 100644 (file)
index 0000000..426472f
--- /dev/null
@@ -0,0 +1,47 @@
+From 9ec4483a3f0f71a228a5933bc040441322bfb090 Mon Sep 17 00:00:00 2001
+From: Yishai Hadas <yishaih@mellanox.com>
+Date: Tue, 23 Jul 2019 09:57:27 +0300
+Subject: IB/mlx5: Move MRs to a kernel PD when freeing them to the MR cache
+
+From: Yishai Hadas <yishaih@mellanox.com>
+
+commit 9ec4483a3f0f71a228a5933bc040441322bfb090 upstream.
+
+Fix unreg_umr to move the MR to a kernel owned PD (i.e. the UMR PD) which
+can't be accessed by userspace.
+
+This ensures that nothing can continue to access the MR once it has been
+placed in the kernel's cache for reuse.
+
+MRs in the cache continue to have their HW state, including DMA tables,
+present. Even though the MR has been invalidated, changing the PD provides
+an additional layer of protection against use of the MR.
+
+Link: https://lore.kernel.org/r/20190723065733.4899-5-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 3.10
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mr.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1375,8 +1375,10 @@ static int unreg_umr(struct mlx5_ib_dev
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return 0;
+-      umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR;
++      umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
++                            MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
+       umrwr.wr.opcode = MLX5_IB_WR_UMR;
++      umrwr.pd = dev->umrc.pd;
+       umrwr.mkey = mr->mmkey.key;
+       umrwr.ignore_free_state = 1;
diff --git a/queue-5.2/ib-mlx5-use-direct-mkey-destroy-command-upon-umr-unreg-failure.patch b/queue-5.2/ib-mlx5-use-direct-mkey-destroy-command-upon-umr-unreg-failure.patch
new file mode 100644 (file)
index 0000000..ac8b381
--- /dev/null
@@ -0,0 +1,59 @@
+From afd1417404fba6dbfa6c0a8e5763bd348da682e4 Mon Sep 17 00:00:00 2001
+From: Yishai Hadas <yishaih@mellanox.com>
+Date: Tue, 23 Jul 2019 09:57:26 +0300
+Subject: IB/mlx5: Use direct mkey destroy command upon UMR unreg failure
+
+From: Yishai Hadas <yishaih@mellanox.com>
+
+commit afd1417404fba6dbfa6c0a8e5763bd348da682e4 upstream.
+
+Use a direct firmware command to destroy the mkey in case the unreg UMR
+operation has failed.
+
+This prevents a case where an mkey will leak out of the cache after a
+failure to destroy it with a UMR WR.
+
+In case the MR cache limit hasn't been reached, a call to add another entry
+to the cache, in place of the destroyed one, is issued.
+
+In addition, a warning message is replaced with WARN_ON(), as this flow is
+fatal and can't happen unless there is a bug somewhere.
+
+Link: https://lore.kernel.org/r/20190723065733.4899-4-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 4.10
+Fixes: 49780d42dfc9 ("IB/mlx5: Expose MR cache for mlx5_ib")
+Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mr.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -545,13 +545,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_d
+               return;
+       c = order2idx(dev, mr->order);
+-      if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
+-              mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+-              return;
+-      }
++      WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
+-      if (unreg_umr(dev, mr))
++      if (unreg_umr(dev, mr)) {
++              mr->allocated_from_cache = false;
++              destroy_mkey(dev, mr);
++              ent = &cache->ent[c];
++              if (ent->cur < ent->limit)
++                      queue_work(cache->wq, &ent->work);
+               return;
++      }
+       ent = &cache->ent[c];
+       spin_lock_irq(&ent->lock);
diff --git a/queue-5.2/io_uring-fix-kasan-use-after-free-in-io_sq_wq_submit_work.patch b/queue-5.2/io_uring-fix-kasan-use-after-free-in-io_sq_wq_submit_work.patch
new file mode 100644 (file)
index 0000000..1965b01
--- /dev/null
@@ -0,0 +1,111 @@
+From d0ee879187df966ef638031b5f5183078d672141 Mon Sep 17 00:00:00 2001
+From: Jackie Liu <liuyun01@kylinos.cn>
+Date: Wed, 31 Jul 2019 14:39:33 +0800
+Subject: io_uring: fix KASAN use after free in io_sq_wq_submit_work
+
+From: Jackie Liu <liuyun01@kylinos.cn>
+
+commit d0ee879187df966ef638031b5f5183078d672141 upstream.
+
+[root@localhost ~]# ./liburing/test/link
+
+QEMU Standard PC report that:
+
+[   29.379892] CPU: 0 PID: 84 Comm: kworker/u2:2 Not tainted 5.3.0-rc2-00051-g4010b622f1d2-dirty #86
+[   29.379902] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+[   29.379913] Workqueue: io_ring-wq io_sq_wq_submit_work
+[   29.379929] Call Trace:
+[   29.379953]  dump_stack+0xa9/0x10e
+[   29.379970]  ? io_sq_wq_submit_work+0xbf4/0xe90
+[   29.379986]  print_address_description.cold.6+0x9/0x317
+[   29.379999]  ? io_sq_wq_submit_work+0xbf4/0xe90
+[   29.380010]  ? io_sq_wq_submit_work+0xbf4/0xe90
+[   29.380026]  __kasan_report.cold.7+0x1a/0x34
+[   29.380044]  ? io_sq_wq_submit_work+0xbf4/0xe90
+[   29.380061]  kasan_report+0xe/0x12
+[   29.380076]  io_sq_wq_submit_work+0xbf4/0xe90
+[   29.380104]  ? io_sq_thread+0xaf0/0xaf0
+[   29.380152]  process_one_work+0xb59/0x19e0
+[   29.380184]  ? pwq_dec_nr_in_flight+0x2c0/0x2c0
+[   29.380221]  worker_thread+0x8c/0xf40
+[   29.380248]  ? __kthread_parkme+0xab/0x110
+[   29.380265]  ? process_one_work+0x19e0/0x19e0
+[   29.380278]  kthread+0x30b/0x3d0
+[   29.380292]  ? kthread_create_on_node+0xe0/0xe0
+[   29.380311]  ret_from_fork+0x3a/0x50
+
+[   29.380635] Allocated by task 209:
+[   29.381255]  save_stack+0x19/0x80
+[   29.381268]  __kasan_kmalloc.constprop.6+0xc1/0xd0
+[   29.381279]  kmem_cache_alloc+0xc0/0x240
+[   29.381289]  io_submit_sqe+0x11bc/0x1c70
+[   29.381300]  io_ring_submit+0x174/0x3c0
+[   29.381311]  __x64_sys_io_uring_enter+0x601/0x780
+[   29.381322]  do_syscall_64+0x9f/0x4d0
+[   29.381336]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+[   29.381633] Freed by task 84:
+[   29.382186]  save_stack+0x19/0x80
+[   29.382198]  __kasan_slab_free+0x11d/0x160
+[   29.382210]  kmem_cache_free+0x8c/0x2f0
+[   29.382220]  io_put_req+0x22/0x30
+[   29.382230]  io_sq_wq_submit_work+0x28b/0xe90
+[   29.382241]  process_one_work+0xb59/0x19e0
+[   29.382251]  worker_thread+0x8c/0xf40
+[   29.382262]  kthread+0x30b/0x3d0
+[   29.382272]  ret_from_fork+0x3a/0x50
+
+[   29.382569] The buggy address belongs to the object at ffff888067172140
+                which belongs to the cache io_kiocb of size 224
+[   29.384692] The buggy address is located 120 bytes inside of
+                224-byte region [ffff888067172140, ffff888067172220)
+[   29.386723] The buggy address belongs to the page:
+[   29.387575] page:ffffea00019c5c80 refcount:1 mapcount:0 mapping:ffff88806ace5180 index:0x0
+[   29.387587] flags: 0x100000000000200(slab)
+[   29.387603] raw: 0100000000000200 dead000000000100 dead000000000122 ffff88806ace5180
+[   29.387617] raw: 0000000000000000 00000000800c000c 00000001ffffffff 0000000000000000
+[   29.387624] page dumped because: kasan: bad access detected
+
+[   29.387920] Memory state around the buggy address:
+[   29.388771]  ffff888067172080: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+[   29.390062]  ffff888067172100: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+[   29.391325] >ffff888067172180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[   29.392578]                                         ^
+[   29.393480]  ffff888067172200: fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc fc
+[   29.394744]  ffff888067172280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[   29.396003] ==================================================================
+[   29.397260] Disabling lock debugging due to kernel taint
+
+io_sq_wq_submit_work frees req and then reads it again.
+
+Cc: Zhengyuan Liu <liuzhengyuan@kylinos.cn>
+Cc: linux-block@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: f7b76ac9d17e ("io_uring: fix counter inc/dec mismatch in async_list")
+Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1692,6 +1692,7 @@ restart:
+       do {
+               struct sqe_submit *s = &req->submit;
+               const struct io_uring_sqe *sqe = s->sqe;
++              unsigned int flags = req->flags;
+               /* Ensure we clear previously set non-block flag */
+               req->rw.ki_flags &= ~IOCB_NOWAIT;
+@@ -1737,7 +1738,7 @@ restart:
+               kfree(sqe);
+               /* req from defer and link list needn't decrease async cnt */
+-              if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
++              if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+                       goto out;
+               if (!async_list)
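
The bug and fix follow a general refcounting rule: after the last reference may have been dropped, no field of the object can be read, so anything still needed has to be copied to a local first. A minimal standalone illustration with a hypothetical refcounted object (not the io_uring structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct req {
            int refs;
            unsigned int flags;
    };

    static void req_put(struct req *r)
    {
            if (--r->refs == 0)
                    free(r);        /* r must not be dereferenced after this */
    }

    int main(void)
    {
            struct req *r = malloc(sizeof(*r));

            r->refs  = 1;
            r->flags = 0x2;

            /* snapshot what is needed later *before* dropping the reference */
            unsigned int flags = r->flags;

            req_put(r);             /* may free r */

            if (flags & 0x2)        /* reads the local copy, not freed memory */
                    puts("request came from the defer/link path");
            return 0;
    }
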
diff --git a/queue-5.2/loop-fix-mount-2-failure-due-to-race-with-loop_set_fd.patch b/queue-5.2/loop-fix-mount-2-failure-due-to-race-with-loop_set_fd.patch
new file mode 100644 (file)
index 0000000..bd10fa5
--- /dev/null
@@ -0,0 +1,210 @@
+From 89e524c04fa966330e2e80ab2bc50b9944c5847a Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 30 Jul 2019 13:10:14 +0200
+Subject: loop: Fix mount(2) failure due to race with LOOP_SET_FD
+
+From: Jan Kara <jack@suse.cz>
+
+commit 89e524c04fa966330e2e80ab2bc50b9944c5847a upstream.
+
+Commit 33ec3e53e7b1 ("loop: Don't change loop device under exclusive
+opener") made the LOOP_SET_FD ioctl acquire an exclusive block device
+reference while it updates the loop device binding. However, this can make a
+perfectly valid mount(2) fail with EBUSY when a racing LOOP_SET_FD
+temporarily holds the exclusive bdev reference, as in cases like this:
+
+for i in {a..z}{a..z}; do
+        dd if=/dev/zero of=$i.image bs=1k count=0 seek=1024
+        mkfs.ext2 $i.image
+        mkdir mnt$i
+done
+
+echo "Run"
+for i in {a..z}{a..z}; do
+        mount -o loop -t ext2 $i.image mnt$i &
+done
+
+Fix the problem by not taking a full exclusive bdev reference in
+LOOP_SET_FD and instead just marking the bdev as being claimed while we
+update the binding information. This blocks new exclusive openers
+instead of failing them with EBUSY, which fixes the problem.
+
+Fixes: 33ec3e53e7b1 ("loop: Don't change loop device under exclusive opener")
+Cc: stable@vger.kernel.org
+Tested-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c |   16 +++++----
+ fs/block_dev.c       |   83 +++++++++++++++++++++++++++++++++++----------------
+ include/linux/fs.h   |    6 +++
+ 3 files changed, 73 insertions(+), 32 deletions(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -932,6 +932,7 @@ static int loop_set_fd(struct loop_devic
+       struct file     *file;
+       struct inode    *inode;
+       struct address_space *mapping;
++      struct block_device *claimed_bdev = NULL;
+       int             lo_flags = 0;
+       int             error;
+       loff_t          size;
+@@ -950,10 +951,11 @@ static int loop_set_fd(struct loop_devic
+        * here to avoid changing device under exclusive owner.
+        */
+       if (!(mode & FMODE_EXCL)) {
+-              bdgrab(bdev);
+-              error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
+-              if (error)
++              claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
++              if (IS_ERR(claimed_bdev)) {
++                      error = PTR_ERR(claimed_bdev);
+                       goto out_putf;
++              }
+       }
+       error = mutex_lock_killable(&loop_ctl_mutex);
+@@ -1023,15 +1025,15 @@ static int loop_set_fd(struct loop_devic
+       mutex_unlock(&loop_ctl_mutex);
+       if (partscan)
+               loop_reread_partitions(lo, bdev);
+-      if (!(mode & FMODE_EXCL))
+-              blkdev_put(bdev, mode | FMODE_EXCL);
++      if (claimed_bdev)
++              bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+       return 0;
+ out_unlock:
+       mutex_unlock(&loop_ctl_mutex);
+ out_bdev:
+-      if (!(mode & FMODE_EXCL))
+-              blkdev_put(bdev, mode | FMODE_EXCL);
++      if (claimed_bdev)
++              bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ out_putf:
+       fput(file);
+ out:
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1151,8 +1151,7 @@ static struct gendisk *bdev_get_gendisk(
+  * Pointer to the block device containing @bdev on success, ERR_PTR()
+  * value on failure.
+  */
+-static struct block_device *bd_start_claiming(struct block_device *bdev,
+-                                            void *holder)
++struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
+ {
+       struct gendisk *disk;
+       struct block_device *whole;
+@@ -1199,6 +1198,62 @@ static struct block_device *bd_start_cla
+               return ERR_PTR(err);
+       }
+ }
++EXPORT_SYMBOL(bd_start_claiming);
++
++static void bd_clear_claiming(struct block_device *whole, void *holder)
++{
++      lockdep_assert_held(&bdev_lock);
++      /* tell others that we're done */
++      BUG_ON(whole->bd_claiming != holder);
++      whole->bd_claiming = NULL;
++      wake_up_bit(&whole->bd_claiming, 0);
++}
++
++/**
++ * bd_finish_claiming - finish claiming of a block device
++ * @bdev: block device of interest
++ * @whole: whole block device (returned from bd_start_claiming())
++ * @holder: holder that has claimed @bdev
++ *
++ * Finish exclusive open of a block device. Mark the device as exclusively
++ * open by the holder and wake up all waiters for exclusive open to finish.
++ */
++void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
++                      void *holder)
++{
++      spin_lock(&bdev_lock);
++      BUG_ON(!bd_may_claim(bdev, whole, holder));
++      /*
++       * Note that for a whole device bd_holders will be incremented twice,
++       * and bd_holder will be set to bd_may_claim before being set to holder
++       */
++      whole->bd_holders++;
++      whole->bd_holder = bd_may_claim;
++      bdev->bd_holders++;
++      bdev->bd_holder = holder;
++      bd_clear_claiming(whole, holder);
++      spin_unlock(&bdev_lock);
++}
++EXPORT_SYMBOL(bd_finish_claiming);
++
++/**
++ * bd_abort_claiming - abort claiming of a block device
++ * @bdev: block device of interest
++ * @whole: whole block device (returned from bd_start_claiming())
++ * @holder: holder that has claimed @bdev
++ *
++ * Abort claiming of a block device when the exclusive open failed. This can
++ * also be used when an exclusive open is not actually desired and we just
++ * need to block other exclusive openers for a while.
++ */
++void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
++                     void *holder)
++{
++      spin_lock(&bdev_lock);
++      bd_clear_claiming(whole, holder);
++      spin_unlock(&bdev_lock);
++}
++EXPORT_SYMBOL(bd_abort_claiming);
+ #ifdef CONFIG_SYSFS
+ struct bd_holder_disk {
+@@ -1668,29 +1723,7 @@ int blkdev_get(struct block_device *bdev
+               /* finish claiming */
+               mutex_lock(&bdev->bd_mutex);
+-              spin_lock(&bdev_lock);
+-
+-              if (!res) {
+-                      BUG_ON(!bd_may_claim(bdev, whole, holder));
+-                      /*
+-                       * Note that for a whole device bd_holders
+-                       * will be incremented twice, and bd_holder
+-                       * will be set to bd_may_claim before being
+-                       * set to holder
+-                       */
+-                      whole->bd_holders++;
+-                      whole->bd_holder = bd_may_claim;
+-                      bdev->bd_holders++;
+-                      bdev->bd_holder = holder;
+-              }
+-
+-              /* tell others that we're done */
+-              BUG_ON(whole->bd_claiming != holder);
+-              whole->bd_claiming = NULL;
+-              wake_up_bit(&whole->bd_claiming, 0);
+-
+-              spin_unlock(&bdev_lock);
+-
++              bd_finish_claiming(bdev, whole, holder);
+               /*
+                * Block event polling for write claims if requested.  Any
+                * write holder makes the write_holder state stick until
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2615,6 +2615,12 @@ extern struct block_device *blkdev_get_b
+                                              void *holder);
+ extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
+                                             void *holder);
++extern struct block_device *bd_start_claiming(struct block_device *bdev,
++                                            void *holder);
++extern void bd_finish_claiming(struct block_device *bdev,
++                             struct block_device *whole, void *holder);
++extern void bd_abort_claiming(struct block_device *bdev,
++                            struct block_device *whole, void *holder);
+ extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+ extern int __blkdev_reread_part(struct block_device *bdev);
+ extern int blkdev_reread_part(struct block_device *bdev);
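
The claim API introduced above follows a start/finish/abort pattern: mark the
device as being claimed so new exclusive openers block, perform the update,
then either promote the claim to a real exclusive open or drop it. A rough
user-space analogue of that pattern, using pthreads and invented names rather
than the block layer API:

    #include <pthread.h>
    #include <stdbool.h>

    struct dev_like {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            claiming;   /* a claim is in flight */
        bool            exclusive;  /* really opened exclusively */
    };

    /* Block new exclusive openers without taking ownership yet. */
    static void start_claiming(struct dev_like *d)
    {
        pthread_mutex_lock(&d->lock);
        while (d->claiming || d->exclusive)
            pthread_cond_wait(&d->cond, &d->lock);
        d->claiming = true;
        pthread_mutex_unlock(&d->lock);
    }

    /* Promote the claim into a real exclusive open. */
    static void finish_claiming(struct dev_like *d)
    {
        pthread_mutex_lock(&d->lock);
        d->claiming = false;
        d->exclusive = true;
        pthread_cond_broadcast(&d->cond);
        pthread_mutex_unlock(&d->lock);
    }

    /* Drop the claim; blocked exclusive openers may proceed again. */
    static void abort_claiming(struct dev_like *d)
    {
        pthread_mutex_lock(&d->lock);
        d->claiming = false;
        pthread_cond_broadcast(&d->cond);
        pthread_mutex_unlock(&d->lock);
    }

    int main(void)
    {
        struct dev_like d = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
            false, false
        };

        start_claiming(&d);     /* LOOP_SET_FD: hold off racing openers ... */
        /* ... update the binding here ... */
        abort_claiming(&d);     /* ... then release the claim */
        (void)finish_claiming;  /* a real exclusive open would use this */
        return 0;
    }
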
diff --git a/queue-5.2/mm-compaction-avoid-100-cpu-usage-during-compaction-when-a-task-is-killed.patch b/queue-5.2/mm-compaction-avoid-100-cpu-usage-during-compaction-when-a-task-is-killed.patch
new file mode 100644 (file)
index 0000000..978ed88
--- /dev/null
@@ -0,0 +1,96 @@
+From 670105a25608affe01cb0ccdc2a1f4bd2327172b Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Fri, 2 Aug 2019 21:48:51 -0700
+Subject: mm: compaction: avoid 100% CPU usage during compaction when a task is killed
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 670105a25608affe01cb0ccdc2a1f4bd2327172b upstream.
+
+"howaboutsynergy" reported via kernel buzilla number 204165 that
+compact_zone_order was consuming 100% CPU during a stress test for
+prolonged periods of time.  Specifically the following command, which
+should exit in 10 seconds, was taking an excessive time to finish while
+the CPU was pegged at 100%.
+
+  stress -m 220 --vm-bytes 1000000000 --timeout 10
+
+Tracing indicated a pattern as follows
+
+          stress-3923  [007]   519.106208: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106212: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106216: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106219: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106223: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106227: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106231: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106235: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106238: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+          stress-3923  [007]   519.106242: mm_compaction_isolate_migratepages: range=(0x70bb80 ~ 0x70bb80) nr_scanned=0 nr_taken=0
+
+Note that compaction is entered in rapid succession while scanning and
+isolating nothing.  The problem is that when a compacting task
+receives a fatal signal, it retries indefinitely instead of exiting,
+making no progress for as long as the fatal signal is pending.
+
+It's not easy to trigger this condition, although enabling zswap helps
+because it alters the timing.  A very small window has to be hit
+for the problem to occur (signal delivered while compacting and
+isolating a PFN for migration that is not aligned to SWAP_CLUSTER_MAX).
+
+This was reproduced locally (16G single-socket system, 8G swap, 30%
+zswap configured, vm-bytes 22000000000, using Colin King's stress-ng
+implementation from github running in a loop until the problem hits).
+Tracing recorded the problem occurring almost 200K times in a short
+window.  With this patch, the problem hit 4 times but the task exited
+normally instead of consuming CPU.
+
+This problem has existed for some time but it was made worse by commit
+cf66f0700c8f ("mm, compaction: do not consider a need to reschedule as
+contention").  Before that commit, if the same condition was hit then
+locks would be quickly contended and compaction would exit that way.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204165
+Link: http://lkml.kernel.org/r/20190718085708.GE24383@techsingularity.net
+Fixes: cf66f0700c8f ("mm, compaction: do not consider a need to reschedule as contention")
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>   [5.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compac
+               /*
+                * Periodically drop the lock (if held) regardless of its
+-               * contention, to give chance to IRQs. Abort async compaction
+-               * if contended.
++               * contention, to give chance to IRQs. Abort completely if
++               * a fatal signal is pending.
+                */
+               if (!(low_pfn % SWAP_CLUSTER_MAX)
+                   && compact_unlock_should_abort(&pgdat->lru_lock,
+-                                          flags, &locked, cc))
+-                      break;
++                                          flags, &locked, cc)) {
++                      low_pfn = 0;
++                      goto fatal_pending;
++              }
+               if (!pfn_valid_within(low_pfn))
+                       goto isolate_fail;
+@@ -1060,6 +1062,7 @@ isolate_abort:
+       trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
+                                               nr_scanned, nr_isolated);
++fatal_pending:
+       cc->total_migrate_scanned += nr_scanned;
+       if (nr_isolated)
+               count_compact_events(COMPACTISOLATED, nr_isolated);
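
The structure of the fix is also generic: a long scan loop periodically polls
for a fatal condition and bails out completely instead of retrying. A small
stand-alone sketch of that loop shape, with invented names and a plain flag
standing in for fatal_signal_pending():

    #include <signal.h>
    #include <stdbool.h>

    #define CLUSTER 32  /* poll the abort condition every CLUSTER iterations */

    static volatile sig_atomic_t fatal_pending;

    /*
     * Scan [start, end); return the first interesting pfn, or 0 when a fatal
     * condition is seen, aborting completely instead of retrying forever.
     */
    static unsigned long scan_range(unsigned long start, unsigned long end,
                                    bool (*interesting)(unsigned long pfn))
    {
        unsigned long pfn;

        for (pfn = start; pfn < end; pfn++) {
            if (!(pfn % CLUSTER) && fatal_pending)
                return 0;
            if (interesting(pfn))
                return pfn;
        }
        return 0;
    }

    static bool never(unsigned long pfn)
    {
        (void)pfn;
        return false;
    }

    int main(void)
    {
        return scan_range(1, 1 << 20, never) ? 1 : 0;
    }
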
diff --git a/queue-5.2/mm-migrate-fix-reference-check-race-between-__find_get_block-and-migration.patch b/queue-5.2/mm-migrate-fix-reference-check-race-between-__find_get_block-and-migration.patch
new file mode 100644 (file)
index 0000000..14a3747
--- /dev/null
@@ -0,0 +1,80 @@
+From ebdf4de5642fb6580b0763158b6b4b791c4d6a4d Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 2 Aug 2019 21:48:47 -0700
+Subject: mm: migrate: fix reference check race between __find_get_block() and migration
+
+From: Jan Kara <jack@suse.cz>
+
+commit ebdf4de5642fb6580b0763158b6b4b791c4d6a4d upstream.
+
+buffer_migrate_page_norefs() can race with bh users in the following
+way:
+
+CPU1                                    CPU2
+buffer_migrate_page_norefs()
+  buffer_migrate_lock_buffers()
+  checks bh refs
+  spin_unlock(&mapping->private_lock)
+                                        __find_get_block()
+                                          spin_lock(&mapping->private_lock)
+                                          grab bh ref
+                                          spin_unlock(&mapping->private_lock)
+  move page                               do bh work
+
+This can result in various issues like lost updates to buffers (i.e.
+metadata corruption) or use after free issues for the old page.
+
+This patch closes the race by holding mapping->private_lock while the
+mapping is being moved to a new page.  Ordinarily, a reference can be
+taken outside of the private_lock using the per-cpu BH LRU but the
+references are checked and the LRU invalidated if necessary.  The
+private_lock is held once the references are known so the buffer lookup
+slow path will spin on the private_lock.  Between the page lock and
+private_lock, it should be impossible for other references to be
+acquired and updates to happen during the migration.
+
+A user had reported data corruption issues on a distribution kernel with
+a page migration implementation similar to mainline's.  The data
+corruption could not be reproduced with this patch applied.  A small
+number of migration-intensive tests were run and no performance problems
+were noted.
+
+[mgorman@techsingularity.net: Changelog, removed tracing]
+Link: http://lkml.kernel.org/r/20190718090238.GF24383@techsingularity.net
+Fixes: 89cb0888ca14 "mm: migrate: provide buffer_migrate_page_norefs()"
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org>   [5.0+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -771,12 +771,12 @@ recheck_buffers:
+                       }
+                       bh = bh->b_this_page;
+               } while (bh != head);
+-              spin_unlock(&mapping->private_lock);
+               if (busy) {
+                       if (invalidated) {
+                               rc = -EAGAIN;
+                               goto unlock_buffers;
+                       }
++                      spin_unlock(&mapping->private_lock);
+                       invalidate_bh_lrus();
+                       invalidated = true;
+                       goto recheck_buffers;
+@@ -809,6 +809,8 @@ recheck_buffers:
+       rc = MIGRATEPAGE_SUCCESS;
+ unlock_buffers:
++      if (check_refs)
++              spin_unlock(&mapping->private_lock);
+       bh = head;
+       do {
+               unlock_buffer(bh);
diff --git a/queue-5.2/mm-migrate.c-initialize-pud_entry-in-migrate_vma.patch b/queue-5.2/mm-migrate.c-initialize-pud_entry-in-migrate_vma.patch
new file mode 100644 (file)
index 0000000..5eb3715
--- /dev/null
@@ -0,0 +1,59 @@
+From 7b358c6f12dc82364f6d317f8c8f1d794adbc3f5 Mon Sep 17 00:00:00 2001
+From: Ralph Campbell <rcampbell@nvidia.com>
+Date: Fri, 2 Aug 2019 21:49:08 -0700
+Subject: mm/migrate.c: initialize pud_entry in migrate_vma()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ralph Campbell <rcampbell@nvidia.com>
+
+commit 7b358c6f12dc82364f6d317f8c8f1d794adbc3f5 upstream.
+
+When CONFIG_MIGRATE_VMA_HELPER is enabled, migrate_vma() calls
+migrate_vma_collect(), which initializes a struct mm_walk but does not
+initialize mm_walk.pud_entry (found by code inspection).  Use a C
+structure initializer to make sure it is set to NULL.
+
+Link: http://lkml.kernel.org/r/20190719233225.12243-1-rcampbell@nvidia.com
+Fixes: 8763cb45ab967 ("mm/migrate: new memory migration helper for use with device memory")
+Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
+Reviewed-by: John Hubbard <jhubbard@nvidia.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: "Jérôme Glisse" <jglisse@redhat.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c |   17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2347,16 +2347,13 @@ next:
+ static void migrate_vma_collect(struct migrate_vma *migrate)
+ {
+       struct mmu_notifier_range range;
+-      struct mm_walk mm_walk;
+-
+-      mm_walk.pmd_entry = migrate_vma_collect_pmd;
+-      mm_walk.pte_entry = NULL;
+-      mm_walk.pte_hole = migrate_vma_collect_hole;
+-      mm_walk.hugetlb_entry = NULL;
+-      mm_walk.test_walk = NULL;
+-      mm_walk.vma = migrate->vma;
+-      mm_walk.mm = migrate->vma->vm_mm;
+-      mm_walk.private = migrate;
++      struct mm_walk mm_walk = {
++              .pmd_entry = migrate_vma_collect_pmd,
++              .pte_hole = migrate_vma_collect_hole,
++              .vma = migrate->vma,
++              .mm = migrate->vma->vm_mm,
++              .private = migrate,
++      };
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
+                               migrate->start,
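
The conversion works because of a basic C guarantee: members not named in a
designated initializer are zero-initialized, so pud_entry (and any callback
added to the struct later) ends up NULL without being mentioned. A small
stand-alone illustration using a made-up struct:

    #include <assert.h>
    #include <stddef.h>

    struct walker {
        void (*pmd_entry)(void);
        void (*pud_entry)(void);   /* easy to forget with field-by-field setup */
        void *private;
    };

    static void pmd_cb(void)
    {
    }

    int main(void)
    {
        /* Members not named in a designated initializer are zeroed. */
        struct walker w = {
            .pmd_entry = pmd_cb,
        };

        assert(w.pud_entry == NULL);
        assert(w.private == NULL);
        return 0;
    }
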
diff --git a/queue-5.2/mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch b/queue-5.2/mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch
new file mode 100644 (file)
index 0000000..c02d6a6
--- /dev/null
@@ -0,0 +1,61 @@
+From fa1e512fac717f34e7c12d7a384c46e90a647392 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Fri, 2 Aug 2019 21:48:44 -0700
+Subject: mm: vmscan: check if mem cgroup is disabled or not before calling memcg slab shrinker
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit fa1e512fac717f34e7c12d7a384c46e90a647392 upstream.
+
+Shakeel Butt reported a premature OOM on a kernel booted with
+"cgroup_disable=memory", since mem_cgroup_is_root() returns false even
+though memcg is actually NULL.  drop_caches is also broken.
+
+This is because commit aeed1d325d42 ("mm/vmscan.c: generalize
+shrink_slab() calls in shrink_node()") removed the !memcg check before
+!mem_cgroup_is_root().  And, surprisingly, the root memcg is allocated even
+though the memory cgroup is disabled by a kernel boot parameter.
+
+Add mem_cgroup_disabled() check to make reclaimer work as expected.
+
+Link: http://lkml.kernel.org/r/1563385526-20805-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: aeed1d325d42 ("mm/vmscan.c: generalize shrink_slab() calls in shrink_node()")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Reported-by: Shakeel Butt <shakeelb@google.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Jan Hadrava <had@kam.mff.cuni.cz>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Qian Cai <cai@lca.pw>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>   [4.19+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -684,7 +684,14 @@ static unsigned long shrink_slab(gfp_t g
+       unsigned long ret, freed = 0;
+       struct shrinker *shrinker;
+-      if (!mem_cgroup_is_root(memcg))
++      /*
++       * The root memcg might be allocated even though memcg is disabled
++       * via "cgroup_disable=memory" boot parameter.  This could make
++       * mem_cgroup_is_root() return false, then just run memcg slab
++       * shrink, but skip global shrink.  This may result in premature
++       * oom.
++       */
++      if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
+               return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
+       if (!down_read_trylock(&shrinker_rwsem))
diff --git a/queue-5.2/mtd-rawnand-micron-handle-on-die-ecc-off-devices-correctly.patch b/queue-5.2/mtd-rawnand-micron-handle-on-die-ecc-off-devices-correctly.patch
new file mode 100644 (file)
index 0000000..a263d3a
--- /dev/null
@@ -0,0 +1,70 @@
+From 8493b2a06fc5b77ef5c579dc32b12761f7b7a84c Mon Sep 17 00:00:00 2001
+From: Marco Felsch <m.felsch@pengutronix.de>
+Date: Tue, 30 Jul 2019 15:44:07 +0200
+Subject: mtd: rawnand: micron: handle on-die "ECC-off" devices correctly
+
+From: Marco Felsch <m.felsch@pengutronix.de>
+
+commit 8493b2a06fc5b77ef5c579dc32b12761f7b7a84c upstream.
+
+Some devices are not supposed to support on-die ECC, but experience
+shows that the internal ECC machinery can actually be enabled through the
+"SET FEATURE (EFh)" command, even if a read of the "READ ID Parameter
+Tables" reports that it is not supported.
+
+Currently, the driver checks the "READ ID Parameter" field directly
+after having enabled the feature. If the check fails it returns
+immediately but leaves the ECC on. When using buggy chips like
+MT29F2G08ABAGA and MT29F2G08ABBGA, all future read/program cycles will
+go through the on-die ECC, confusing the host controller which is
+supposed to be the one handling correction.
+
+To address this in a common way we need to turn off the on-die ECC
+directly after reading the "READ ID Parameter" and before checking the
+"ECC status".
+
+Cc: stable@vger.kernel.org
+Fixes: dbc44edbf833 ("mtd: rawnand: micron: Fix on-die ECC detection logic")
+Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/raw/nand_micron.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/nand_micron.c
++++ b/drivers/mtd/nand/raw/nand_micron.c
+@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(st
+           (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+               return MICRON_ON_DIE_UNSUPPORTED;
++      /*
++       * It seems that there are devices which do not support ECC officially.
++       * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
++       * enabling the ECC feature but do not reflect that in the READ_ID table.
++       * So we have to guarantee that we disable the ECC feature directly
++       * after we did the READ_ID table command. Later we can evaluate the
++       * ECC_ENABLE support.
++       */
+       ret = micron_nand_on_die_ecc_setup(chip, true);
+       if (ret)
+               return MICRON_ON_DIE_UNSUPPORTED;
+@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(st
+       if (ret)
+               return MICRON_ON_DIE_UNSUPPORTED;
+-      if (!(id[4] & MICRON_ID_ECC_ENABLED))
+-              return MICRON_ON_DIE_UNSUPPORTED;
+-
+       ret = micron_nand_on_die_ecc_setup(chip, false);
+       if (ret)
+               return MICRON_ON_DIE_UNSUPPORTED;
++      if (!(id[4] & MICRON_ID_ECC_ENABLED))
++              return MICRON_ON_DIE_UNSUPPORTED;
++
+       ret = nand_readid_op(chip, 0, id, sizeof(id));
+       if (ret)
+               return MICRON_ON_DIE_UNSUPPORTED;
diff --git a/queue-5.2/nbd-replace-kill_bdev-with-__invalidate_device-again.patch b/queue-5.2/nbd-replace-kill_bdev-with-__invalidate_device-again.patch
new file mode 100644 (file)
index 0000000..3e1b5da
--- /dev/null
@@ -0,0 +1,74 @@
+From 2b5c8f0063e4b263cf2de82029798183cf85c320 Mon Sep 17 00:00:00 2001
+From: Munehisa Kamata <kamatam@amazon.com>
+Date: Wed, 31 Jul 2019 20:13:10 +0800
+Subject: nbd: replace kill_bdev() with __invalidate_device() again
+
+From: Munehisa Kamata <kamatam@amazon.com>
+
+commit 2b5c8f0063e4b263cf2de82029798183cf85c320 upstream.
+
+Commit abbbdf12497d ("replace kill_bdev() with __invalidate_device()")
+once did this, but 29eaadc03649 ("nbd: stop using the bdev everywhere")
+resurrected kill_bdev() and it has been there since then. So buffer_head
+mappings still get killed on a server disconnection, and we can still
+hit the BUG_ON on a filesystem on top of the nbd device.
+
+  EXT4-fs (nbd0): mounted filesystem with ordered data mode. Opts: (null)
+  block nbd0: Receive control failed (result -32)
+  block nbd0: shutting down sockets
+  print_req_error: I/O error, dev nbd0, sector 66264 flags 3000
+  EXT4-fs warning (device nbd0): htree_dirblock_to_tree:979: inode #2: lblock 0: comm ls: error -5 reading directory block
+  print_req_error: I/O error, dev nbd0, sector 2264 flags 3000
+  EXT4-fs error (device nbd0): __ext4_get_inode_loc:4690: inode #2: block 283: comm ls: unable to read itable block
+  EXT4-fs error (device nbd0) in ext4_reserve_inode_write:5894: IO failure
+  ------------[ cut here ]------------
+  kernel BUG at fs/buffer.c:3057!
+  invalid opcode: 0000 [#1] SMP PTI
+  CPU: 7 PID: 40045 Comm: jbd2/nbd0-8 Not tainted 5.1.0-rc3+ #4
+  Hardware name: Amazon EC2 m5.12xlarge/, BIOS 1.0 10/16/2017
+  RIP: 0010:submit_bh_wbc+0x18b/0x190
+  ...
+  Call Trace:
+   jbd2_write_superblock+0xf1/0x230 [jbd2]
+   ? account_entity_enqueue+0xc5/0xf0
+   jbd2_journal_update_sb_log_tail+0x94/0xe0 [jbd2]
+   jbd2_journal_commit_transaction+0x12f/0x1d20 [jbd2]
+   ? __switch_to_asm+0x40/0x70
+   ...
+   ? lock_timer_base+0x67/0x80
+   kjournald2+0x121/0x360 [jbd2]
+   ? remove_wait_queue+0x60/0x60
+   kthread+0xf8/0x130
+   ? commit_timeout+0x10/0x10 [jbd2]
+   ? kthread_bind+0x10/0x10
+   ret_from_fork+0x35/0x40
+
+With __invalidate_device(), I no longer hit the BUG_ON with sync or
+unmount on the disconnected device.
+
+Fixes: 29eaadc03649 ("nbd: stop using the bdev everywhere")
+Cc: linux-block@vger.kernel.org
+Cc: Ratna Manoj Bolla <manoj.br@gmail.com>
+Cc: nbd@other.debian.org
+Cc: stable@vger.kernel.org
+Cc: David Woodhouse <dwmw@amazon.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Munehisa Kamata <kamatam@amazon.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1229,7 +1229,7 @@ static void nbd_clear_sock_ioctl(struct
+                                struct block_device *bdev)
+ {
+       sock_shutdown(nbd);
+-      kill_bdev(bdev);
++      __invalidate_device(bdev, true);
+       nbd_bdev_reset(bdev);
+       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
diff --git a/queue-5.2/parisc-add-archclean-makefile-target.patch b/queue-5.2/parisc-add-archclean-makefile-target.patch
new file mode 100644 (file)
index 0000000..de6108d
--- /dev/null
@@ -0,0 +1,50 @@
+From f2c5ed0dd5004c2cff5c0e3d430a107576fcc17f Mon Sep 17 00:00:00 2001
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+Date: Thu, 1 Aug 2019 13:47:03 +0200
+Subject: parisc: Add archclean Makefile target
+
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+
+commit f2c5ed0dd5004c2cff5c0e3d430a107576fcc17f upstream.
+
+Apparently we don't have an archclean target in our
+arch/parisc/Makefile, so files in there never get cleaned out by make
+mrproper.  This, in turn means that the sizes.h file in
+arch/parisc/boot/compressed never gets removed and worse, when you
+transition to an O=build/parisc[64] build model it overrides the
+generated file.  The upshot being my bzImage was building with a SZ_end
+that was too small.
+
+I fixed it by making mrproper clean everything.
+
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/Makefile                 |    3 +++
+ arch/parisc/boot/compressed/Makefile |    1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -164,5 +164,8 @@ define archhelp
+       @echo  '  zinstall      - Install compressed vmlinuz kernel'
+ endef
++archclean:
++      $(Q)$(MAKE) $(clean)=$(boot)
++
+ archheaders:
+       $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
+--- a/arch/parisc/boot/compressed/Makefile
++++ b/arch/parisc/boot/compressed/Makefile
+@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
+ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+ targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
++targets += real2.S firmware.c
+ KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
diff --git a/queue-5.2/parisc-fix-build-of-compressed-kernel-even-with-debug-enabled.patch b/queue-5.2/parisc-fix-build-of-compressed-kernel-even-with-debug-enabled.patch
new file mode 100644 (file)
index 0000000..d77ad1b
--- /dev/null
@@ -0,0 +1,36 @@
+From 3fe6c873af2f2247544debdbe51ec29f690a2ccf Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Thu, 1 Aug 2019 13:33:39 +0200
+Subject: parisc: Fix build of compressed kernel even with debug enabled
+
+From: Helge Deller <deller@gmx.de>
+
+commit 3fe6c873af2f2247544debdbe51ec29f690a2ccf upstream.
+
+With debug info enabled (CONFIG_DEBUG_INFO=y) the resulting vmlinux may become
+so huge that we need to increase the start address of the decompression
+text section, otherwise one will face a linker error.
+
+Reported-by: Sven Schnelle <svens@stackframe.org>
+Tested-by: Sven Schnelle <svens@stackframe.org>
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/boot/compressed/vmlinux.lds.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/boot/compressed/vmlinux.lds.S
++++ b/arch/parisc/boot/compressed/vmlinux.lds.S
+@@ -48,8 +48,8 @@ SECTIONS
+               *(.rodata.compressed)
+       }
+-      /* bootloader code and data starts behind area of extracted kernel */
+-      . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
++      /* bootloader code and data starts at least behind area of extracted kernel */
++      . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
+       /* align on next page boundary */
+       . = ALIGN(4096);
diff --git a/queue-5.2/parisc-strip-debug-info-from-kernel-before-creating-compressed-vmlinuz.patch b/queue-5.2/parisc-strip-debug-info-from-kernel-before-creating-compressed-vmlinuz.patch
new file mode 100644 (file)
index 0000000..b6938b6
--- /dev/null
@@ -0,0 +1,34 @@
+From e50beea8e7738377b4fa664078547be338038ff9 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Thu, 1 Aug 2019 13:42:18 +0200
+Subject: parisc: Strip debug info from kernel before creating compressed vmlinuz
+
+From: Helge Deller <deller@gmx.de>
+
+commit e50beea8e7738377b4fa664078547be338038ff9 upstream.
+
+Same as on x86-64, strip the .comment, .note and debug sections from the
+Linux kernel before creating the compressed image for the boot loader.
+
+Reported-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Reported-by: Sven Schnelle <svens@stackframe.org>
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/boot/compressed/Makefile |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/boot/compressed/Makefile
++++ b/arch/parisc/boot/compressed/Makefile
+@@ -56,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
+ CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
+ $(obj)/vmlinux.lds: $(obj)/sizes.h
+-$(obj)/vmlinux.bin: vmlinux
++OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
++$(obj)/vmlinux.bin: vmlinux FORCE
+       $(call if_changed,objcopy)
+ vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/queue-5.2/powerpc-kasan-fix-early-boot-failure-on-ppc32.patch b/queue-5.2/powerpc-kasan-fix-early-boot-failure-on-ppc32.patch
new file mode 100644 (file)
index 0000000..9cf3e45
--- /dev/null
@@ -0,0 +1,50 @@
+From d7e23b887f67178c4f840781be7a6aa6aeb52ab1 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Wed, 31 Jul 2019 06:01:42 +0000
+Subject: powerpc/kasan: fix early boot failure on PPC32
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit d7e23b887f67178c4f840781be7a6aa6aeb52ab1 upstream.
+
+Due to commit 4a6d8cf90017 ("powerpc/mm: don't use pte_alloc_kernel()
+until slab is available on PPC32"), pte_alloc_kernel() cannot be used
+during early KASAN init.
+
+Fix it by using memblock_alloc() instead.
+
+Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support")
+Cc: stable@vger.kernel.org # v5.2+
+Reported-by: Erhard F. <erhard_f@mailbox.org>
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/da89670093651437f27d2975224712e0a130b055.1564552796.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/kasan/kasan_init_32.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -21,7 +21,7 @@ static void kasan_populate_pte(pte_t *pt
+               __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ }
+-static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
++static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+ {
+       pmd_t *pmd;
+       unsigned long k_cur, k_next;
+@@ -35,7 +35,10 @@ static int kasan_init_shadow_page_tables
+               if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+                       continue;
+-              new = pte_alloc_one_kernel(&init_mm);
++              if (slab_is_available())
++                      new = pte_alloc_one_kernel(&init_mm);
++              else
++                      new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+               if (!new)
+                       return -ENOMEM;
diff --git a/queue-5.2/rdma-bnxt_re-honor-vlan_id-in-gid-entry-comparison.patch b/queue-5.2/rdma-bnxt_re-honor-vlan_id-in-gid-entry-comparison.patch
new file mode 100644 (file)
index 0000000..15a3c5d
--- /dev/null
@@ -0,0 +1,193 @@
+From c56b593d2af4cbd189c6af5fd6790728fade80cc Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+Date: Mon, 15 Jul 2019 05:19:13 -0400
+Subject: RDMA/bnxt_re: Honor vlan_id in GID entry comparison
+
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+
+commit c56b593d2af4cbd189c6af5fd6790728fade80cc upstream.
+
+A GID entry consists of GID, vlan, netdev and smac.  Extend GID duplicate
+check comparisons to consider vlan_id as well to support IPv6 VLAN based
+link local addresses. Introduce a new structure (bnxt_qplib_gid_info) to
+hold gid and vlan_id information.
+
+The issue is discussed in the following thread
+https://lore.kernel.org/r/AM0PR05MB4866CFEDCDF3CDA1D7D18AA5D1F20@AM0PR05MB4866.eurprd05.prod.outlook.com
+
+Fixes: 823b23da7113 ("IB/core: Allow vlan link local address based RoCE GIDs")
+Cc: <stable@vger.kernel.org> # v5.2+
+Link: https://lore.kernel.org/r/20190715091913.15726-1-selvin.xavier@broadcom.com
+Reported-by: Yi Zhang <yi.zhang@redhat.com>
+Co-developed-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Tested-by: Yi Zhang <yi.zhang@redhat.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c  |    7 +++++--
+ drivers/infiniband/hw/bnxt_re/qplib_res.c |   13 +++++++++----
+ drivers/infiniband/hw/bnxt_re/qplib_res.h |    2 +-
+ drivers/infiniband/hw/bnxt_re/qplib_sp.c  |   14 +++++++++-----
+ drivers/infiniband/hw/bnxt_re/qplib_sp.h  |    7 ++++++-
+ 5 files changed, 30 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_
+       struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
+       struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+       struct bnxt_qplib_gid *gid_to_del;
++      u16 vlan_id = 0xFFFF;
+       /* Delete the entry from the hardware */
+       ctx = *context;
+@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_
+       if (sgid_tbl && sgid_tbl->active) {
+               if (ctx->idx >= sgid_tbl->max)
+                       return -EINVAL;
+-              gid_to_del = &sgid_tbl->tbl[ctx->idx];
++              gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
++              vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
+               /* DEL_GID is called in WQ context(netdevice_event_work_handler)
+                * or via the ib_unregister_device path. In the former case QP1
+                * may not be destroyed yet, in which case just return as FW
+@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_
+               }
+               ctx->refcnt--;
+               if (!ctx->refcnt) {
+-                      rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
++                      rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
++                                               vlan_id,  true);
+                       if (rc) {
+                               dev_err(rdev_to_dev(rdev),
+                                       "Failed to remove GID: %#x", rc);
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(str
+                                    struct bnxt_qplib_sgid_tbl *sgid_tbl,
+                                    u16 max)
+ {
+-      sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
++      sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
+       if (!sgid_tbl->tbl)
+               return -ENOMEM;
+@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(
+       for (i = 0; i < sgid_tbl->max; i++) {
+               if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+                          sizeof(bnxt_qplib_gid_zero)))
+-                      bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
++                      bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
++                                          sgid_tbl->tbl[i].vlan_id, true);
+       }
+-      memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
++      memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
+       memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+       memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
+       sgid_tbl->active = 0;
+@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(
+ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+                                    struct net_device *netdev)
+ {
+-      memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
++      u32 i;
++
++      for (i = 0; i < sgid_tbl->max; i++)
++              sgid_tbl->tbl[i].vlan_id = 0xffff;
++
+       memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ }
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
+ };
+ struct bnxt_qplib_sgid_tbl {
+-      struct bnxt_qplib_gid           *tbl;
++      struct bnxt_qplib_gid_info      *tbl;
+       u16                             *hw_id;
+       u16                             max;
+       u16                             active;
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qpli
+                       index, sgid_tbl->max);
+               return -EINVAL;
+       }
+-      memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
++      memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
+       return 0;
+ }
+ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+-                      struct bnxt_qplib_gid *gid, bool update)
++                      struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
+ {
+       struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+                                                  struct bnxt_qplib_res,
+@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qpli
+               return -ENOMEM;
+       }
+       for (index = 0; index < sgid_tbl->max; index++) {
+-              if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
++              if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
++                  vlan_id == sgid_tbl->tbl[index].vlan_id)
+                       break;
+       }
+       if (index == sgid_tbl->max) {
+@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qpli
+               if (rc)
+                       return rc;
+       }
+-      memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
++      memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
+              sizeof(bnxt_qplib_gid_zero));
++      sgid_tbl->tbl[index].vlan_id = 0xFFFF;
+       sgid_tbl->vlan[index] = 0;
+       sgid_tbl->active--;
+       dev_dbg(&res->pdev->dev,
+@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qpli
+       }
+       free_idx = sgid_tbl->max;
+       for (i = 0; i < sgid_tbl->max; i++) {
+-              if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
++              if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
++                  sgid_tbl->tbl[i].vlan_id == vlan_id) {
+                       dev_dbg(&res->pdev->dev,
+                               "SGID entry already exist in entry %d!\n", i);
+                       *index = i;
+@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qpli
+       }
+       /* Add GID to the sgid_tbl */
+       memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
++      sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
+       sgid_tbl->active++;
+       if (vlan_id != 0xFFFF)
+               sgid_tbl->vlan[free_idx] = 1;
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
+       u8                              data[16];
+ };
++struct bnxt_qplib_gid_info {
++      struct bnxt_qplib_gid gid;
++      u16 vlan_id;
++};
++
+ struct bnxt_qplib_ah {
+       struct bnxt_qplib_gid           dgid;
+       struct bnxt_qplib_pd            *pd;
+@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qpli
+                       struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+                       struct bnxt_qplib_gid *gid);
+ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+-                      struct bnxt_qplib_gid *gid, bool update);
++                      struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
+ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+                       struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
+                       bool update, u32 *index);
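
The essence of the change is that the duplicate lookup is now keyed on the
(GID, vlan_id) pair instead of the GID alone. A compact stand-alone sketch of
such a composite-key lookup, with invented types rather than the bnxt_re
structures:

    #include <stdint.h>
    #include <string.h>

    struct gid_info {
        uint8_t  gid[16];
        uint16_t vlan_id;
    };

    /*
     * Duplicate lookup keyed on (gid, vlan_id): two entries with the same GID
     * but different VLANs are distinct.  Returns the index or -1.
     */
    static int find_gid(const struct gid_info *tbl, int max,
                        const uint8_t gid[16], uint16_t vlan_id)
    {
        int i;

        for (i = 0; i < max; i++) {
            if (!memcmp(tbl[i].gid, gid, 16) && tbl[i].vlan_id == vlan_id)
                return i;
        }
        return -1;
    }

    int main(void)
    {
        struct gid_info tbl[2] = {
            { { 0xfe, 0x80 }, 100 },
            { { 0xfe, 0x80 }, 200 },    /* same GID, different VLAN */
        };
        uint8_t gid[16] = { 0xfe, 0x80 };

        return find_gid(tbl, 2, gid, 200) == 1 ? 0 : 1;
    }
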
diff --git a/queue-5.2/rdma-devices-do-not-deadlock-during-client-removal.patch b/queue-5.2/rdma-devices-do-not-deadlock-during-client-removal.patch
new file mode 100644 (file)
index 0000000..a0f3229
--- /dev/null
@@ -0,0 +1,211 @@
+From 621e55ff5b8e0ab5d1063f0eae0ef3960bef8f6e Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@mellanox.com>
+Date: Wed, 31 Jul 2019 11:18:40 +0300
+Subject: RDMA/devices: Do not deadlock during client removal
+
+From: Jason Gunthorpe <jgg@mellanox.com>
+
+commit 621e55ff5b8e0ab5d1063f0eae0ef3960bef8f6e upstream.
+
+lockdep reports:
+
+   WARNING: possible circular locking dependency detected
+
+   modprobe/302 is trying to acquire lock:
+   0000000007c8919c ((wq_completion)ib_cm){+.+.}, at: flush_workqueue+0xdf/0x990
+
+   but task is already holding lock:
+   000000002d3d2ca9 (&device->client_data_rwsem){++++}, at: remove_client_context+0x79/0xd0 [ib_core]
+
+   which lock already depends on the new lock.
+
+   the existing dependency chain (in reverse order) is:
+
+   -> #2 (&device->client_data_rwsem){++++}:
+          down_read+0x3f/0x160
+          ib_get_net_dev_by_params+0xd5/0x200 [ib_core]
+          cma_ib_req_handler+0x5f6/0x2090 [rdma_cm]
+          cm_process_work+0x29/0x110 [ib_cm]
+          cm_req_handler+0x10f5/0x1c00 [ib_cm]
+          cm_work_handler+0x54c/0x311d [ib_cm]
+          process_one_work+0x4aa/0xa30
+          worker_thread+0x62/0x5b0
+          kthread+0x1ca/0x1f0
+          ret_from_fork+0x24/0x30
+
+   -> #1 ((work_completion)(&(&work->work)->work)){+.+.}:
+          process_one_work+0x45f/0xa30
+          worker_thread+0x62/0x5b0
+          kthread+0x1ca/0x1f0
+          ret_from_fork+0x24/0x30
+
+   -> #0 ((wq_completion)ib_cm){+.+.}:
+          lock_acquire+0xc8/0x1d0
+          flush_workqueue+0x102/0x990
+          cm_remove_one+0x30e/0x3c0 [ib_cm]
+          remove_client_context+0x94/0xd0 [ib_core]
+          disable_device+0x10a/0x1f0 [ib_core]
+          __ib_unregister_device+0x5a/0xe0 [ib_core]
+          ib_unregister_device+0x21/0x30 [ib_core]
+          mlx5_ib_stage_ib_reg_cleanup+0x9/0x10 [mlx5_ib]
+          __mlx5_ib_remove+0x3d/0x70 [mlx5_ib]
+          mlx5_ib_remove+0x12e/0x140 [mlx5_ib]
+          mlx5_remove_device+0x144/0x150 [mlx5_core]
+          mlx5_unregister_interface+0x3f/0xf0 [mlx5_core]
+          mlx5_ib_cleanup+0x10/0x3a [mlx5_ib]
+          __x64_sys_delete_module+0x227/0x350
+          do_syscall_64+0xc3/0x6a4
+          entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+This is due to the read side of the client_data_rwsem being obtained
+recursively through a work queue flush during cm client removal.
+
+The lock is being held across the remove in remove_client_context() so
+that the function is a fence: once it returns, the client is removed. This
+is required so that the two callers do not proceed with destruction until
+the client completes removal.
+
+Instead of using client_data_rwsem use the existing device unregistration
+refcount and add a similar client unregistration (client->uses) refcount.
+
+This will fence the two unregistration paths without holding any locks.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 921eab1143aa ("RDMA/devices: Re-organize device.c locking")
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Link: https://lore.kernel.org/r/20190731081841.32345-2-leon@kernel.org
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/device.c |   54 +++++++++++++++++++++++++++++----------
+ include/rdma/ib_verbs.h          |    3 ++
+ 2 files changed, 44 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -98,6 +98,12 @@ static LIST_HEAD(client_list);
+ static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
+ static DECLARE_RWSEM(clients_rwsem);
++static void ib_client_put(struct ib_client *client)
++{
++      if (refcount_dec_and_test(&client->uses))
++              complete(&client->uses_zero);
++}
++
+ /*
+  * If client_data is registered then the corresponding client must also still
+  * be registered.
+@@ -651,6 +657,14 @@ static int add_client_context(struct ib_
+       down_write(&device->client_data_rwsem);
+       /*
++       * So long as the client is registered hold both the client and device
++       * unregistration locks.
++       */
++      if (!refcount_inc_not_zero(&client->uses))
++              goto out_unlock;
++      refcount_inc(&device->refcount);
++
++      /*
+        * Another caller to add_client_context got here first and has already
+        * completely initialized context.
+        */
+@@ -673,6 +687,9 @@ static int add_client_context(struct ib_
+       return 0;
+ out:
++      ib_device_put(device);
++      ib_client_put(client);
++out_unlock:
+       up_write(&device->client_data_rwsem);
+       return ret;
+ }
+@@ -692,7 +709,7 @@ static void remove_client_context(struct
+       client_data = xa_load(&device->client_data, client_id);
+       xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
+       client = xa_load(&clients, client_id);
+-      downgrade_write(&device->client_data_rwsem);
++      up_write(&device->client_data_rwsem);
+       /*
+        * Notice we cannot be holding any exclusive locks when calling the
+@@ -702,17 +719,13 @@ static void remove_client_context(struct
+        *
+        * For this reason clients and drivers should not call the
+        * unregistration functions will holdling any locks.
+-       *
+-       * It tempting to drop the client_data_rwsem too, but this is required
+-       * to ensure that unregister_client does not return until all clients
+-       * are completely unregistered, which is required to avoid module
+-       * unloading races.
+        */
+       if (client->remove)
+               client->remove(device, client_data);
+       xa_erase(&device->client_data, client_id);
+-      up_read(&device->client_data_rwsem);
++      ib_device_put(device);
++      ib_client_put(client);
+ }
+ static int alloc_port_data(struct ib_device *device)
+@@ -1696,6 +1709,8 @@ int ib_register_client(struct ib_client
+       unsigned long index;
+       int ret;
++      refcount_set(&client->uses, 1);
++      init_completion(&client->uses_zero);
+       ret = assign_client_id(client);
+       if (ret)
+               return ret;
+@@ -1731,16 +1746,29 @@ void ib_unregister_client(struct ib_clie
+       unsigned long index;
+       down_write(&clients_rwsem);
++      ib_client_put(client);
+       xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
+       up_write(&clients_rwsem);
++
++      /* We do not want to have locks while calling client->remove() */
++      rcu_read_lock();
++      xa_for_each (&devices, index, device) {
++              if (!ib_device_try_get(device))
++                      continue;
++              rcu_read_unlock();
++
++              remove_client_context(device, client->client_id);
++
++              ib_device_put(device);
++              rcu_read_lock();
++      }
++      rcu_read_unlock();
++
+       /*
+-       * Every device still known must be serialized to make sure we are
+-       * done with the client callbacks before we return.
++       * remove_client_context() is not a fence, it can return even though a
++       * removal is ongoing. Wait until all removals are completed.
+        */
+-      down_read(&devices_rwsem);
+-      xa_for_each (&devices, index, device)
+-              remove_client_context(device, client->client_id);
+-      up_read(&devices_rwsem);
++      wait_for_completion(&client->uses_zero);
+       down_write(&clients_rwsem);
+       list_del(&client->list);
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2722,6 +2722,9 @@ struct ib_client {
+                       const union ib_gid *gid,
+                       const struct sockaddr *addr,
+                       void *client_data);
++
++      refcount_t uses;
++      struct completion uses_zero;
+       struct list_head list;
+       u32 client_id;
diff --git a/queue-5.2/s390-dasd-fix-endless-loop-after-read-unit-address-configuration.patch b/queue-5.2/s390-dasd-fix-endless-loop-after-read-unit-address-configuration.patch
new file mode 100644 (file)
index 0000000..80ab904
--- /dev/null
@@ -0,0 +1,73 @@
+From 41995342b40c418a47603e1321256d2c4a2ed0fb Mon Sep 17 00:00:00 2001
+From: Stefan Haberland <sth@linux.ibm.com>
+Date: Thu, 1 Aug 2019 13:06:30 +0200
+Subject: s390/dasd: fix endless loop after read unit address configuration
+
+From: Stefan Haberland <sth@linux.ibm.com>
+
+commit 41995342b40c418a47603e1321256d2c4a2ed0fb upstream.
+
+After a storage server event causes the DASD device driver
+to update its unit address configuration during a device shutdown, there is
+the possibility of an endless loop in the device driver.
+
+In the system log there will be ongoing DASD error messages with RC: -19.
+
+The reason is that the loop starting the ruac request only terminates when
+the retry counter is decreased to 0. But in the sleep_on function there are
+early exit paths that do not decrease the retry counter.
+
+Prevent an endless loop by handling those cases separately.
+
+Remove the unnecessary do..while loop since the sleep_on function takes
+care of retries by itself.
+
+Fixes: 8e09f21574ea ("[S390] dasd: add hyper PAV support to DASD device driver, part 1")
+Cc: stable@vger.kernel.org # 2.6.25+
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Reviewed-by: Jan Hoeppner <hoeppner@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/block/dasd_alias.c |   22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_r
+       char msg_format;
+       char msg_no;
++      /*
++       * intrc values ENODEV, ENOLINK and EPERM
++       * will be obtained from sleep_on to indicate that no
++       * IO operation can be started
++       */
++      if (cqr->intrc == -ENODEV)
++              return 1;
++
++      if (cqr->intrc == -ENOLINK)
++              return 1;
++
++      if (cqr->intrc == -EPERM)
++              return 1;
++
+       sense = dasd_get_sense(&cqr->irb);
+       if (!sense)
+               return 0;
+@@ -447,12 +461,8 @@ static int read_unit_address_configurati
+       lcu->flags &= ~NEED_UAC_UPDATE;
+       spin_unlock_irqrestore(&lcu->lock, flags);
+-      do {
+-              rc = dasd_sleep_on(cqr);
+-              if (rc && suborder_not_supported(cqr))
+-                      return -EOPNOTSUPP;
+-      } while (rc && (cqr->retries > 0));
+-      if (rc) {
++      rc = dasd_sleep_on(cqr);
++      if (rc && !suborder_not_supported(cqr)) {
+               spin_lock_irqsave(&lcu->lock, flags);
+               lcu->flags |= NEED_UAC_UPDATE;
+               spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/queue-5.2/scsi-mpt3sas-use-63-bit-dma-addressing-on-sas35-hba.patch b/queue-5.2/scsi-mpt3sas-use-63-bit-dma-addressing-on-sas35-hba.patch
new file mode 100644 (file)
index 0000000..f4e528c
--- /dev/null
@@ -0,0 +1,80 @@
+From df9a606184bfdb5ae3ca9d226184e9489f5c24f7 Mon Sep 17 00:00:00 2001
+From: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+Date: Tue, 30 Jul 2019 03:43:57 -0400
+Subject: scsi: mpt3sas: Use 63-bit DMA addressing on SAS35 HBA
+
+From: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+
+commit df9a606184bfdb5ae3ca9d226184e9489f5c24f7 upstream.
+
+Although SAS3 & SAS3.5 IT HBA controllers support 64-bit DMA addressing, by
+hardware design a DMA-able range that has all 64 bits set
+(0xFFFFFFFF-FFFFFFFF) results in a firmware fault.
+
+E.g. if an SGE's start address is 0xFFFFFFFF-FFFFF000 and the data length is
+0x1000 bytes, then when the HBA tries to DMA data at the 0xFFFFFFFF-FFFFFFFF
+location it will fault the firmware.
+
+The driver therefore sets a 63-bit DMA mask to ensure the above address is
+never used.
+
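+For context, DMA_BIT_MASK() (as defined in include/linux/dma-mapping.h)
+makes the effect of the 63-bit mask easy to see; a minimal user-space sketch:
+
+	#include <stdio.h>
+
+	/* same definition as in include/linux/dma-mapping.h */
+	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
+
+	int main(void)
+	{
+		/* The 64-bit mask allows the all-ones address that faults the
+		 * firmware; the 63-bit mask keeps the top bit clear, so no
+		 * buffer can ever end at 0xFFFFFFFF-FFFFFFFF. */
+		printf("%#llx\n", DMA_BIT_MASK(64)); /* 0xffffffffffffffff */
+		printf("%#llx\n", DMA_BIT_MASK(63)); /* 0x7fffffffffffffff */
+		return 0;
+	}
+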
+Cc: <stable@vger.kernel.org> # 5.1.20+
+Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2683,6 +2683,8 @@ _base_config_dma_addressing(struct MPT3S
+ {
+       u64 required_mask, coherent_mask;
+       struct sysinfo s;
++      /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
++      int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
+       if (ioc->is_mcpu_endpoint)
+               goto try_32bit;
+@@ -2692,17 +2694,17 @@ _base_config_dma_addressing(struct MPT3S
+               goto try_32bit;
+       if (ioc->dma_mask)
+-              coherent_mask = DMA_BIT_MASK(64);
++              coherent_mask = DMA_BIT_MASK(dma_mask);
+       else
+               coherent_mask = DMA_BIT_MASK(32);
+-      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
++      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
+           dma_set_coherent_mask(&pdev->dev, coherent_mask))
+               goto try_32bit;
+       ioc->base_add_sg_single = &_base_add_sg_single_64;
+       ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+-      ioc->dma_mask = 64;
++      ioc->dma_mask = dma_mask;
+       goto out;
+  try_32bit:
+@@ -2724,7 +2726,7 @@ static int
+ _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+                                     struct pci_dev *pdev)
+ {
+-      if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
++      if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
+               if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+                       return -ENODEV;
+       }
+@@ -4631,7 +4633,7 @@ _base_allocate_memory_pools(struct MPT3S
+               total_sz += sz;
+       } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+-      if (ioc->dma_mask == 64) {
++      if (ioc->dma_mask > 32) {
+               if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+                       ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+                                pci_name(ioc->pdev));
diff --git a/queue-5.2/selinux-fix-memory-leak-in-policydb_init.patch b/queue-5.2/selinux-fix-memory-leak-in-policydb_init.patch
new file mode 100644 (file)
index 0000000..a07dd4a
--- /dev/null
@@ -0,0 +1,47 @@
+From 45385237f65aeee73641f1ef737d7273905a233f Mon Sep 17 00:00:00 2001
+From: Ondrej Mosnacek <omosnace@redhat.com>
+Date: Thu, 25 Jul 2019 12:52:43 +0200
+Subject: selinux: fix memory leak in policydb_init()
+
+From: Ondrej Mosnacek <omosnace@redhat.com>
+
+commit 45385237f65aeee73641f1ef737d7273905a233f upstream.
+
+Since roles_init() adds some entries to the role hash table, we also need
+to destroy its keys/values on error; otherwise we get a memory leak in
+the error path.
+
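+In other words, hashtab_destroy() frees the table itself but not the
+keys/values stored in it, so a leak-free error path has to walk each symbol
+table with its destructor first. A sketch of the pattern (the same calls as
+in the hunk below):
+
+	for (i = 0; i < SYM_NUM; i++) {
+		/* free the keys/values via the per-symbol destructor */
+		hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
+		/* then free the hash table structure itself */
+		hashtab_destroy(p->symtab[i].table);
+	}
+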
+Cc: <stable@vger.kernel.org>
+Reported-by: syzbot+fee3a14d4cdf92646287@syzkaller.appspotmail.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/ss/policydb.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h
+       return v;
+ }
++static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
++
+ /*
+  * Initialize a policy database structure.
+  */
+@@ -319,8 +321,10 @@ static int policydb_init(struct policydb
+ out:
+       hashtab_destroy(p->filename_trans);
+       hashtab_destroy(p->range_tr);
+-      for (i = 0; i < SYM_NUM; i++)
++      for (i = 0; i < SYM_NUM; i++) {
++              hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
+               hashtab_destroy(p->symtab[i].table);
++      }
+       return rc;
+ }
diff --git a/queue-5.2/series b/queue-5.2/series
index 3fddb38224433ee8580eeefe7730b6c077ec3473..461988af10e27a4645d71f4fd401625b972537ac 100644 (file)
@@ -89,3 +89,41 @@ gpio-don-t-warn-on-null-descs-if-gpiolib-is-disabled.patch
 gpiolib-fix-incorrect-irq-requesting-of-an-active-low-lineevent.patch
 ib-hfi1-fix-spectre-v1-vulnerability.patch
 drm-nouveau-only-release-vcpi-slots-on-mode-changes.patch
+mtd-rawnand-micron-handle-on-die-ecc-off-devices-correctly.patch
+eeprom-at24-make-spd-world-readable-again.patch
+i2c-iproc-fix-i2c-master-read-more-than-63-bytes.patch
+i2c-at91-disable-txrdy-interrupt-after-sending-data.patch
+i2c-at91-fix-clk_offset-for-sama5d2.patch
+powerpc-kasan-fix-early-boot-failure-on-ppc32.patch
+selinux-fix-memory-leak-in-policydb_init.patch
+alsa-hda-fix-1-minute-detection-delay-when-i915-module-is-not-available.patch
+mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch
+mm-migrate-fix-reference-check-race-between-__find_get_block-and-migration.patch
+mm-compaction-avoid-100-cpu-usage-during-compaction-when-a-task-is-killed.patch
+ubsan-build-ubsan.c-more-conservatively.patch
+mm-migrate.c-initialize-pud_entry-in-migrate_vma.patch
+loop-fix-mount-2-failure-due-to-race-with-loop_set_fd.patch
+s390-dasd-fix-endless-loop-after-read-unit-address-configuration.patch
+cgroup-kselftest-relax-fs_spec-checks.patch
+parisc-add-archclean-makefile-target.patch
+parisc-strip-debug-info-from-kernel-before-creating-compressed-vmlinuz.patch
+parisc-fix-build-of-compressed-kernel-even-with-debug-enabled.patch
+drivers-perf-arm_pmu-fix-failure-path-in-pm-notifier.patch
+arm64-compat-allow-single-byte-watchpoints-on-all-addresses.patch
+arm64-cpufeature-fix-feature-comparison-for-ctr_el0.-cwg-erg.patch
+io_uring-fix-kasan-use-after-free-in-io_sq_wq_submit_work.patch
+clk-mediatek-mt8183-register-13mhz-clock-earlier-for-clocksource.patch
+scsi-mpt3sas-use-63-bit-dma-addressing-on-sas35-hba.patch
+nbd-replace-kill_bdev-with-__invalidate_device-again.patch
+xen-swiotlb-fix-condition-for-calling-xen_destroy_contiguous_region.patch
+xen-gntdev.c-replace-vm_map_pages-with-vm_map_pages_zero.patch
+rdma-bnxt_re-honor-vlan_id-in-gid-entry-comparison.patch
+rdma-devices-do-not-deadlock-during-client-removal.patch
+ib-mlx5-fix-unreg_umr-to-ignore-the-mkey-state.patch
+ib-mlx5-use-direct-mkey-destroy-command-upon-umr-unreg-failure.patch
+ib-mlx5-move-mrs-to-a-kernel-pd-when-freeing-them-to-the-mr-cache.patch
+ib-mlx5-fix-clean_mr-to-work-in-the-expected-order.patch
+ib-mlx5-fix-rss-toeplitz-setup-to-be-aligned-with-the-hw-specification.patch
+ib-hfi1-check-for-error-on-call-to-alloc_rsm_map_table.patch
+ib-hfi1-drop-all-tid-rdma-read-resp-packets-after-r_next_psn.patch
+ib-hfi1-field-not-zero-ed-when-allocating-tid-flow-memory.patch
diff --git a/queue-5.2/ubsan-build-ubsan.c-more-conservatively.patch b/queue-5.2/ubsan-build-ubsan.c-more-conservatively.patch
new file mode 100644 (file)
index 0000000..7f9cacc
--- /dev/null
@@ -0,0 +1,70 @@
+From af700eaed0564d5d3963a7a51cb0843629d7fe3d Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 2 Aug 2019 21:48:58 -0700
+Subject: ubsan: build ubsan.c more conservatively
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit af700eaed0564d5d3963a7a51cb0843629d7fe3d upstream.
+
+objtool points out several conditions that it does not like, depending
+on the combination with other configuration options and compiler
+variants:
+
+stack protector:
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch()+0xbf: call to __stack_chk_fail() with UACCESS enabled
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch_v1()+0xbe: call to __stack_chk_fail() with UACCESS enabled
+
+stackleak plugin:
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch()+0x4a: call to stackleak_track_stack() with UACCESS enabled
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch_v1()+0x4a: call to stackleak_track_stack() with UACCESS enabled
+
+kasan:
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch()+0x25: call to memcpy() with UACCESS enabled
+  lib/ubsan.o: warning: objtool: __ubsan_handle_type_mismatch_v1()+0x25: call to memcpy() with UACCESS enabled
+
+The stackleak and kasan options just need to be disabled for this file,
+as we already do for other files.  For the stack protector, we already
+attempt to disable it, but this fails on clang because the check is
+mixed with the gcc-specific -fno-conserve-stack option.  According to
+Andrey Ryabinin, that option is not even needed; dropping it here fixes
+the stack-protector issue.
+
+Link: http://lkml.kernel.org/r/20190722125139.1335385-1-arnd@arndb.de
+Link: https://lore.kernel.org/lkml/20190617123109.667090-1-arnd@arndb.de/t/
+Link: https://lore.kernel.org/lkml/20190722091050.2188664-1-arnd@arndb.de/t/
+Fixes: d08965a27e84 ("x86/uaccess, ubsan: Fix UBSAN vs. SMAP")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/Makefile |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -278,7 +278,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string
+ obj-$(CONFIG_UBSAN) += ubsan.o
+ UBSAN_SANITIZE_ubsan.o := n
+-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
++KASAN_SANITIZE_ubsan.o := n
++CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+ obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/queue-5.2/xen-gntdev.c-replace-vm_map_pages-with-vm_map_pages_zero.patch b/queue-5.2/xen-gntdev.c-replace-vm_map_pages-with-vm_map_pages_zero.patch
new file mode 100644 (file)
index 0000000..aab2105
--- /dev/null
@@ -0,0 +1,77 @@
+From 8d1502f629c9966743de45744f4c1ba93a57d105 Mon Sep 17 00:00:00 2001
+From: Souptick Joarder <jrdr.linux@gmail.com>
+Date: Wed, 31 Jul 2019 00:04:56 +0530
+Subject: xen/gntdev.c: Replace vm_map_pages() with vm_map_pages_zero()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Souptick Joarder <jrdr.linux@gmail.com>
+
+commit 8d1502f629c9966743de45744f4c1ba93a57d105 upstream.
+
+Commit df9bde015a72 ("xen/gntdev.c: convert to use vm_map_pages()")
+breaks the gntdev driver. If vma->vm_pgoff > 0, vm_map_pages() will:
+ - use map->pages starting at vma->vm_pgoff instead of 0
+ - verify map->count against vma_pages()+vma->vm_pgoff instead of just
+   vma_pages().
+
+In practice, this breaks using a single gntdev FD for mapping multiple
+grants.
+
+Relevant strace output:
+[pid   857] ioctl(7, IOCTL_GNTDEV_MAP_GRANT_REF, 0x7ffd3407b6d0) = 0
+[pid   857] mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, 7, 0) =
+0x777f1211b000
+[pid   857] ioctl(7, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, 0x7ffd3407b710) = 0
+[pid   857] ioctl(7, IOCTL_GNTDEV_MAP_GRANT_REF, 0x7ffd3407b6d0) = 0
+[pid   857] mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, 7,
+0x1000) = -1 ENXIO (No such device or address)
+
+details here:
+https://github.com/QubesOS/qubes-issues/issues/5199
+
+The reason is as follows (copying Marek's words from the discussion):
+
+vma->vm_pgoff is used as the index passed to gntdev_find_map_index(); it is
+basically using this parameter to select which grant reference to map. The
+map struct returned by gntdev_find_map_index() describes just the pages to
+be mapped. Specifically, map->pages[0] should be mapped at vma->vm_start,
+not at vma->vm_start + vma->vm_pgoff * PAGE_SIZE.
+
+When trying to map a grant with index (aka vma->vm_pgoff) > 1,
+__vm_map_pages() will refuse to map it because it expects map->count to be
+at least vma_pages(vma) + vma->vm_pgoff, while it is exactly
+vma_pages(vma).
+
+Converting vm_map_pages() to use vm_map_pages_zero() will fix the
+problem.
+
+Marek has tested and confirmed the same.
+
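+A condensed view of the difference between the two helpers (both take the
+same arguments; behaviour paraphrased from the description above):
+
+	/* vm_map_pages(vma, pages, num):
+	 *   honours vma->vm_pgoff - requires num >= vma_pages(vma) + vm_pgoff
+	 *   and maps pages[vm_pgoff] at vma->vm_start.
+	 *
+	 * vm_map_pages_zero(vma, pages, num):
+	 *   ignores vma->vm_pgoff and always maps pages[0] at vma->vm_start.
+	 *
+	 * gntdev has already used vm_pgoff to pick the right map, so the
+	 * _zero variant is the correct call here: */
+	err = vm_map_pages_zero(vma, map->pages, map->count);
+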
+Cc: stable@vger.kernel.org # v5.2+
+Fixes: df9bde015a72 ("xen/gntdev.c: convert to use vm_map_pages()")
+
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
+Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -1145,7 +1145,7 @@ static int gntdev_mmap(struct file *flip
+               goto out_put_map;
+       if (!use_ptemod) {
+-              err = vm_map_pages(vma, map->pages, map->count);
++              err = vm_map_pages_zero(vma, map->pages, map->count);
+               if (err)
+                       goto out_put_map;
+       } else {
diff --git a/queue-5.2/xen-swiotlb-fix-condition-for-calling-xen_destroy_contiguous_region.patch b/queue-5.2/xen-swiotlb-fix-condition-for-calling-xen_destroy_contiguous_region.patch
new file mode 100644 (file)
index 0000000..1bda65b
--- /dev/null
@@ -0,0 +1,44 @@
+From 50f6393f9654c561df4cdcf8e6cfba7260143601 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 14 Jun 2019 07:46:02 +0200
+Subject: xen/swiotlb: fix condition for calling xen_destroy_contiguous_region()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 50f6393f9654c561df4cdcf8e6cfba7260143601 upstream.
+
+The condition in xen_swiotlb_free_coherent() for deciding whether to
+call xen_destroy_contiguous_region() is wrong: if the region to be
+freed is not contiguous, calling xen_destroy_contiguous_region() would
+result in inconsistent mappings of multiple PFNs to the same MFN. This
+will lead to various strange crashes or data corruption.
+
+Instead of calling xen_destroy_contiguous_region() in that case, a
+warning should be issued, as that situation should never occur.
+
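+A condensed before/after view of the check (the same lines as in the hunk
+below):
+
+	/* Old check - ran xen_destroy_contiguous_region() even when the
+	 * region straddles a page boundary, the case the text above says
+	 * must not be destroyed:
+	 *
+	 *	if (((dev_addr + size - 1 <= dma_mask)) ||
+	 *	    range_straddles_page_boundary(phys, size))
+	 *		xen_destroy_contiguous_region(phys, order);
+	 *
+	 * New check - destroy only when neither condition triggers, and
+	 * warn (instead of destroying) if one ever does: */
+	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+		     range_straddles_page_boundary(phys, size)))
+		xen_destroy_contiguous_region(phys, order);
+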
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/swiotlb-xen.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -361,8 +361,8 @@ xen_swiotlb_free_coherent(struct device
+       /* Convert the size to actually allocated. */
+       size = 1UL << (order + XEN_PAGE_SHIFT);
+-      if (((dev_addr + size - 1 <= dma_mask)) ||
+-          range_straddles_page_boundary(phys, size))
++      if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
++                   range_straddles_page_boundary(phys, size)))
+               xen_destroy_contiguous_region(phys, order);
+       xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);