5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Dec 2019 11:37:28 +0000 (12:37 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Dec 2019 11:37:28 +0000 (12:37 +0100)
added patches:
arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch
arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch
block-fix-check-bi_size-overflow-before-merge.patch
cifs-close-open-handle-after-interrupted-close.patch
cifs-do-not-miss-cancelled-open-responses.patch
cifs-don-t-display-rdma-transport-on-reconnect.patch
cifs-fix-null-pointer-dereference-in-mid-callback.patch
cifs-fix-retrieval-of-dfs-referrals-in-cifs_mount.patch
cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch
cifs-smbd-add-messages-on-rdma-session-destroy-and-reconnection.patch
cifs-smbd-only-queue-work-for-error-recovery-on-memory-registration.patch
cifs-smbd-return-eagain-when-transport-is-reconnecting.patch
cifs-smbd-return-econnaborted-when-trasnport-is-not-in-connected-state.patch
cifs-smbd-return-einval-when-the-number-of-iovs-exceeds-smbdirect_max_sge.patch
dm-btree-increase-rebalance-threshold-in-__rebalance2.patch
dm-clone-flush-destination-device-before-committing-metadata.patch
dm-clone-metadata-track-exact-changes-per-transaction.patch
dm-clone-metadata-use-a-two-phase-commit.patch
dm-mpath-remove-harmful-bio-based-optimization.patch
dm-thin-flush-data-device-before-committing-metadata.patch
dm-thin-metadata-add-support-for-a-pre-commit-callback.patch
dma-buf-fix-memory-leak-in-sync_file_merge.patch
drm-amd-display-add-default-clocks-if-not-able-to-fetch-them.patch
drm-amd-display-re-enable-wait-in-pipelock-but-add-timeout.patch
drm-amdgpu-gfx10-explicitly-wait-for-cp-idle-after-halt-unhalt.patch
drm-amdgpu-gfx10-re-init-clear-state-buffer-after-gpu-reset.patch
drm-amdgpu-initialize-vm_inv_eng0_sem-for-gfxhub-and-mmhub.patch
drm-amdgpu-invalidate-mmhub-semaphore-workaround-in-gmc9-gmc10.patch
drm-dp_mst-correct-the-bug-in-drm_dp_update_payload_part1.patch
drm-i915-fbc-disable-fbc-by-default-on-all-glk.patch
drm-i915-gvt-fix-cmd-length-check-for-mi_atomic.patch
drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch
drm-mgag200-add-workaround-for-hw-that-does-not-support-startadd.patch
drm-mgag200-extract-device-type-from-flags.patch
drm-mgag200-flag-all-g200-se-a-machines-as-broken-wrt-startadd.patch
drm-mgag200-store-flags-from-pci-driver-data-in-device-structure.patch
drm-nouveau-kms-nv50-call-outp_atomic_check_view-before-handling-pbn.patch
drm-nouveau-kms-nv50-limit-mst-bpc-to-8.patch
drm-nouveau-kms-nv50-store-the-bpc-we-re-using-in-nv50_head_atom.patch
drm-panfrost-fix-a-bo-leak-in-panfrost_ioctl_mmap_bo.patch
drm-panfrost-fix-a-race-in-panfrost_gem_free_object.patch
drm-panfrost-fix-a-race-in-panfrost_ioctl_madvise.patch
drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch
gfs2-fix-glock-reference-problem-in-gfs2_trans_remove_revoke.patch
gfs2-multi-block-allocations-in-gfs2_page_mkwrite.patch
rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch
rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch
rpmsg-glink-fix-rpmsg_register_device-err-handling.patch
rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch
rpmsg-glink-free-pending-deferred-work-on-remove.patch
rpmsg-glink-put-an-extra-reference-during-cleanup.patch
rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch
scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch
scsi-qla2xxx-added-support-for-mpi-and-pep-regions-for-isp28xx.patch
scsi-qla2xxx-change-discovery-state-before-plogi.patch
scsi-qla2xxx-correctly-retrieve-and-interpret-active-flash-region.patch
scsi-qla2xxx-fix-incorrect-sfub-length-used-for-secure-flash-update-mb-cmd.patch
scsi-qla2xxx-ignore-null-pointer-in-tcm_qla2xxx_free_mcmd.patch
scsi-qla2xxx-initialize-free_work-before-flushing-it.patch
scsi-ufs-disable-autohibern8-feature-in-cadence-ufs.patch
vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch
xtensa-fix-syscall_set_return_value.patch
xtensa-fix-tlb-sanity-checker.patch
xtensa-use-memblock_alloc_anywhere-for-kasan-shadow-map.patch

65 files changed:
queue-5.4/arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch [new file with mode: 0644]
queue-5.4/arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch [new file with mode: 0644]
queue-5.4/block-fix-check-bi_size-overflow-before-merge.patch [new file with mode: 0644]
queue-5.4/cifs-close-open-handle-after-interrupted-close.patch [new file with mode: 0644]
queue-5.4/cifs-do-not-miss-cancelled-open-responses.patch [new file with mode: 0644]
queue-5.4/cifs-don-t-display-rdma-transport-on-reconnect.patch [new file with mode: 0644]
queue-5.4/cifs-fix-null-pointer-dereference-in-mid-callback.patch [new file with mode: 0644]
queue-5.4/cifs-fix-retrieval-of-dfs-referrals-in-cifs_mount.patch [new file with mode: 0644]
queue-5.4/cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch [new file with mode: 0644]
queue-5.4/cifs-smbd-add-messages-on-rdma-session-destroy-and-reconnection.patch [new file with mode: 0644]
queue-5.4/cifs-smbd-only-queue-work-for-error-recovery-on-memory-registration.patch [new file with mode: 0644]
queue-5.4/cifs-smbd-return-eagain-when-transport-is-reconnecting.patch [new file with mode: 0644]
queue-5.4/cifs-smbd-return-econnaborted-when-trasnport-is-not-in-connected-state.patch [new file with mode: 0644]
queue-5.4/cifs-smbd-return-einval-when-the-number-of-iovs-exceeds-smbdirect_max_sge.patch [new file with mode: 0644]
queue-5.4/dm-btree-increase-rebalance-threshold-in-__rebalance2.patch [new file with mode: 0644]
queue-5.4/dm-clone-flush-destination-device-before-committing-metadata.patch [new file with mode: 0644]
queue-5.4/dm-clone-metadata-track-exact-changes-per-transaction.patch [new file with mode: 0644]
queue-5.4/dm-clone-metadata-use-a-two-phase-commit.patch [new file with mode: 0644]
queue-5.4/dm-mpath-remove-harmful-bio-based-optimization.patch [new file with mode: 0644]
queue-5.4/dm-thin-flush-data-device-before-committing-metadata.patch [new file with mode: 0644]
queue-5.4/dm-thin-metadata-add-support-for-a-pre-commit-callback.patch [new file with mode: 0644]
queue-5.4/dma-buf-fix-memory-leak-in-sync_file_merge.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-add-default-clocks-if-not-able-to-fetch-them.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-re-enable-wait-in-pipelock-but-add-timeout.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-gfx10-explicitly-wait-for-cp-idle-after-halt-unhalt.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-gfx10-re-init-clear-state-buffer-after-gpu-reset.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-initialize-vm_inv_eng0_sem-for-gfxhub-and-mmhub.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-invalidate-mmhub-semaphore-workaround-in-gmc9-gmc10.patch [new file with mode: 0644]
queue-5.4/drm-dp_mst-correct-the-bug-in-drm_dp_update_payload_part1.patch [new file with mode: 0644]
queue-5.4/drm-i915-fbc-disable-fbc-by-default-on-all-glk.patch [new file with mode: 0644]
queue-5.4/drm-i915-gvt-fix-cmd-length-check-for-mi_atomic.patch [new file with mode: 0644]
queue-5.4/drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch [new file with mode: 0644]
queue-5.4/drm-mgag200-add-workaround-for-hw-that-does-not-support-startadd.patch [new file with mode: 0644]
queue-5.4/drm-mgag200-extract-device-type-from-flags.patch [new file with mode: 0644]
queue-5.4/drm-mgag200-flag-all-g200-se-a-machines-as-broken-wrt-startadd.patch [new file with mode: 0644]
queue-5.4/drm-mgag200-store-flags-from-pci-driver-data-in-device-structure.patch [new file with mode: 0644]
queue-5.4/drm-nouveau-kms-nv50-call-outp_atomic_check_view-before-handling-pbn.patch [new file with mode: 0644]
queue-5.4/drm-nouveau-kms-nv50-limit-mst-bpc-to-8.patch [new file with mode: 0644]
queue-5.4/drm-nouveau-kms-nv50-store-the-bpc-we-re-using-in-nv50_head_atom.patch [new file with mode: 0644]
queue-5.4/drm-panfrost-fix-a-bo-leak-in-panfrost_ioctl_mmap_bo.patch [new file with mode: 0644]
queue-5.4/drm-panfrost-fix-a-race-in-panfrost_gem_free_object.patch [new file with mode: 0644]
queue-5.4/drm-panfrost-fix-a-race-in-panfrost_ioctl_madvise.patch [new file with mode: 0644]
queue-5.4/drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch [new file with mode: 0644]
queue-5.4/gfs2-fix-glock-reference-problem-in-gfs2_trans_remove_revoke.patch [new file with mode: 0644]
queue-5.4/gfs2-multi-block-allocations-in-gfs2_page_mkwrite.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-fix-rpmsg_register_device-err-handling.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-free-pending-deferred-work-on-remove.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-put-an-extra-reference-during-cleanup.patch [new file with mode: 0644]
queue-5.4/rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch [new file with mode: 0644]
queue-5.4/scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-added-support-for-mpi-and-pep-regions-for-isp28xx.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-change-discovery-state-before-plogi.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-correctly-retrieve-and-interpret-active-flash-region.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-fix-incorrect-sfub-length-used-for-secure-flash-update-mb-cmd.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-ignore-null-pointer-in-tcm_qla2xxx_free_mcmd.patch [new file with mode: 0644]
queue-5.4/scsi-qla2xxx-initialize-free_work-before-flushing-it.patch [new file with mode: 0644]
queue-5.4/scsi-ufs-disable-autohibern8-feature-in-cadence-ufs.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch [new file with mode: 0644]
queue-5.4/xtensa-fix-syscall_set_return_value.patch [new file with mode: 0644]
queue-5.4/xtensa-fix-tlb-sanity-checker.patch [new file with mode: 0644]
queue-5.4/xtensa-use-memblock_alloc_anywhere-for-kasan-shadow-map.patch [new file with mode: 0644]

diff --git a/queue-5.4/arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch b/queue-5.4/arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch
new file mode 100644 (file)
index 0000000..22d7ea9
--- /dev/null
@@ -0,0 +1,59 @@
+From d60d0cff4ab01255b25375425745c3cff69558ad Mon Sep 17 00:00:00 2001
+From: Lihua Yao <ylhuajnu@outlook.com>
+Date: Tue, 10 Sep 2019 13:22:28 +0000
+Subject: ARM: dts: s3c64xx: Fix init order of clock providers
+
+From: Lihua Yao <ylhuajnu@outlook.com>
+
+commit d60d0cff4ab01255b25375425745c3cff69558ad upstream.
+
+fin_pll is the parent of clock-controller@7e00f000, specify
+the dependency to ensure proper initialization order of clock
+providers.
+
+without this patch:
+[    0.000000] S3C6410 clocks: apll = 0, mpll = 0
+[    0.000000]  epll = 0, arm_clk = 0
+
+with this patch:
+[    0.000000] S3C6410 clocks: apll = 532000000, mpll = 532000000
+[    0.000000]  epll = 24000000, arm_clk = 532000000
+
+Cc: <stable@vger.kernel.org>
+Fixes: 3f6d439f2022 ("clk: reverse default clk provider initialization order in of_clk_init()")
+Signed-off-by: Lihua Yao <ylhuajnu@outlook.com>
+Reviewed-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/s3c6410-mini6410.dts |    4 ++++
+ arch/arm/boot/dts/s3c6410-smdk6410.dts |    4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
++++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
+@@ -165,6 +165,10 @@
+       };
+ };
++&clocks {
++      clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+--- a/arch/arm/boot/dts/s3c6410-smdk6410.dts
++++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
+@@ -69,6 +69,10 @@
+       };
+ };
++&clocks {
++      clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
diff --git a/queue-5.4/arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch b/queue-5.4/arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch
new file mode 100644 (file)
index 0000000..9f966c1
--- /dev/null
@@ -0,0 +1,44 @@
+From d70f7d31a9e2088e8a507194354d41ea10062994 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <digetx@gmail.com>
+Date: Tue, 30 Jul 2019 20:23:39 +0300
+Subject: ARM: tegra: Fix FLOW_CTLR_HALT register clobbering by tegra_resume()
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+commit d70f7d31a9e2088e8a507194354d41ea10062994 upstream.
+
+There is an unfortunate typo in the code that results in writing to
+FLOW_CTLR_HALT instead of FLOW_CTLR_CSR.
+
+Cc: <stable@vger.kernel.org>
+Acked-by: Peter De Schrijver <pdeschrijver@nvidia.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-tegra/reset-handler.S |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -44,16 +44,16 @@ ENTRY(tegra_resume)
+       cmp     r6, #TEGRA20
+       beq     1f                              @ Yes
+       /* Clear the flow controller flags for this CPU. */
+-      cpu_to_csr_reg r1, r0
++      cpu_to_csr_reg r3, r0
+       mov32   r2, TEGRA_FLOW_CTRL_BASE
+-      ldr     r1, [r2, r1]
++      ldr     r1, [r2, r3]
+       /* Clear event & intr flag */
+       orr     r1, r1, \
+               #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+       movw    r0, #0x3FFD     @ enable, cluster_switch, immed, bitmaps
+                               @ & ext flags for CPU power mgnt
+       bic     r1, r1, r0
+-      str     r1, [r2]
++      str     r1, [r2, r3]
+ 1:
+       mov32   r9, 0xc09
diff --git a/queue-5.4/block-fix-check-bi_size-overflow-before-merge.patch b/queue-5.4/block-fix-check-bi_size-overflow-before-merge.patch
new file mode 100644 (file)
index 0000000..d770988
--- /dev/null
@@ -0,0 +1,43 @@
+From cc90bc68422318eb8e75b15cd74bc8d538a7df29 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Mon, 9 Dec 2019 20:11:14 +0100
+Subject: block: fix "check bi_size overflow before merge"
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit cc90bc68422318eb8e75b15cd74bc8d538a7df29 upstream.
+
+This partially reverts commit e3a5d8e386c3fb973fa75f2403622a8f3640ec06.
+
+Commit e3a5d8e386c3 ("check bi_size overflow before merge") adds a bio_full
+check to __bio_try_merge_page.  This will cause __bio_try_merge_page to fail
+when the last bi_io_vec has been reached.  Instead, what we want here is only
+the bi_size overflow check.
+
+Fixes: e3a5d8e386c3 ("block: check bi_size overflow before merge")
+Cc: stable@vger.kernel.org # v5.4+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -751,10 +751,12 @@ bool __bio_try_merge_page(struct bio *bi
+       if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+               return false;
+-      if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
++      if (bio->bi_vcnt > 0) {
+               struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+               if (page_is_mergeable(bv, page, len, off, same_page)) {
++                      if (bio->bi_iter.bi_size > UINT_MAX - len)
++                              return false;
+                       bv->bv_len += len;
+                       bio->bi_iter.bi_size += len;
+                       return true;
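
The essence of the partial revert above is keeping only the size check while dropping the bio_full() test: growth of the 32-bit bi_size is refused only when it would actually wrap. A minimal userspace sketch of that guard (names here are illustrative, not kernel code):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Refuse to grow a 32-bit running size if adding len would wrap,
 * mirroring the bi_size > UINT_MAX - len test in the hunk above. */
static bool try_grow(unsigned int *size, unsigned int len)
{
        if (*size > UINT_MAX - len)
                return false;           /* would overflow */
        *size += len;
        return true;
}

int main(void)
{
        unsigned int size = UINT_MAX - 8;

        printf("grow by 4:  %s\n", try_grow(&size, 4) ? "ok" : "rejected");
        printf("grow by 16: %s\n", try_grow(&size, 16) ? "ok" : "rejected");
        return 0;
}
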
diff --git a/queue-5.4/cifs-close-open-handle-after-interrupted-close.patch b/queue-5.4/cifs-close-open-handle-after-interrupted-close.patch
new file mode 100644 (file)
index 0000000..fad7a8f
--- /dev/null
@@ -0,0 +1,152 @@
+From 9150c3adbf24d77cfba37f03639d4a908ca4ac25 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Thu, 21 Nov 2019 11:35:12 -0800
+Subject: CIFS: Close open handle after interrupted close
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 9150c3adbf24d77cfba37f03639d4a908ca4ac25 upstream.
+
+If Close command is interrupted before sending a request
+to the server the client ends up leaking an open file
+handle. This wastes server resources and can potentially
+block applications that try to remove the file or any
+directory containing this file.
+
+Fix this by putting the close command into a worker queue,
+so another thread retries it later.
+
+Cc: Stable <stable@vger.kernel.org>
+Tested-by: Frank Sorenson <sorenson@redhat.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2misc.c  |   59 +++++++++++++++++++++++++++++++++++++++-------------
+ fs/cifs/smb2pdu.c   |   16 +++++++++++++-
+ fs/cifs/smb2proto.h |    3 ++
+ 3 files changed, 63 insertions(+), 15 deletions(-)
+
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -743,36 +743,67 @@ smb2_cancelled_close_fid(struct work_str
+       kfree(cancelled);
+ }
++/* Caller should already has an extra reference to @tcon */
++static int
++__smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
++                            __u64 volatile_fid)
++{
++      struct close_cancelled_open *cancelled;
++
++      cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
++      if (!cancelled)
++              return -ENOMEM;
++
++      cancelled->fid.persistent_fid = persistent_fid;
++      cancelled->fid.volatile_fid = volatile_fid;
++      cancelled->tcon = tcon;
++      INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
++      WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
++
++      return 0;
++}
++
++int
++smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
++                          __u64 volatile_fid)
++{
++      int rc;
++
++      cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
++      spin_lock(&cifs_tcp_ses_lock);
++      tcon->tc_count++;
++      spin_unlock(&cifs_tcp_ses_lock);
++
++      rc = __smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid);
++      if (rc)
++              cifs_put_tcon(tcon);
++
++      return rc;
++}
++
+ int
+ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+ {
+       struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
+       struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+       struct cifs_tcon *tcon;
+-      struct close_cancelled_open *cancelled;
++      int rc;
+       if (sync_hdr->Command != SMB2_CREATE ||
+           sync_hdr->Status != STATUS_SUCCESS)
+               return 0;
+-      cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+-      if (!cancelled)
+-              return -ENOMEM;
+-
+       tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+                                 sync_hdr->TreeId);
+-      if (!tcon) {
+-              kfree(cancelled);
++      if (!tcon)
+               return -ENOENT;
+-      }
+-      cancelled->fid.persistent_fid = rsp->PersistentFileId;
+-      cancelled->fid.volatile_fid = rsp->VolatileFileId;
+-      cancelled->tcon = tcon;
+-      INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+-      queue_work(cifsiod_wq, &cancelled->work);
++      rc = __smb2_handle_cancelled_close(tcon, rsp->PersistentFileId,
++                                         rsp->VolatileFileId);
++      if (rc)
++              cifs_put_tcon(tcon);
+-      return 0;
++      return rc;
+ }
+ /**
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2972,7 +2972,21 @@ int
+ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+          u64 persistent_fid, u64 volatile_fid)
+ {
+-      return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
++      int rc;
++      int tmp_rc;
++
++      rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
++
++      /* retry close in a worker thread if this one is interrupted */
++      if (rc == -EINTR) {
++              tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
++                                                   volatile_fid);
++              if (tmp_rc)
++                      cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
++                               persistent_fid, tmp_rc);
++      }
++
++      return rc;
+ }
+ int
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -212,6 +212,9 @@ extern int SMB2_set_compression(const un
+ extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+                            const u64 persistent_fid, const u64 volatile_fid,
+                            const __u8 oplock_level);
++extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
++                                     __u64 persistent_fid,
++                                     __u64 volatile_fid);
+ extern int smb2_handle_cancelled_mid(char *buffer,
+                                       struct TCP_Server_Info *server);
+ void smb2_cancelled_close_fid(struct work_struct *work);
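
The shape of the fix above is: if the close was interrupted before it reached the server, hand the file handle to a worker so the close is retried later instead of leaking. A rough userspace sketch of that pattern (send_close() and the handle number are stand-ins, not the CIFS API; build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-in for a close request that may be interrupted by a signal
 * before the request is ever sent. */
static int send_close(int handle)
{
        (void)handle;
        return -EINTR;          /* pretend the caller was interrupted */
}

/* Worker retries the close later so the handle is not leaked. */
static void *close_worker(void *arg)
{
        int handle = *(int *)arg;

        printf("worker: retrying close of handle %d\n", handle);
        /* a real implementation would resend the close request here */
        return NULL;
}

int main(void)
{
        int handle = 42;
        int rc = send_close(handle);

        if (rc == -EINTR) {     /* interrupted: defer the close to a worker */
                pthread_t t;

                pthread_create(&t, NULL, close_worker, &handle);
                pthread_join(t, NULL);
        }
        return 0;
}
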
diff --git a/queue-5.4/cifs-do-not-miss-cancelled-open-responses.patch b/queue-5.4/cifs-do-not-miss-cancelled-open-responses.patch
new file mode 100644 (file)
index 0000000..75b86db
--- /dev/null
@@ -0,0 +1,76 @@
+From 7b71843fa7028475b052107664cbe120156a2cfc Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Thu, 21 Nov 2019 11:35:14 -0800
+Subject: CIFS: Do not miss cancelled OPEN responses
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 7b71843fa7028475b052107664cbe120156a2cfc upstream.
+
+When an OPEN command is cancelled we mark a mid as
+cancelled and let the demultiplex thread process it
+by closing an open handle. The problem is there is
+a race between a system call thread and the demultiplex
+thread and there may be a situation when the mid has
+been already processed before it is set as cancelled.
+
+Fix this by processing cancelled requests when mids
+are being destroyed which means that there is only
+one thread referencing a particular mid. Also set
+mids as cancelled unconditionally on their state.
+
+Cc: Stable <stable@vger.kernel.org>
+Tested-by: Frank Sorenson <sorenson@redhat.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c   |    6 ------
+ fs/cifs/transport.c |   10 ++++++++--
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1222,12 +1222,6 @@ next_pdu:
+               for (i = 0; i < num_mids; i++) {
+                       if (mids[i] != NULL) {
+                               mids[i]->resp_buf_size = server->pdu_size;
+-                              if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) &&
+-                                  mids[i]->mid_state == MID_RESPONSE_RECEIVED &&
+-                                  server->ops->handle_cancelled_mid)
+-                                      server->ops->handle_cancelled_mid(
+-                                                      mids[i]->resp_buf,
+-                                                      server);
+                               if (!mids[i]->multiRsp || mids[i]->multiEnd)
+                                       mids[i]->callback(mids[i]);
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -93,8 +93,14 @@ static void _cifs_mid_q_entry_release(st
+       __u16 smb_cmd = le16_to_cpu(midEntry->command);
+       unsigned long now;
+       unsigned long roundtrip_time;
+-      struct TCP_Server_Info *server = midEntry->server;
+ #endif
++      struct TCP_Server_Info *server = midEntry->server;
++
++      if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
++          midEntry->mid_state == MID_RESPONSE_RECEIVED &&
++          server->ops->handle_cancelled_mid)
++              server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
++
+       midEntry->mid_state = MID_FREE;
+       atomic_dec(&midCount);
+       if (midEntry->large_buf)
+@@ -1122,8 +1128,8 @@ compound_send_recv(const unsigned int xi
+                                midQ[i]->mid, le16_to_cpu(midQ[i]->command));
+                       send_cancel(server, &rqst[i], midQ[i]);
+                       spin_lock(&GlobalMid_Lock);
++                      midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+                       if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+-                              midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+                               midQ[i]->callback = cifs_cancelled_callback;
+                               cancelled_mid[i] = true;
+                               credits[i].value = 0;
diff --git a/queue-5.4/cifs-don-t-display-rdma-transport-on-reconnect.patch b/queue-5.4/cifs-don-t-display-rdma-transport-on-reconnect.patch
new file mode 100644 (file)
index 0000000..505ad4b
--- /dev/null
@@ -0,0 +1,35 @@
+From 14cc639c17ab0b6671526a7459087352507609e4 Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:50 -0700
+Subject: cifs: Don't display RDMA transport on reconnect
+
+From: Long Li <longli@microsoft.com>
+
+commit 14cc639c17ab0b6671526a7459087352507609e4 upstream.
+
+On reconnect, the transport data structure is NULL and its information is not
+available.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifs_debug.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -256,6 +256,11 @@ static int cifs_debug_data_proc_show(str
+               if (!server->rdma)
+                       goto skip_rdma;
++              if (!server->smbd_conn) {
++                      seq_printf(m, "\nSMBDirect transport not available");
++                      goto skip_rdma;
++              }
++
+               seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+                       "transport status: %x",
+                       server->smbd_conn->protocol,
diff --git a/queue-5.4/cifs-fix-null-pointer-dereference-in-mid-callback.patch b/queue-5.4/cifs-fix-null-pointer-dereference-in-mid-callback.patch
new file mode 100644 (file)
index 0000000..468eb83
--- /dev/null
@@ -0,0 +1,93 @@
+From 86a7964be7afaf3df6b64faaa10a7032d2444e51 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Thu, 21 Nov 2019 11:35:13 -0800
+Subject: CIFS: Fix NULL pointer dereference in mid callback
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 86a7964be7afaf3df6b64faaa10a7032d2444e51 upstream.
+
+There is a race between a system call processing thread
+and the demultiplex thread when mid->resp_buf becomes NULL
+and later is being accessed to get credits. It happens when
+the 1st thread wakes up before a mid callback is called in
+the 2nd one but the mid state has already been set to
+MID_RESPONSE_RECEIVED. This causes NULL pointer dereference
+in mid callback.
+
+Fix this by saving credits from the response before we
+update the mid state and then use this value in the mid
+callback rather then accessing a response buffer.
+
+Cc: Stable <stable@vger.kernel.org>
+Fixes: ee258d79159afed5 ("CIFS: Move credit processing to mid callbacks for SMB3")
+Tested-by: Frank Sorenson <sorenson@redhat.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsglob.h |    1 +
+ fs/cifs/connect.c  |   15 +++++++++++++++
+ fs/cifs/smb2ops.c  |    8 +-------
+ 3 files changed, 17 insertions(+), 7 deletions(-)
+
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1524,6 +1524,7 @@ struct mid_q_entry {
+       struct TCP_Server_Info *server; /* server corresponding to this mid */
+       __u64 mid;              /* multiplex id */
+       __u16 credits;          /* number of credits consumed by this mid */
++      __u16 credits_received; /* number of credits from the response */
+       __u32 pid;              /* process id */
+       __u32 sequence_number;  /* for CIFS signing */
+       unsigned long when_alloc;  /* when mid was created */
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -905,6 +905,20 @@ dequeue_mid(struct mid_q_entry *mid, boo
+       spin_unlock(&GlobalMid_Lock);
+ }
++static unsigned int
++smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
++{
++      struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer;
++
++      /*
++       * SMB1 does not use credits.
++       */
++      if (server->vals->header_preamble_size)
++              return 0;
++
++      return le16_to_cpu(shdr->CreditRequest);
++}
++
+ static void
+ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+          char *buf, int malformed)
+@@ -912,6 +926,7 @@ handle_mid(struct mid_q_entry *mid, stru
+       if (server->ops->check_trans2 &&
+           server->ops->check_trans2(mid, server, buf, malformed))
+               return;
++      mid->credits_received = smb2_get_credits_from_hdr(buf, server);
+       mid->resp_buf = buf;
+       mid->large_buf = server->large_buf;
+       /* Was previous buf put in mpx struct for multi-rsp? */
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -151,13 +151,7 @@ smb2_get_credits_field(struct TCP_Server
+ static unsigned int
+ smb2_get_credits(struct mid_q_entry *mid)
+ {
+-      struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
+-
+-      if (mid->mid_state == MID_RESPONSE_RECEIVED
+-          || mid->mid_state == MID_RESPONSE_MALFORMED)
+-              return le16_to_cpu(shdr->CreditRequest);
+-
+-      return 0;
++      return mid->credits_received;
+ }
+ static int
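
The race fixed above follows a general pattern: copy what the waiter needs out of the shared response before publishing the state change that lets the waiter run, so the waiter never dereferences a buffer that may already be gone. A simplified userspace sketch with C11 atomics (struct and field names are made up for the example; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct mid {
        _Atomic int state;      /* 0 = submitted, 1 = response received */
        int credits_received;   /* copied out of the response up front */
};

static void *receiver(void *arg)
{
        struct mid *mid = arg;
        int credits_in_response = 16;   /* would be parsed from the wire */

        mid->credits_received = credits_in_response;    /* copy first ... */
        atomic_store_explicit(&mid->state, 1, memory_order_release);
        return NULL;                                    /* ... then publish */
}

int main(void)
{
        struct mid mid = { .state = 0, .credits_received = 0 };
        pthread_t t;

        pthread_create(&t, NULL, receiver, &mid);
        while (atomic_load_explicit(&mid.state, memory_order_acquire) != 1)
                ;       /* waiter only looks at state and the copied field */
        printf("credits granted: %d\n", mid.credits_received);
        pthread_join(t, NULL);
        return 0;
}
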
diff --git a/queue-5.4/cifs-fix-retrieval-of-dfs-referrals-in-cifs_mount.patch b/queue-5.4/cifs-fix-retrieval-of-dfs-referrals-in-cifs_mount.patch
new file mode 100644 (file)
index 0000000..baa047c
--- /dev/null
@@ -0,0 +1,80 @@
+From 5bb30a4dd60e2a10a4de9932daff23e503f1dd2b Mon Sep 17 00:00:00 2001
+From: "Paulo Alcantara (SUSE)" <pc@cjr.nz>
+Date: Fri, 22 Nov 2019 12:30:56 -0300
+Subject: cifs: Fix retrieval of DFS referrals in cifs_mount()
+
+From: Paulo Alcantara (SUSE) <pc@cjr.nz>
+
+commit 5bb30a4dd60e2a10a4de9932daff23e503f1dd2b upstream.
+
+Make sure that DFS referrals are sent to newly resolved root targets
+as in a multi tier DFS setup.
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Link: https://lkml.kernel.org/r/05aa2995-e85e-0ff4-d003-5bb08bd17a22@canonical.com
+Cc: stable@vger.kernel.org
+Tested-by: Matthew Ruffell <matthew.ruffell@canonical.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c |   32 ++++++++++++++++++++++----------
+ 1 file changed, 22 insertions(+), 10 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4709,6 +4709,17 @@ static int is_path_remote(struct cifs_sb
+ }
+ #ifdef CONFIG_CIFS_DFS_UPCALL
++static inline void set_root_tcon(struct cifs_sb_info *cifs_sb,
++                               struct cifs_tcon *tcon,
++                               struct cifs_tcon **root)
++{
++      spin_lock(&cifs_tcp_ses_lock);
++      tcon->tc_count++;
++      tcon->remap = cifs_remap(cifs_sb);
++      spin_unlock(&cifs_tcp_ses_lock);
++      *root = tcon;
++}
++
+ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
+ {
+       int rc = 0;
+@@ -4810,18 +4821,10 @@ int cifs_mount(struct cifs_sb_info *cifs
+       /* Cache out resolved root server */
+       (void)dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
+                            root_path + 1, NULL, NULL);
+-      /*
+-       * Save root tcon for additional DFS requests to update or create a new
+-       * DFS cache entry, or even perform DFS failover.
+-       */
+-      spin_lock(&cifs_tcp_ses_lock);
+-      tcon->tc_count++;
+-      tcon->dfs_path = root_path;
++      kfree(root_path);
+       root_path = NULL;
+-      tcon->remap = cifs_remap(cifs_sb);
+-      spin_unlock(&cifs_tcp_ses_lock);
+-      root_tcon = tcon;
++      set_root_tcon(cifs_sb, tcon, &root_tcon);
+       for (count = 1; ;) {
+               if (!rc && tcon) {
+@@ -4858,6 +4861,15 @@ int cifs_mount(struct cifs_sb_info *cifs
+                       mount_put_conns(cifs_sb, xid, server, ses, tcon);
+                       rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses,
+                                            &tcon);
++                      /*
++                       * Ensure that DFS referrals go through new root server.
++                       */
++                      if (!rc && tcon &&
++                          (tcon->share_flags & (SHI1005_FLAGS_DFS |
++                                                SHI1005_FLAGS_DFS_ROOT))) {
++                              cifs_put_tcon(root_tcon);
++                              set_root_tcon(cifs_sb, tcon, &root_tcon);
++                      }
+               }
+               if (rc) {
+                       if (rc == -EACCES || rc == -EOPNOTSUPP)
diff --git a/queue-5.4/cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch b/queue-5.4/cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch
new file mode 100644 (file)
index 0000000..e22e203
--- /dev/null
@@ -0,0 +1,46 @@
+From 44805b0e62f15e90d233485420e1847133716bdc Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Tue, 12 Nov 2019 17:16:35 -0800
+Subject: CIFS: Respect O_SYNC and O_DIRECT flags during reconnect
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 44805b0e62f15e90d233485420e1847133716bdc upstream.
+
+Currently the client translates O_SYNC and O_DIRECT flags
+into corresponding SMB create options when opening a file.
+The problem is that on reconnect when the file is being
+re-opened the client doesn't set those flags and it causes
+a server to reject re-open requests because create options
+don't match. The latter means that any subsequent system
+call against that open file fail until a share is re-mounted.
+
+Fix this by properly setting SMB create options when
+re-opening files after reconnects.
+
+Fixes: 1013e760d10e6: ("SMB3: Don't ignore O_SYNC/O_DSYNC and O_DIRECT flags")
+Cc: Stable <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -729,6 +729,13 @@ cifs_reopen_file(struct cifsFileInfo *cf
+       if (backup_cred(cifs_sb))
+               create_options |= CREATE_OPEN_BACKUP_INTENT;
++      /* O_SYNC also has bit for O_DSYNC so following check picks up either */
++      if (cfile->f_flags & O_SYNC)
++              create_options |= CREATE_WRITE_THROUGH;
++
++      if (cfile->f_flags & O_DIRECT)
++              create_options |= CREATE_NO_BUFFER;
++
+       if (server->ops->get_lease_key)
+               server->ops->get_lease_key(inode, &cfile->fid);
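
The fix boils down to deriving the SMB create options from the file's open flags in the reopen path exactly as the original open does. A hedged, self-contained sketch of that mapping (the CREATE_* constants and their values are invented for the example; only the flag tests mirror the hunk above):

#define _GNU_SOURCE             /* for O_DIRECT in a userspace build */
#include <fcntl.h>
#include <stdio.h>

#define CREATE_WRITE_THROUGH  0x1u      /* invented values for the sketch */
#define CREATE_NO_BUFFER      0x2u

static unsigned int create_options_from_flags(int f_flags)
{
        unsigned int opts = 0;

        /* O_SYNC also contains the O_DSYNC bit, so one check picks up either */
        if (f_flags & O_SYNC)
                opts |= CREATE_WRITE_THROUGH;
        if (f_flags & O_DIRECT)
                opts |= CREATE_NO_BUFFER;
        return opts;
}

int main(void)
{
        printf("O_SYNC|O_DIRECT -> create options 0x%x\n",
               create_options_from_flags(O_SYNC | O_DIRECT));
        return 0;
}
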
diff --git a/queue-5.4/cifs-smbd-add-messages-on-rdma-session-destroy-and-reconnection.patch b/queue-5.4/cifs-smbd-add-messages-on-rdma-session-destroy-and-reconnection.patch
new file mode 100644 (file)
index 0000000..62261d7
--- /dev/null
@@ -0,0 +1,42 @@
+From d63cdbae60ac6fbb2864bd3d8df7404f12b7407d Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:53 -0700
+Subject: cifs: smbd: Add messages on RDMA session destroy and reconnection
+
+From: Long Li <longli@microsoft.com>
+
+commit d63cdbae60ac6fbb2864bd3d8df7404f12b7407d upstream.
+
+Log these activities to help production support.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smbdirect.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1476,6 +1476,7 @@ void smbd_destroy(struct TCP_Server_Info
+       info->transport_status = SMBD_DESTROYED;
+       destroy_workqueue(info->workqueue);
++      log_rdma_event(INFO,  "rdma session destroyed\n");
+       kfree(info);
+ }
+@@ -1505,8 +1506,9 @@ create_conn:
+       log_rdma_event(INFO, "creating rdma session\n");
+       server->smbd_conn = smbd_get_connection(
+               server, (struct sockaddr *) &server->dstaddr);
+-      log_rdma_event(INFO, "created rdma session info=%p\n",
+-              server->smbd_conn);
++
++      if (server->smbd_conn)
++              cifs_dbg(VFS, "RDMA transport re-established\n");
+       return server->smbd_conn ? 0 : -ENOENT;
+ }
diff --git a/queue-5.4/cifs-smbd-only-queue-work-for-error-recovery-on-memory-registration.patch b/queue-5.4/cifs-smbd-only-queue-work-for-error-recovery-on-memory-registration.patch
new file mode 100644 (file)
index 0000000..8dd3cef
--- /dev/null
@@ -0,0 +1,65 @@
+From c21ce58eab1eda4c66507897207e20c82e62a5ac Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:55 -0700
+Subject: cifs: smbd: Only queue work for error recovery on memory registration
+
+From: Long Li <longli@microsoft.com>
+
+commit c21ce58eab1eda4c66507897207e20c82e62a5ac upstream.
+
+It's not necessary to queue invalidated memory registration to work queue, as
+all we need to do is to unmap the SG and make it usable again. This can save
+CPU cycles in normal data paths as memory registration errors are rare and
+normally only happens during reconnection.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smbdirect.c |   26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -2269,12 +2269,7 @@ static void smbd_mr_recovery_work(struct
+       int rc;
+       list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
+-              if (smbdirect_mr->state == MR_INVALIDATED)
+-                      ib_dma_unmap_sg(
+-                              info->id->device, smbdirect_mr->sgl,
+-                              smbdirect_mr->sgl_count,
+-                              smbdirect_mr->dir);
+-              else if (smbdirect_mr->state == MR_ERROR) {
++              if (smbdirect_mr->state == MR_ERROR) {
+                       /* recover this MR entry */
+                       rc = ib_dereg_mr(smbdirect_mr->mr);
+@@ -2602,11 +2597,20 @@ int smbd_deregister_mr(struct smbd_mr *s
+                */
+               smbdirect_mr->state = MR_INVALIDATED;
+-      /*
+-       * Schedule the work to do MR recovery for future I/Os
+-       * MR recovery is slow and we don't want it to block the current I/O
+-       */
+-      queue_work(info->workqueue, &info->mr_recovery_work);
++      if (smbdirect_mr->state == MR_INVALIDATED) {
++              ib_dma_unmap_sg(
++                      info->id->device, smbdirect_mr->sgl,
++                      smbdirect_mr->sgl_count,
++                      smbdirect_mr->dir);
++              smbdirect_mr->state = MR_READY;
++              if (atomic_inc_return(&info->mr_ready_count) == 1)
++                      wake_up_interruptible(&info->wait_mr);
++      } else
++              /*
++               * Schedule the work to do MR recovery for future I/Os MR
++               * recovery is slow and don't want it to block current I/O
++               */
++              queue_work(info->workqueue, &info->mr_recovery_work);
+ done:
+       if (atomic_dec_and_test(&info->mr_used_count))
diff --git a/queue-5.4/cifs-smbd-return-eagain-when-transport-is-reconnecting.patch b/queue-5.4/cifs-smbd-return-eagain-when-transport-is-reconnecting.patch
new file mode 100644 (file)
index 0000000..1c74261
--- /dev/null
@@ -0,0 +1,38 @@
+From 4357d45f50e58672e1d17648d792f27df01dfccd Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:56 -0700
+Subject: cifs: smbd: Return -EAGAIN when transport is reconnecting
+
+From: Long Li <longli@microsoft.com>
+
+commit 4357d45f50e58672e1d17648d792f27df01dfccd upstream.
+
+During reconnecting, the transport may have already been destroyed and is in
+the process of being reconnected. In this case, return -EAGAIN to not fail and
+to retry this I/O.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/transport.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -319,8 +319,11 @@ __smb_send_rqst(struct TCP_Server_Info *
+       int val = 1;
+       __be32 rfc1002_marker;
+-      if (cifs_rdma_enabled(server) && server->smbd_conn) {
+-              rc = smbd_send(server, num_rqst, rqst);
++      if (cifs_rdma_enabled(server)) {
++              /* return -EAGAIN when connecting or reconnecting */
++              rc = -EAGAIN;
++              if (server->smbd_conn)
++                      rc = smbd_send(server, num_rqst, rqst);
+               goto smbd_done;
+       }
diff --git a/queue-5.4/cifs-smbd-return-econnaborted-when-trasnport-is-not-in-connected-state.patch b/queue-5.4/cifs-smbd-return-econnaborted-when-trasnport-is-not-in-connected-state.patch
new file mode 100644 (file)
index 0000000..c3ddfd4
--- /dev/null
@@ -0,0 +1,31 @@
+From acd4680e2bef2405a0e1ef2149fbb01cce7e116c Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:54 -0700
+Subject: cifs: smbd: Return -ECONNABORTED when trasnport is not in connected state
+
+From: Long Li <longli@microsoft.com>
+
+commit acd4680e2bef2405a0e1ef2149fbb01cce7e116c upstream.
+
+The transport should return this error so the upper layer will reconnect.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smbdirect.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1972,7 +1972,7 @@ read_rfc1002_done:
+       if (info->transport_status != SMBD_CONNECTED) {
+               log_read(ERR, "disconnected\n");
+-              return 0;
++              return -ECONNABORTED;
+       }
+       goto again;
diff --git a/queue-5.4/cifs-smbd-return-einval-when-the-number-of-iovs-exceeds-smbdirect_max_sge.patch b/queue-5.4/cifs-smbd-return-einval-when-the-number-of-iovs-exceeds-smbdirect_max_sge.patch
new file mode 100644 (file)
index 0000000..4585f48
--- /dev/null
@@ -0,0 +1,33 @@
+From 37941ea17d3f8eb2f5ac2f59346fab9e8439271a Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Wed, 16 Oct 2019 13:51:52 -0700
+Subject: cifs: smbd: Return -EINVAL when the number of iovs exceeds SMBDIRECT_MAX_SGE
+
+From: Long Li <longli@microsoft.com>
+
+commit 37941ea17d3f8eb2f5ac2f59346fab9e8439271a upstream.
+
+While it's not friendly to fail user processes that issue more iovs
+than we support, at least we should return the correct error code so the
+user process gets a chance to retry with a smaller number of iovs.
+
+Signed-off-by: Long Li <longli@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smbdirect.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1069,7 +1069,7 @@ static int smbd_post_send_data(
+       if (n_vec > SMBDIRECT_MAX_SGE) {
+               cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
+-              return -ENOMEM;
++              return -EINVAL;
+       }
+       sg_init_table(sgl, n_vec);
diff --git a/queue-5.4/dm-btree-increase-rebalance-threshold-in-__rebalance2.patch b/queue-5.4/dm-btree-increase-rebalance-threshold-in-__rebalance2.patch
new file mode 100644 (file)
index 0000000..a95525c
--- /dev/null
@@ -0,0 +1,67 @@
+From 474e559567fa631dea8fb8407ab1b6090c903755 Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Tue, 3 Dec 2019 19:42:58 +0800
+Subject: dm btree: increase rebalance threshold in __rebalance2()
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit 474e559567fa631dea8fb8407ab1b6090c903755 upstream.
+
+We got the following warnings from thin_check during thin-pool setup:
+
+  $ thin_check /dev/vdb
+  examining superblock
+  examining devices tree
+    missing devices: [1, 84]
+      too few entries in btree_node: 41, expected at least 42 (block 138, max_entries = 126)
+  examining mapping tree
+
+The phenomenon is the number of entries in one node of details_info tree is
+less than (max_entries / 3). And it can be easily reproduced by the following
+procedures:
+
+  $ new a thin pool
+  $ presume the max entries of details_info tree is 126
+  $ new 127 thin devices (e.g. 1~127) to make the root node being full
+    and then split
+  $ remove the first 43 (e.g. 1~43) thin devices to make the children
+    rebalance repeatedly
+  $ stop the thin pool
+  $ thin_check
+
+The root cause is that the B-tree removal procedure in __rebalance2()
+doesn't guarantee the invariance: the minimal number of entries in
+non-root node should be >= (max_entries / 3).
+
+Simply fix the problem by increasing the rebalance threshold to
+make sure the number of entries in each child will be greater
+than or equal to (max_entries / 3 + 1), so no matter which
+child is used for removal, the number will still be valid.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree-remove.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree
+       struct btree_node *right = r->n;
+       uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+       uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+-      unsigned threshold = 2 * merge_threshold(left) + 1;
++      /*
++       * Ensure the number of entries in each child will be greater
++       * than or equal to (max_entries / 3 + 1), so no matter which
++       * child is used for removal, the number will still be not
++       * less than (max_entries / 3).
++       */
++      unsigned int threshold = 2 * (merge_threshold(left) + 1);
+       if (nr_left + nr_right < threshold) {
+               /*
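
With max_entries = 126, as in the thin_check output above, the old and new thresholds differ by exactly one, and that difference is what closes the hole. A tiny worked check, assuming the minimum fill is max_entries / 3 = 42:

#include <stdio.h>

int main(void)
{
        unsigned int max_entries = 126;
        unsigned int min_fill = max_entries / 3;              /* 42 */
        unsigned int old_threshold = 2 * min_fill + 1;        /* 85 */
        unsigned int new_threshold = 2 * (min_fill + 1);      /* 86 */
        unsigned int total = 85;   /* entries across the two children */

        if (total < old_threshold)
                printf("old: merge children\n");
        else
                /* 85 is redistributed as 43 + 42; removing one more entry
                 * from the 42-entry child leaves 41 < 42, the state that
                 * thin_check reported above */
                printf("old: redistribute -> %u + %u\n",
                       total - total / 2, total / 2);

        if (total < new_threshold)      /* 85 < 86: taken */
                printf("new: merge into one node of %u entries\n", total);
        return 0;
}
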
diff --git a/queue-5.4/dm-clone-flush-destination-device-before-committing-metadata.patch b/queue-5.4/dm-clone-flush-destination-device-before-committing-metadata.patch
new file mode 100644 (file)
index 0000000..3e89f17
--- /dev/null
@@ -0,0 +1,199 @@
+From 8b3fd1f53af3591d5624ab9df718369b14d09ed1 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:06:54 +0200
+Subject: dm clone: Flush destination device before committing metadata
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 8b3fd1f53af3591d5624ab9df718369b14d09ed1 upstream.
+
+dm-clone maintains an on-disk bitmap which records which regions are
+valid in the destination device, i.e., which regions have already been
+hydrated, or have been written to directly, via user I/O.
+
+Setting a bit in the on-disk bitmap means the corresponding region is
+valid in the destination device and we redirect all I/O regarding it to
+the destination device.
+
+Suppose the destination device has a volatile write-back cache and the
+following sequence of events occur:
+
+1. A region gets hydrated, either through the background hydration or
+   because it was written to directly, via user I/O.
+
+2. The commit timeout expires and we commit the metadata, marking that
+   region as valid in the destination device.
+
+3. The system crashes and the destination device's cache has not been
+   flushed, meaning the region's data are lost.
+
+The next time we read that region we read it from the destination
+device, since the metadata have been successfully committed, but the
+data are lost due to the crash, so we read garbage instead of the old
+data.
+
+This has several implications:
+
+1. In case of background hydration or of writes with size smaller than
+   the region size (which means we first copy the whole region and then
+   issue the smaller write), we corrupt data that the user never
+   touched.
+
+2. In case of writes with size equal to the device's logical block size,
+   we fail to provide atomic sector writes. When the system recovers the
+   user will read garbage from the sector instead of the old data or the
+   new data.
+
+3. In case of writes without the FUA flag set, after the system
+   recovers, the written sectors will contain garbage instead of a
+   random mix of sectors containing either old data or new data, thus we
+   fail again to provide atomic sector writes.
+
+4. Even when the user flushes the dm-clone device, because we first
+   commit the metadata and then pass down the flush, the same risk for
+   corruption exists (if the system crashes after the metadata have been
+   committed but before the flush is passed down).
+
+The only case which is unaffected is that of writes with size equal to
+the region size and with the FUA flag set. But, because FUA writes
+trigger metadata commits, this case can trigger the corruption
+indirectly.
+
+To solve this and avoid the potential data corruption we flush the
+destination device **before** committing the metadata.
+
+This ensures that any freshly hydrated regions, for which we commit the
+metadata, are properly written to non-volatile storage and won't be lost
+in case of a crash.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-target.c |   46 +++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 40 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -86,6 +86,12 @@ struct clone {
+       struct dm_clone_metadata *cmd;
++      /*
++       * bio used to flush the destination device, before committing the
++       * metadata.
++       */
++      struct bio flush_bio;
++
+       /* Region hydration hash table */
+       struct hash_table_bucket *ht;
+@@ -1106,10 +1112,13 @@ static bool need_commit_due_to_time(stru
+ /*
+  * A non-zero return indicates read-only or fail mode.
+  */
+-static int commit_metadata(struct clone *clone)
++static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
+ {
+       int r = 0;
++      if (dest_dev_flushed)
++              *dest_dev_flushed = false;
++
+       mutex_lock(&clone->commit_lock);
+       if (!dm_clone_changed_this_transaction(clone->cmd))
+@@ -1126,6 +1135,19 @@ static int commit_metadata(struct clone
+               goto out;
+       }
++      bio_reset(&clone->flush_bio);
++      bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
++      clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
++
++      r = submit_bio_wait(&clone->flush_bio);
++      if (unlikely(r)) {
++              __metadata_operation_failed(clone, "flush destination device", r);
++              goto out;
++      }
++
++      if (dest_dev_flushed)
++              *dest_dev_flushed = true;
++
+       r = dm_clone_metadata_commit(clone->cmd);
+       if (unlikely(r)) {
+               __metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
+@@ -1199,6 +1221,7 @@ static void process_deferred_flush_bios(
+ {
+       struct bio *bio;
+       unsigned long flags;
++      bool dest_dev_flushed;
+       struct bio_list bios = BIO_EMPTY_LIST;
+       struct bio_list bio_completions = BIO_EMPTY_LIST;
+@@ -1218,7 +1241,7 @@ static void process_deferred_flush_bios(
+           !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
+               return;
+-      if (commit_metadata(clone)) {
++      if (commit_metadata(clone, &dest_dev_flushed)) {
+               bio_list_merge(&bios, &bio_completions);
+               while ((bio = bio_list_pop(&bios)))
+@@ -1232,8 +1255,17 @@ static void process_deferred_flush_bios(
+       while ((bio = bio_list_pop(&bio_completions)))
+               bio_endio(bio);
+-      while ((bio = bio_list_pop(&bios)))
+-              generic_make_request(bio);
++      while ((bio = bio_list_pop(&bios))) {
++              if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
++                      /* We just flushed the destination device as part of
++                       * the metadata commit, so there is no reason to send
++                       * another flush.
++                       */
++                      bio_endio(bio);
++              } else {
++                      generic_make_request(bio);
++              }
++      }
+ }
+ static void do_worker(struct work_struct *work)
+@@ -1405,7 +1437,7 @@ static void clone_status(struct dm_targe
+               /* Commit to ensure statistics aren't out-of-date */
+               if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
+-                      (void) commit_metadata(clone);
++                      (void) commit_metadata(clone, NULL);
+               r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);
+@@ -1839,6 +1871,7 @@ static int clone_ctr(struct dm_target *t
+       bio_list_init(&clone->deferred_flush_completions);
+       clone->hydration_offset = 0;
+       atomic_set(&clone->hydrations_in_flight, 0);
++      bio_init(&clone->flush_bio, NULL, 0);
+       clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
+       if (!clone->wq) {
+@@ -1912,6 +1945,7 @@ static void clone_dtr(struct dm_target *
+       struct clone *clone = ti->private;
+       mutex_destroy(&clone->commit_lock);
++      bio_uninit(&clone->flush_bio);
+       for (i = 0; i < clone->nr_ctr_args; i++)
+               kfree(clone->ctr_args[i]);
+@@ -1966,7 +2000,7 @@ static void clone_postsuspend(struct dm_
+       wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
+       flush_workqueue(clone->wq);
+-      (void) commit_metadata(clone);
++      (void) commit_metadata(clone, NULL);
+ }
+ static void clone_resume(struct dm_target *ti)
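
The ordering rule the patch enforces, making the destination's data durable before the metadata that declares it valid, has a direct userspace analogue. A minimal sketch under that assumption (file names and the record format are made up for the example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int data_fd = open("data.img", O_WRONLY | O_CREAT, 0644);
        int meta_fd = open("metadata.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
        const char record[] = "region 7 hydrated\n";

        if (data_fd < 0 || meta_fd < 0)
                return 1;

        /* 1. write the region's data */
        if (pwrite(data_fd, "new data", 8, 7 * 4096) != 8)
                return 1;

        /* 2. flush the destination first ... */
        if (fsync(data_fd) != 0)
                return 1;

        /* 3. ... and only then commit the metadata marking it valid */
        if (write(meta_fd, record, strlen(record)) != (ssize_t)strlen(record) ||
            fsync(meta_fd) != 0)
                return 1;

        close(data_fd);
        close(meta_fd);
        return 0;
}
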
diff --git a/queue-5.4/dm-clone-metadata-track-exact-changes-per-transaction.patch b/queue-5.4/dm-clone-metadata-track-exact-changes-per-transaction.patch
new file mode 100644 (file)
index 0000000..0a40149
--- /dev/null
@@ -0,0 +1,204 @@
+From e6a505f3f9fae572fb3ab3bc486e755ac9cef32c Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:06:52 +0200
+Subject: dm clone metadata: Track exact changes per transaction
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit e6a505f3f9fae572fb3ab3bc486e755ac9cef32c upstream.
+
+Extend struct dirty_map with a second bitmap which tracks the exact
+regions that were hydrated during the current metadata transaction.
+
+Moreover, fix __flush_dmap() to only commit the metadata of the regions
+that were hydrated during the current transaction.
+
+This is required by the following commits to fix a data corruption bug.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-metadata.c |   90 ++++++++++++++++++++++++++++-------------
+ 1 file changed, 62 insertions(+), 28 deletions(-)
+
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -67,23 +67,34 @@ struct superblock_disk {
+  * To save constantly doing look ups on disk we keep an in core copy of the
+  * on-disk bitmap, the region_map.
+  *
+- * To further reduce metadata I/O overhead we use a second bitmap, the dmap
+- * (dirty bitmap), which tracks the dirty words, i.e. longs, of the region_map.
++ * In order to track which regions are hydrated during a metadata transaction,
++ * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two
++ * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap
++ * tracks the regions that got hydrated during the current metadata
++ * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of
++ * the dirty_regions bitmap.
++ *
++ * This allows us to precisely track the regions that were hydrated during the
++ * current metadata transaction and update the metadata accordingly, when we
++ * commit the current transaction. This is important because dm-clone should
++ * only commit the metadata of regions that were properly flushed to the
++ * destination device beforehand. Otherwise, in case of a crash, we could end
++ * up with a corrupted dm-clone device.
+  *
+  * When a region finishes hydrating dm-clone calls
+  * dm_clone_set_region_hydrated(), or for discard requests
+  * dm_clone_cond_set_range(), which sets the corresponding bits in region_map
+  * and dmap.
+  *
+- * During a metadata commit we scan the dmap for dirty region_map words (longs)
+- * and update accordingly the on-disk metadata. Thus, we don't have to flush to
+- * disk the whole region_map. We can just flush the dirty region_map words.
++ * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions
++ * and update the on-disk metadata accordingly. Thus, we don't have to flush to
++ * disk the whole region_map. We can just flush the dirty region_map bits.
+  *
+- * We use a dirty bitmap, which is smaller than the original region_map, to
+- * reduce the amount of memory accesses during a metadata commit. As dm-bitset
+- * accesses the on-disk bitmap in 64-bit word granularity, there is no
+- * significant benefit in tracking the dirty region_map bits with a smaller
+- * granularity.
++ * We use the helper dmap->dirty_words bitmap, which is smaller than the
++ * original region_map, to reduce the amount of memory accesses during a
++ * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in
++ * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk
++ * accesses.
+  *
+  * We could update directly the on-disk bitmap, when dm-clone calls either
+  * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), buts this
+@@ -92,12 +103,13 @@ struct superblock_disk {
+  * e.g., in a hooked overwrite bio's completion routine, and further reduce the
+  * I/O completion latency.
+  *
+- * We maintain two dirty bitmaps. During a metadata commit we atomically swap
+- * the currently used dmap with the unused one. This allows the metadata update
+- * functions to run concurrently with an ongoing commit.
++ * We maintain two dirty bitmap sets. During a metadata commit we atomically
++ * swap the currently used dmap with the unused one. This allows the metadata
++ * update functions to run concurrently with an ongoing commit.
+  */
+ struct dirty_map {
+       unsigned long *dirty_words;
++      unsigned long *dirty_regions;
+       unsigned int changed;
+ };
+@@ -461,22 +473,40 @@ static size_t bitmap_size(unsigned long
+       return BITS_TO_LONGS(nr_bits) * sizeof(long);
+ }
+-static int dirty_map_init(struct dm_clone_metadata *cmd)
++static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
++                          unsigned long nr_regions)
+ {
+-      cmd->dmap[0].changed = 0;
+-      cmd->dmap[0].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
++      dmap->changed = 0;
+-      if (!cmd->dmap[0].dirty_words) {
+-              DMERR("Failed to allocate dirty bitmap");
++      dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL);
++      if (!dmap->dirty_words)
++              return -ENOMEM;
++
++      dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL);
++      if (!dmap->dirty_regions) {
++              kvfree(dmap->dirty_words);
+               return -ENOMEM;
+       }
+-      cmd->dmap[1].changed = 0;
+-      cmd->dmap[1].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
++      return 0;
++}
+-      if (!cmd->dmap[1].dirty_words) {
++static void __dirty_map_exit(struct dirty_map *dmap)
++{
++      kvfree(dmap->dirty_words);
++      kvfree(dmap->dirty_regions);
++}
++
++static int dirty_map_init(struct dm_clone_metadata *cmd)
++{
++      if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) {
+               DMERR("Failed to allocate dirty bitmap");
+-              kvfree(cmd->dmap[0].dirty_words);
++              return -ENOMEM;
++      }
++
++      if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) {
++              DMERR("Failed to allocate dirty bitmap");
++              __dirty_map_exit(&cmd->dmap[0]);
+               return -ENOMEM;
+       }
+@@ -487,8 +517,8 @@ static int dirty_map_init(struct dm_clon
+ static void dirty_map_exit(struct dm_clone_metadata *cmd)
+ {
+-      kvfree(cmd->dmap[0].dirty_words);
+-      kvfree(cmd->dmap[1].dirty_words);
++      __dirty_map_exit(&cmd->dmap[0]);
++      __dirty_map_exit(&cmd->dmap[1]);
+ }
+ static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
+@@ -633,21 +663,23 @@ unsigned long dm_clone_find_next_unhydra
+       return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start);
+ }
+-static int __update_metadata_word(struct dm_clone_metadata *cmd, unsigned long word)
++static int __update_metadata_word(struct dm_clone_metadata *cmd,
++                                unsigned long *dirty_regions,
++                                unsigned long word)
+ {
+       int r;
+       unsigned long index = word * BITS_PER_LONG;
+       unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG);
+       while (index < max_index) {
+-              if (test_bit(index, cmd->region_map)) {
++              if (test_bit(index, dirty_regions)) {
+                       r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root,
+                                             index, &cmd->bitset_root);
+-
+                       if (r) {
+                               DMERR("dm_bitset_set_bit failed");
+                               return r;
+                       }
++                      __clear_bit(index, dirty_regions);
+               }
+               index++;
+       }
+@@ -721,7 +753,7 @@ static int __flush_dmap(struct dm_clone_
+               if (word == cmd->nr_words)
+                       break;
+-              r = __update_metadata_word(cmd, word);
++              r = __update_metadata_word(cmd, dmap->dirty_regions, word);
+               if (r)
+                       return r;
+@@ -803,6 +835,7 @@ int dm_clone_set_region_hydrated(struct
+       dmap = cmd->current_dmap;
+       __set_bit(word, dmap->dirty_words);
++      __set_bit(region_nr, dmap->dirty_regions);
+       __set_bit(region_nr, cmd->region_map);
+       dmap->changed = 1;
+@@ -831,6 +864,7 @@ int dm_clone_cond_set_range(struct dm_cl
+               if (!test_bit(region_nr, cmd->region_map)) {
+                       word = region_nr / BITS_PER_LONG;
+                       __set_bit(word, dmap->dirty_words);
++                      __set_bit(region_nr, dmap->dirty_regions);
+                       __set_bit(region_nr, cmd->region_map);
+                       dmap->changed = 1;
+               }
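
The dirty-bitmap bookkeeping described in the patch above can be modeled in a few lines of plain C. This is only an illustrative userspace sketch with made-up sizes and helper names, not dm-clone code: marking a region dirty sets one bit in a per-region bitmap and one bit in a much smaller per-word bitmap, so a commit only has to visit the words whose bit is set.

#include <limits.h>
#include <stdio.h>

#define NR_REGIONS 256
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NR_WORDS ((NR_REGIONS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One bit per region; the second, smaller bitmap has one bit per word of
 * dirty_regions and tells a commit which words it needs to look at. */
static unsigned long dirty_regions[NR_WORDS];
static unsigned long dirty_words[(NR_WORDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void mark_region_dirty(unsigned long region)
{
        unsigned long word = region / BITS_PER_LONG;

        dirty_regions[word] |= 1UL << (region % BITS_PER_LONG);
        dirty_words[word / BITS_PER_LONG] |= 1UL << (word % BITS_PER_LONG);
}

int main(void)
{
        unsigned long w;

        mark_region_dirty(3);
        mark_region_dirty(130);

        /* A commit walks only the dirty words, not the whole region map. */
        for (w = 0; w < NR_WORDS; w++)
                if (dirty_words[w / BITS_PER_LONG] & (1UL << (w % BITS_PER_LONG)))
                        printf("word %lu is dirty: 0x%lx\n", w, dirty_regions[w]);

        return 0;
}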
diff --git a/queue-5.4/dm-clone-metadata-use-a-two-phase-commit.patch b/queue-5.4/dm-clone-metadata-use-a-two-phase-commit.patch
new file mode 100644 (file)
index 0000000..fe16559
--- /dev/null
@@ -0,0 +1,177 @@
+From 8fdbfe8d1690e8a38d497d83a30607d0d90cc15a Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:06:53 +0200
+Subject: dm clone metadata: Use a two phase commit
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 8fdbfe8d1690e8a38d497d83a30607d0d90cc15a upstream.
+
+Split the metadata commit in two parts:
+
+1. dm_clone_metadata_pre_commit(): Prepare the current transaction for
+   committing. After this is called, all subsequent metadata updates,
+   done through either dm_clone_set_region_hydrated() or
+   dm_clone_cond_set_range(), will be part of the next transaction.
+
+2. dm_clone_metadata_commit(): Actually commit the current transaction
+   to disk and start a new transaction.
+
+This is required by the following commit. It allows dm-clone to flush
+the destination device after step (1) to ensure that all freshly
+hydrated regions, for which we are updating the metadata, are properly
+written to non-volatile storage and won't be lost in case of a crash.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-metadata.c |   46 ++++++++++++++++++++++++++++++++---------
+ drivers/md/dm-clone-metadata.h |   17 +++++++++++++++
+ drivers/md/dm-clone-target.c   |    7 +++++-
+ 3 files changed, 60 insertions(+), 10 deletions(-)
+
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -127,6 +127,9 @@ struct dm_clone_metadata {
+       struct dirty_map dmap[2];
+       struct dirty_map *current_dmap;
++      /* Protected by lock */
++      struct dirty_map *committing_dmap;
++
+       /*
+        * In core copy of the on-disk bitmap to save constantly doing look ups
+        * on disk.
+@@ -511,6 +514,7 @@ static int dirty_map_init(struct dm_clon
+       }
+       cmd->current_dmap = &cmd->dmap[0];
++      cmd->committing_dmap = NULL;
+       return 0;
+ }
+@@ -775,16 +779,18 @@ static int __flush_dmap(struct dm_clone_
+       return 0;
+ }
+-int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
++int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
+ {
+-      int r = -EPERM;
++      int r = 0;
+       unsigned long flags;
+       struct dirty_map *dmap, *next_dmap;
+       down_write(&cmd->lock);
+-      if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
++      if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
++              r = -EPERM;
+               goto out;
++      }
+       /* Get current dirty bitmap */
+       dmap = cmd->current_dmap;
+@@ -796,7 +802,7 @@ int dm_clone_metadata_commit(struct dm_c
+        * The last commit failed, so we don't have a clean dirty-bitmap to
+        * use.
+        */
+-      if (WARN_ON(next_dmap->changed)) {
++      if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) {
+               r = -EINVAL;
+               goto out;
+       }
+@@ -806,11 +812,33 @@ int dm_clone_metadata_commit(struct dm_c
+       cmd->current_dmap = next_dmap;
+       spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+-      /*
+-       * No one is accessing the old dirty bitmap anymore, so we can flush
+-       * it.
+-       */
+-      r = __flush_dmap(cmd, dmap);
++      /* Set old dirty bitmap as currently committing */
++      cmd->committing_dmap = dmap;
++out:
++      up_write(&cmd->lock);
++
++      return r;
++}
++
++int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
++{
++      int r = -EPERM;
++
++      down_write(&cmd->lock);
++
++      if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
++              goto out;
++
++      if (WARN_ON(!cmd->committing_dmap)) {
++              r = -EINVAL;
++              goto out;
++      }
++
++      r = __flush_dmap(cmd, cmd->committing_dmap);
++      if (!r) {
++              /* Clear committing dmap */
++              cmd->committing_dmap = NULL;
++      }
+ out:
+       up_write(&cmd->lock);
+--- a/drivers/md/dm-clone-metadata.h
++++ b/drivers/md/dm-clone-metadata.h
+@@ -73,7 +73,23 @@ void dm_clone_metadata_close(struct dm_c
+ /*
+  * Commit dm-clone metadata to disk.
++ *
++ * We use a two phase commit:
++ *
++ * 1. dm_clone_metadata_pre_commit(): Prepare the current transaction for
++ *    committing. After this is called, all subsequent metadata updates, done
++ *    through either dm_clone_set_region_hydrated() or
++ *    dm_clone_cond_set_range(), will be part of the **next** transaction.
++ *
++ * 2. dm_clone_metadata_commit(): Actually commit the current transaction to
++ *    disk and start a new transaction.
++ *
++ * This allows dm-clone to flush the destination device after step (1) to
++ * ensure that all freshly hydrated regions, for which we are updating the
++ * metadata, are properly written to non-volatile storage and won't be lost in
++ * case of a crash.
+  */
++int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd);
+ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd);
+ /*
+@@ -110,6 +126,7 @@ int dm_clone_metadata_abort(struct dm_cl
+  * Switches metadata to a read only mode. Once read-only mode has been entered
+  * the following functions will return -EPERM:
+  *
++ *   dm_clone_metadata_pre_commit()
+  *   dm_clone_metadata_commit()
+  *   dm_clone_set_region_hydrated()
+  *   dm_clone_cond_set_range()
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -1120,8 +1120,13 @@ static int commit_metadata(struct clone
+               goto out;
+       }
+-      r = dm_clone_metadata_commit(clone->cmd);
++      r = dm_clone_metadata_pre_commit(clone->cmd);
++      if (unlikely(r)) {
++              __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
++              goto out;
++      }
++      r = dm_clone_metadata_commit(clone->cmd);
+       if (unlikely(r)) {
+               __metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
+               goto out;
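
The two-phase split above leaves the caller responsible for the ordering: freeze the current transaction, flush the destination device, then commit. A minimal sketch of that ordering in plain C, using hypothetical stand-in functions rather than the real dm-clone calls:

#include <stdio.h>

/* Hypothetical stand-ins that only model the ordering, not the kernel API. */
static int pre_commit_metadata(void)   { puts("freeze current transaction"); return 0; }
static int flush_destination_dev(void) { puts("flush destination device");   return 0; }
static int commit_metadata(void)       { puts("write frozen transaction");   return 0; }

static int commit(void)
{
        int r;

        /* Phase 1: later metadata updates go to the *next* transaction. */
        r = pre_commit_metadata();
        if (r)
                return r;

        /* Make the data described by the frozen transaction durable first. */
        r = flush_destination_dev();
        if (r)
                return r;

        /* Phase 2: only now may the metadata hit the disk. */
        return commit_metadata();
}

int main(void)
{
        return commit();
}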
diff --git a/queue-5.4/dm-mpath-remove-harmful-bio-based-optimization.patch b/queue-5.4/dm-mpath-remove-harmful-bio-based-optimization.patch
new file mode 100644 (file)
index 0000000..479a5b8
--- /dev/null
@@ -0,0 +1,82 @@
+From dbaf971c9cdf10843071a60dcafc1aaab3162354 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 26 Nov 2019 10:08:29 -0500
+Subject: dm mpath: remove harmful bio-based optimization
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit dbaf971c9cdf10843071a60dcafc1aaab3162354 upstream.
+
+Removes the branching for the edge case where no SCSI device handler
+exists.  The __map_bio_fast() method was far too limited: it only
+selected a new pathgroup or path if there was a path failure.  Fix this
+by eliminating it in favor of __map_bio().  __map_bio()'s extra SCSI
+device handler specific MPATHF_PG_INIT_REQUIRED test is not in the fast
+path anyway.
+
+This change restores full path selector functionality for bio-based
+configurations that don't have a SCSI device handler.  But it should be
+noted that the path selectors do have an impact on performance for
+certain networks that are extremely fast (and don't require frequent
+switching).
+
+Fixes: 8d47e65948dd ("dm mpath: remove unnecessary NVMe branching in favor of scsi_dh checks")
+Cc: stable@vger.kernel.org
+Reported-by: Drew Hastings <dhastings@crucialwebhost.com>
+Suggested-by: Martin Wilck <mwilck@suse.de>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c |   37 +------------------------------------
+ 1 file changed, 1 insertion(+), 36 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -599,45 +599,10 @@ static struct pgpath *__map_bio(struct m
+       return pgpath;
+ }
+-static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
+-{
+-      struct pgpath *pgpath;
+-      unsigned long flags;
+-
+-      /* Do we need to select a new pgpath? */
+-      /*
+-       * FIXME: currently only switching path if no path (due to failure, etc)
+-       * - which negates the point of using a path selector
+-       */
+-      pgpath = READ_ONCE(m->current_pgpath);
+-      if (!pgpath)
+-              pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+-
+-      if (!pgpath) {
+-              if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+-                      /* Queue for the daemon to resubmit */
+-                      spin_lock_irqsave(&m->lock, flags);
+-                      bio_list_add(&m->queued_bios, bio);
+-                      spin_unlock_irqrestore(&m->lock, flags);
+-                      queue_work(kmultipathd, &m->process_queued_bios);
+-
+-                      return ERR_PTR(-EAGAIN);
+-              }
+-              return NULL;
+-      }
+-
+-      return pgpath;
+-}
+-
+ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+                              struct dm_mpath_io *mpio)
+ {
+-      struct pgpath *pgpath;
+-
+-      if (!m->hw_handler_name)
+-              pgpath = __map_bio_fast(m, bio);
+-      else
+-              pgpath = __map_bio(m, bio);
++      struct pgpath *pgpath = __map_bio(m, bio);
+       if (IS_ERR(pgpath))
+               return DM_MAPIO_SUBMITTED;
diff --git a/queue-5.4/dm-thin-flush-data-device-before-committing-metadata.patch b/queue-5.4/dm-thin-flush-data-device-before-committing-metadata.patch
new file mode 100644 (file)
index 0000000..ad10792
--- /dev/null
@@ -0,0 +1,170 @@
+From 694cfe7f31db36912725e63a38a5179c8628a496 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:07:42 +0200
+Subject: dm thin: Flush data device before committing metadata
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 694cfe7f31db36912725e63a38a5179c8628a496 upstream.
+
+The thin provisioning target maintains per thin device mappings that map
+virtual blocks to data blocks in the data device.
+
+When we write to a shared block, in case of internal snapshots, or
+provision a new block, in case of external snapshots, we copy the shared
+block to a new data block (COW), update the mapping for the relevant
+virtual block and then issue the write to the new data block.
+
+Suppose the data device has a volatile write-back cache and the
+following sequence of events occur:
+
+1. We write to a shared block
+2. A new data block is allocated
+3. We copy the shared block to the new data block using kcopyd (COW)
+4. We insert the new mapping for the virtual block in the btree for that
+   thin device.
+5. The commit timeout expires and we commit the metadata, that now
+   includes the new mapping from step (4).
+6. The system crashes and the data device's cache has not been flushed,
+   meaning that the COWed data are lost.
+
+The next time we read that virtual block of the thin device we read it
+from the data block allocated in step (2), since the metadata have been
+successfully committed. The data are lost due to the crash, so we read
+garbage instead of the old, shared data.
+
+This has the following implications:
+
+1. In case of writes to shared blocks, with size smaller than the pool's
+   block size (which means we first copy the whole block and then issue
+   the smaller write), we corrupt data that the user never touched.
+
+2. In case of writes to shared blocks, with size equal to the device's
+   logical block size, we fail to provide atomic sector writes. When the
+   system recovers the user will read garbage from that sector instead
+   of the old data or the new data.
+
+3. Even for writes to shared blocks, with size equal to the pool's block
+   size (overwrites), after the system recovers, the written sectors
+   will contain garbage instead of a random mix of sectors containing
+   either old data or new data, thus we fail again to provide atomic
+   sector writes.
+
+4. Even when the user flushes the thin device, because we first commit
+   the metadata and then pass down the flush, the same risk for
+   corruption exists (if the system crashes after the metadata have been
+   committed but before the flush is passed down to the data device.)
+
+The only case which is unaffected is that of writes with size equal to
+the pool's block size and with the FUA flag set. But, because FUA writes
+trigger metadata commits, this case can trigger the corruption
+indirectly.
+
+Moreover, apart from internal and external snapshots, the same issue
+exists for newly provisioned blocks, when block zeroing is enabled.
+After the system recovers the provisioned blocks might contain garbage
+instead of zeroes.
+
+To solve this and avoid the potential data corruption we flush the
+pool's data device **before** committing its metadata.
+
+This ensures that the data blocks of any newly inserted mappings are
+properly written to non-volatile storage and won't be lost in case of a
+crash.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |   42 ++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 40 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -328,6 +328,7 @@ struct pool_c {
+       dm_block_t low_water_blocks;
+       struct pool_features requested_pf; /* Features requested during table load */
+       struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
++      struct bio flush_bio;
+ };
+ /*
+@@ -2392,8 +2393,16 @@ static void process_deferred_bios(struct
+       while ((bio = bio_list_pop(&bio_completions)))
+               bio_endio(bio);
+-      while ((bio = bio_list_pop(&bios)))
+-              generic_make_request(bio);
++      while ((bio = bio_list_pop(&bios))) {
++              /*
++               * The data device was flushed as part of metadata commit,
++               * so complete redundant flushes immediately.
++               */
++              if (bio->bi_opf & REQ_PREFLUSH)
++                      bio_endio(bio);
++              else
++                      generic_make_request(bio);
++      }
+ }
+ static void do_worker(struct work_struct *ws)
+@@ -3127,6 +3136,7 @@ static void pool_dtr(struct dm_target *t
+       __pool_dec(pt->pool);
+       dm_put_device(ti, pt->metadata_dev);
+       dm_put_device(ti, pt->data_dev);
++      bio_uninit(&pt->flush_bio);
+       kfree(pt);
+       mutex_unlock(&dm_thin_pool_table.mutex);
+@@ -3192,6 +3202,29 @@ static void metadata_low_callback(void *
+       dm_table_event(pool->ti->table);
+ }
++/*
++ * We need to flush the data device **before** committing the metadata.
++ *
++ * This ensures that the data blocks of any newly inserted mappings are
++ * properly written to non-volatile storage and won't be lost in case of a
++ * crash.
++ *
++ * Failure to do so can result in data corruption in the case of internal or
++ * external snapshots and in the case of newly provisioned blocks, when block
++ * zeroing is enabled.
++ */
++static int metadata_pre_commit_callback(void *context)
++{
++      struct pool_c *pt = context;
++      struct bio *flush_bio = &pt->flush_bio;
++
++      bio_reset(flush_bio);
++      bio_set_dev(flush_bio, pt->data_dev->bdev);
++      flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
++
++      return submit_bio_wait(flush_bio);
++}
++
+ static sector_t get_dev_size(struct block_device *bdev)
+ {
+       return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+@@ -3360,6 +3393,7 @@ static int pool_ctr(struct dm_target *ti
+       pt->data_dev = data_dev;
+       pt->low_water_blocks = low_water_blocks;
+       pt->adjusted_pf = pt->requested_pf = pf;
++      bio_init(&pt->flush_bio, NULL, 0);
+       ti->num_flush_bios = 1;
+       /*
+@@ -3386,6 +3420,10 @@ static int pool_ctr(struct dm_target *ti
+       if (r)
+               goto out_flags_changed;
++      dm_pool_register_pre_commit_callback(pt->pool->pmd,
++                                           metadata_pre_commit_callback,
++                                           pt);
++
+       pt->callbacks.congested_fn = pool_is_congested;
+       dm_table_add_target_callbacks(ti->table, &pt->callbacks);
diff --git a/queue-5.4/dm-thin-metadata-add-support-for-a-pre-commit-callback.patch b/queue-5.4/dm-thin-metadata-add-support-for-a-pre-commit-callback.patch
new file mode 100644 (file)
index 0000000..e36688d
--- /dev/null
@@ -0,0 +1,101 @@
+From ecda7c0280e6b3398459dc589b9a41c1adb45529 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:07:41 +0200
+Subject: dm thin metadata: Add support for a pre-commit callback
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit ecda7c0280e6b3398459dc589b9a41c1adb45529 upstream.
+
+Add support for one pre-commit callback which is run right before the
+metadata are committed.
+
+This allows the thin provisioning target to run a callback before the
+metadata are committed and is required by the next commit.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin-metadata.c |   29 +++++++++++++++++++++++++++++
+ drivers/md/dm-thin-metadata.h |    7 +++++++
+ 2 files changed, 36 insertions(+)
+
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -189,6 +189,15 @@ struct dm_pool_metadata {
+       sector_t data_block_size;
+       /*
++       * Pre-commit callback.
++       *
++       * This allows the thin provisioning target to run a callback before
++       * the metadata are committed.
++       */
++      dm_pool_pre_commit_fn pre_commit_fn;
++      void *pre_commit_context;
++
++      /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+@@ -826,6 +835,14 @@ static int __commit_transaction(struct d
+       if (unlikely(!pmd->in_service))
+               return 0;
++      if (pmd->pre_commit_fn) {
++              r = pmd->pre_commit_fn(pmd->pre_commit_context);
++              if (r < 0) {
++                      DMERR("pre-commit callback failed");
++                      return r;
++              }
++      }
++
+       r = __write_changed_details(pmd);
+       if (r < 0)
+               return r;
+@@ -892,6 +909,8 @@ struct dm_pool_metadata *dm_pool_metadat
+       pmd->in_service = false;
+       pmd->bdev = bdev;
+       pmd->data_block_size = data_block_size;
++      pmd->pre_commit_fn = NULL;
++      pmd->pre_commit_context = NULL;
+       r = __create_persistent_data_objects(pmd, format_device);
+       if (r) {
+@@ -2044,6 +2063,16 @@ int dm_pool_register_metadata_threshold(
+       return r;
+ }
++void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
++                                        dm_pool_pre_commit_fn fn,
++                                        void *context)
++{
++      pmd_write_lock_in_core(pmd);
++      pmd->pre_commit_fn = fn;
++      pmd->pre_commit_context = context;
++      pmd_write_unlock(pmd);
++}
++
+ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+ {
+       int r = -EINVAL;
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct
+  */
+ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
++/* Pre-commit callback */
++typedef int (*dm_pool_pre_commit_fn)(void *context);
++
++void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
++                                        dm_pool_pre_commit_fn fn,
++                                        void *context);
++
+ /*----------------------------------------------------------------*/
+ #endif
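
The hook added above follows a classic register-then-invoke pattern: the pool stores one function pointer plus a context, and the commit path calls it right before writing the metadata, refusing to commit if the hook fails. A compact userspace sketch of that pattern with hypothetical names, not the dm-thin API:

#include <stdio.h>

typedef int (*pre_commit_fn)(void *context);

struct metadata {
        pre_commit_fn pre_commit;
        void *pre_commit_context;
};

static void register_pre_commit(struct metadata *md, pre_commit_fn fn, void *ctx)
{
        md->pre_commit = fn;
        md->pre_commit_context = ctx;
}

static int commit(struct metadata *md)
{
        if (md->pre_commit) {
                int r = md->pre_commit(md->pre_commit_context);
                if (r < 0)
                        return r; /* refuse to commit if the hook fails */
        }
        puts("metadata committed");
        return 0;
}

static int flush_data_device(void *context)
{
        (void)context;
        puts("flushing data device before commit");
        return 0;
}

int main(void)
{
        struct metadata md = { 0 };

        register_pre_commit(&md, flush_data_device, NULL);
        return commit(&md);
}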
diff --git a/queue-5.4/dma-buf-fix-memory-leak-in-sync_file_merge.patch b/queue-5.4/dma-buf-fix-memory-leak-in-sync_file_merge.patch
new file mode 100644 (file)
index 0000000..56133a5
--- /dev/null
@@ -0,0 +1,34 @@
+From 6645d42d79d33e8a9fe262660a75d5f4556bbea9 Mon Sep 17 00:00:00 2001
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 22 Nov 2019 16:09:55 -0600
+Subject: dma-buf: Fix memory leak in sync_file_merge()
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit 6645d42d79d33e8a9fe262660a75d5f4556bbea9 upstream.
+
+In the implementation of sync_file_merge() the allocated sync_file is
+leaked if the number of fences overflows. Release the sync_file via goto err.
+
+Fixes: a02b9dc90d84 ("dma-buf/sync_file: refactor fence storage in struct sync_file")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191122220957.30427-1-navid.emamdoost@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma-buf/sync_file.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge
+       a_fences = get_fences(a, &a_num_fences);
+       b_fences = get_fences(b, &b_num_fences);
+       if (a_num_fences > INT_MAX - b_num_fences)
+-              return NULL;
++              goto err;
+       num_fences = a_num_fences + b_num_fences;
diff --git a/queue-5.4/drm-amd-display-add-default-clocks-if-not-able-to-fetch-them.patch b/queue-5.4/drm-amd-display-add-default-clocks-if-not-able-to-fetch-them.patch
new file mode 100644 (file)
index 0000000..4412f1c
--- /dev/null
@@ -0,0 +1,35 @@
+From 946621691f9919c263b4679b77f81f06019d3636 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 19 Nov 2019 15:54:17 -0500
+Subject: drm/amd/display: add default clocks if not able to fetch them
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 946621691f9919c263b4679b77f81f06019d3636 upstream.
+
+dm_pp_get_clock_levels_by_type needs to add the default clocks
+to the powerplay case as well.  This was accidentally dropped.
+
+Fixes: b3ea88fef321de ("drm/amd/powerplay: add get_clock_by_type interface for display")
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/906
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -342,7 +342,8 @@ bool dm_pp_get_clock_levels_by_type(
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
+               if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+                       dc_to_pp_clock_type(clk_type), &pp_clks)) {
+-              /* Error in pplib. Provide default values. */
++                      /* Error in pplib. Provide default values. */
++                      get_default_clock_levels(clk_type, dc_clks);
+                       return true;
+               }
+       } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
diff --git a/queue-5.4/drm-amd-display-re-enable-wait-in-pipelock-but-add-timeout.patch b/queue-5.4/drm-amd-display-re-enable-wait-in-pipelock-but-add-timeout.patch
new file mode 100644 (file)
index 0000000..eaba32c
--- /dev/null
@@ -0,0 +1,51 @@
+From 627f75d18910b287472593a4a2c41de9a386f5a2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 15 Nov 2019 10:02:44 -0500
+Subject: drm/amd/display: re-enable wait in pipelock, but add timeout
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 627f75d18910b287472593a4a2c41de9a386f5a2 upstream.
+
+Removing this causes hangs in some games, so re-add it, but add
+a timeout so we don't hang while switching flip types.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205169
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=112266
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |   19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1103,6 +1103,25 @@ void dcn20_pipe_control_lock(
+       if (pipe->plane_state != NULL)
+               flip_immediate = pipe->plane_state->flip_immediate;
++      if (flip_immediate && lock) {
++              const int TIMEOUT_FOR_FLIP_PENDING = 100000;
++              int i;
++
++              for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
++                      if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
++                              break;
++                      udelay(1);
++              }
++
++              if (pipe->bottom_pipe != NULL) {
++                      for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
++                              if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
++                                      break;
++                              udelay(1);
++                      }
++              }
++      }
++
+       /* In flip immediate and pipe splitting case, we need to use GSL
+        * for synchronization. Only do setup on locking and on flip type change.
+        */
diff --git a/queue-5.4/drm-amdgpu-gfx10-explicitly-wait-for-cp-idle-after-halt-unhalt.patch b/queue-5.4/drm-amdgpu-gfx10-explicitly-wait-for-cp-idle-after-halt-unhalt.patch
new file mode 100644 (file)
index 0000000..2ea63ff
--- /dev/null
@@ -0,0 +1,52 @@
+From 1e902a6d32d73e4a6b3bc9d7cd43d4ee2b242dea Mon Sep 17 00:00:00 2001
+From: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Date: Thu, 14 Nov 2019 16:56:08 +0800
+Subject: drm/amdgpu/gfx10: explicitly wait for cp idle after halt/unhalt
+
+From: Xiaojie Yuan <xiaojie.yuan@amd.com>
+
+commit 1e902a6d32d73e4a6b3bc9d7cd43d4ee2b242dea upstream.
+
+50us is not enough time to wait for the CP to become ready after a GPU reset on some navi asics.
+
+Signed-off-by: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Suggested-by: Jack Xiao <Jack.Xiao@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -2400,7 +2400,7 @@ static int gfx_v10_0_wait_for_rlc_autolo
+       return 0;
+ }
+-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
++static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ {
+       int i;
+       u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
+@@ -2413,7 +2413,17 @@ static void gfx_v10_0_cp_gfx_enable(stru
+                       adev->gfx.gfx_ring[i].sched.ready = false;
+       }
+       WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+-      udelay(50);
++
++      for (i = 0; i < adev->usec_timeout; i++) {
++              if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
++                      break;
++              udelay(1);
++      }
++
++      if (i >= adev->usec_timeout)
++              DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
++
++      return 0;
+ }
+ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
diff --git a/queue-5.4/drm-amdgpu-gfx10-re-init-clear-state-buffer-after-gpu-reset.patch b/queue-5.4/drm-amdgpu-gfx10-re-init-clear-state-buffer-after-gpu-reset.patch
new file mode 100644 (file)
index 0000000..4eb2319
--- /dev/null
@@ -0,0 +1,107 @@
+From 210b3b3c7563df391bd81d49c51af303b928de4a Mon Sep 17 00:00:00 2001
+From: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Date: Wed, 20 Nov 2019 14:02:22 +0800
+Subject: drm/amdgpu/gfx10: re-init clear state buffer after gpu reset
+
+From: Xiaojie Yuan <xiaojie.yuan@amd.com>
+
+commit 210b3b3c7563df391bd81d49c51af303b928de4a upstream.
+
+This patch fixes a failure of the 2nd baco reset with gfxoff enabled on navi1x.
+
+The clear state buffer (which resides in vram) is corrupted after the 1st baco
+reset; upon gfxoff exit, CPF gets a garbage header in the CSIB and hangs.
+
+Signed-off-by: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |   43 ++++++++++++++++++++++++++++-----
+ 1 file changed, 37 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_in
+       WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
+ }
+-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
++static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
+ {
++      int r;
++
++      if (adev->in_gpu_reset) {
++              r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
++              if (r)
++                      return r;
++
++              r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
++                                 (void **)&adev->gfx.rlc.cs_ptr);
++              if (!r) {
++                      adev->gfx.rlc.funcs->get_csb_buffer(adev,
++                                      adev->gfx.rlc.cs_ptr);
++                      amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++              }
++
++              amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++              if (r)
++                      return r;
++      }
++
+       /* csib */
+       WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+                    adev->gfx.rlc.clear_state_gpu_addr >> 32);
+       WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+                    adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+       WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
++
++      return 0;
+ }
+-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
++static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
+ {
+       int i;
++      int r;
+-      gfx_v10_0_init_csb(adev);
++      r = gfx_v10_0_init_csb(adev);
++      if (r)
++              return r;
+       for (i = 0; i < adev->num_vmhubs; i++)
+               amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+       /* TODO: init power gating */
+-      return;
++      return 0;
+ }
+ void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
+@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct a
+               r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
+               if (r)
+                       return r;
+-              gfx_v10_0_init_pg(adev);
++
++              r = gfx_v10_0_init_pg(adev);
++              if (r)
++                      return r;
+               /* enable RLC SRM */
+               gfx_v10_0_rlc_enable_srm(adev);
+@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct a
+                               return r;
+               }
+-              gfx_v10_0_init_pg(adev);
++              r = gfx_v10_0_init_pg(adev);
++              if (r)
++                      return r;
++
+               adev->gfx.rlc.funcs->start(adev);
+               if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
diff --git a/queue-5.4/drm-amdgpu-initialize-vm_inv_eng0_sem-for-gfxhub-and-mmhub.patch b/queue-5.4/drm-amdgpu-initialize-vm_inv_eng0_sem-for-gfxhub-and-mmhub.patch
new file mode 100644 (file)
index 0000000..2895404
--- /dev/null
@@ -0,0 +1,100 @@
+From 6c2c8972374ac5c35078d36d7559f64c368f7b33 Mon Sep 17 00:00:00 2001
+From: changzhu <Changfeng.Zhu@amd.com>
+Date: Tue, 19 Nov 2019 10:18:39 +0800
+Subject: drm/amdgpu: initialize vm_inv_eng0_sem for gfxhub and mmhub
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: changzhu <Changfeng.Zhu@amd.com>
+
+commit 6c2c8972374ac5c35078d36d7559f64c368f7b33 upstream.
+
+SW must acquire/release one of the vm_invalidate_eng*_sem around the
+invalidation req/ack. This way, it can avoid losing invalidate
+acknowledge state across a power-gating off cycle.
+To use vm_invalidate_eng*_sem, the driver first needs to initialize
+vm_invalidate_eng*_sem.
+
+Signed-off-by: changzhu <Changfeng.Zhu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  |    1 +
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c |    2 ++
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c |    2 ++
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  |    2 ++
+ drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c  |    2 ++
+ drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c  |    4 ++++
+ 6 files changed, 13 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
+ struct amdgpu_vmhub {
+       uint32_t        ctx0_ptb_addr_lo32;
+       uint32_t        ctx0_ptb_addr_hi32;
++      uint32_t        vm_inv_eng0_sem;
+       uint32_t        vm_inv_eng0_req;
+       uint32_t        vm_inv_eng0_ack;
+       uint32_t        vm_context0_cntl;
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -365,6 +365,8 @@ void gfxhub_v1_0_init(struct amdgpu_devi
+       hub->ctx0_ptb_addr_hi32 =
+               SOC15_REG_OFFSET(GC, 0,
+                                mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
++      hub->vm_inv_eng0_sem =
++              SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
+       hub->vm_inv_eng0_req =
+               SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
+       hub->vm_inv_eng0_ack =
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+@@ -350,6 +350,8 @@ void gfxhub_v2_0_init(struct amdgpu_devi
+       hub->ctx0_ptb_addr_hi32 =
+               SOC15_REG_OFFSET(GC, 0,
+                                mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
++      hub->vm_inv_eng0_sem =
++              SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
+       hub->vm_inv_eng0_req =
+               SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
+       hub->vm_inv_eng0_ack =
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -418,6 +418,8 @@ void mmhub_v1_0_init(struct amdgpu_devic
+       hub->ctx0_ptb_addr_hi32 =
+               SOC15_REG_OFFSET(MMHUB, 0,
+                                mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
++      hub->vm_inv_eng0_sem =
++              SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
+       hub->vm_inv_eng0_req =
+               SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
+       hub->vm_inv_eng0_ack =
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+@@ -341,6 +341,8 @@ void mmhub_v2_0_init(struct amdgpu_devic
+       hub->ctx0_ptb_addr_hi32 =
+               SOC15_REG_OFFSET(MMHUB, 0,
+                                mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
++      hub->vm_inv_eng0_sem =
++              SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
+       hub->vm_inv_eng0_req =
+               SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
+       hub->vm_inv_eng0_ack =
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+@@ -502,6 +502,10 @@ void mmhub_v9_4_init(struct amdgpu_devic
+                       SOC15_REG_OFFSET(MMHUB, 0,
+                           mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
+                           i * MMHUB_INSTANCE_REGISTER_OFFSET;
++              hub[i]->vm_inv_eng0_sem =
++                      SOC15_REG_OFFSET(MMHUB, 0,
++                                       mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
++                                       i * MMHUB_INSTANCE_REGISTER_OFFSET;
+               hub[i]->vm_inv_eng0_req =
+                       SOC15_REG_OFFSET(MMHUB, 0,
+                                        mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
diff --git a/queue-5.4/drm-amdgpu-invalidate-mmhub-semaphore-workaround-in-gmc9-gmc10.patch b/queue-5.4/drm-amdgpu-invalidate-mmhub-semaphore-workaround-in-gmc9-gmc10.patch
new file mode 100644 (file)
index 0000000..b48594d
--- /dev/null
@@ -0,0 +1,219 @@
+From f920d1bb9c4e77efb08c41d70b6d442f46fd8902 Mon Sep 17 00:00:00 2001
+From: changzhu <Changfeng.Zhu@amd.com>
+Date: Tue, 19 Nov 2019 11:13:29 +0800
+Subject: drm/amdgpu: invalidate mmhub semaphore workaround in gmc9/gmc10
+
+From: changzhu <Changfeng.Zhu@amd.com>
+
+commit f920d1bb9c4e77efb08c41d70b6d442f46fd8902 upstream.
+
+The GPU may lose gpuvm invalidate acknowledge state across a power-gating off
+cycle. To avoid this issue in gmc9/gmc10 invalidation, add semaphore acquire
+before invalidation and semaphore release after invalidation.
+
+After adding semaphore acquire before invalidation, the semaphore
+register becomes read-only if another process tries to acquire the semaphore.
+Then it will not be able to release this semaphore, which may cause a
+deadlock. If this deadlock happens, it needs a semaphore
+firmware fix.
+
+Signed-off-by: changzhu <Changfeng.Zhu@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c |   57 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  |   57 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/soc15.h     |    4 +-
+ 3 files changed, 116 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struc
+       const unsigned eng = 17;
+       unsigned int i;
++      spin_lock(&adev->gmc.invalidate_lock);
++      /*
++       * It may lose gpuvm invalidate acknowldege state across power-gating
++       * off cycle, add semaphore acquire before invalidation and semaphore
++       * release after invalidation to avoid entering power gated state
++       * to WA the Issue
++       */
++
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (vmhub == AMDGPU_MMHUB_0 ||
++          vmhub == AMDGPU_MMHUB_1) {
++              for (i = 0; i < adev->usec_timeout; i++) {
++                      /* a read return value of 1 means semaphore acuqire */
++                      tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
++                      if (tmp & 0x1)
++                              break;
++                      udelay(1);
++              }
++
++              if (i >= adev->usec_timeout)
++                      DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
++      }
++
+       WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+       /*
+@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struc
+               udelay(1);
+       }
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (vmhub == AMDGPU_MMHUB_0 ||
++          vmhub == AMDGPU_MMHUB_1)
++              /*
++               * add semaphore release after invalidation,
++               * write with 0 means semaphore release
++               */
++              WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
++
++      spin_unlock(&adev->gmc.invalidate_lock);
++
+       if (i < adev->usec_timeout)
+               return;
+@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu
+       uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
+       unsigned eng = ring->vm_inv_eng;
++      /*
++       * It may lose gpuvm invalidate acknowldege state across power-gating
++       * off cycle, add semaphore acquire before invalidation and semaphore
++       * release after invalidation to avoid entering power gated state
++       * to WA the Issue
++       */
++
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
++          ring->funcs->vmhub == AMDGPU_MMHUB_1)
++              /* a read return value of 1 means semaphore acuqire */
++              amdgpu_ring_emit_reg_wait(ring,
++                                        hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
++
+       amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+                             lower_32_bits(pd_addr));
+@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu
+                                           hub->vm_inv_eng0_ack + eng,
+                                           req, 1 << vmid);
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
++          ring->funcs->vmhub == AMDGPU_MMHUB_1)
++              /*
++               * add semaphore release after invalidation,
++               * write with 0 means semaphore release
++               */
++              amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
++
+       return pd_addr;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -491,6 +491,29 @@ static void gmc_v9_0_flush_gpu_tlb(struc
+       }
+       spin_lock(&adev->gmc.invalidate_lock);
++
++      /*
++       * It may lose gpuvm invalidate acknowldege state across power-gating
++       * off cycle, add semaphore acquire before invalidation and semaphore
++       * release after invalidation to avoid entering power gated state
++       * to WA the Issue
++       */
++
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (vmhub == AMDGPU_MMHUB_0 ||
++          vmhub == AMDGPU_MMHUB_1) {
++              for (j = 0; j < adev->usec_timeout; j++) {
++                      /* a read return value of 1 means semaphore acuqire */
++                      tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
++                      if (tmp & 0x1)
++                              break;
++                      udelay(1);
++              }
++
++              if (j >= adev->usec_timeout)
++                      DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
++      }
++
+       WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+       /*
+@@ -506,7 +529,18 @@ static void gmc_v9_0_flush_gpu_tlb(struc
+                       break;
+               udelay(1);
+       }
++
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (vmhub == AMDGPU_MMHUB_0 ||
++          vmhub == AMDGPU_MMHUB_1)
++              /*
++               * add semaphore release after invalidation,
++               * write with 0 means semaphore release
++               */
++              WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
++
+       spin_unlock(&adev->gmc.invalidate_lock);
++
+       if (j < adev->usec_timeout)
+               return;
+@@ -521,6 +555,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_
+       uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
+       unsigned eng = ring->vm_inv_eng;
++      /*
++       * It may lose gpuvm invalidate acknowldege state across power-gating
++       * off cycle, add semaphore acquire before invalidation and semaphore
++       * release after invalidation to avoid entering power gated state
++       * to WA the Issue
++       */
++
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
++          ring->funcs->vmhub == AMDGPU_MMHUB_1)
++              /* a read return value of 1 means semaphore acuqire */
++              amdgpu_ring_emit_reg_wait(ring,
++                                        hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
++
+       amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+                             lower_32_bits(pd_addr));
+@@ -531,6 +579,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_
+                                           hub->vm_inv_eng0_ack + eng,
+                                           req, 1 << vmid);
++      /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
++      if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
++          ring->funcs->vmhub == AMDGPU_MMHUB_1)
++              /*
++               * add semaphore release after invalidation,
++               * write with 0 means semaphore release
++               */
++              amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
++
+       return pd_addr;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
+@@ -28,8 +28,8 @@
+ #include "nbio_v7_0.h"
+ #include "nbio_v7_4.h"
+-#define SOC15_FLUSH_GPU_TLB_NUM_WREG          4
+-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT      1
++#define SOC15_FLUSH_GPU_TLB_NUM_WREG          6
++#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT      3
+ extern const struct amd_ip_funcs soc15_common_ip_funcs;
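
The workaround wraps every invalidation in an acquire-by-polling / release-by-write sequence. A rough userspace sketch of that control flow; the register accessors here are hypothetical stand-ins, not amdgpu MMIO helpers:

#include <stdio.h>

#define SEM_TIMEOUT 1000000

/* Hypothetical stand-ins for MMIO accesses: reading the semaphore register
 * returns 1 once the semaphore has been granted, writing 0 releases it. */
static unsigned int fake_sem_reg = 1;

static unsigned int read_sem(void)    { return fake_sem_reg; }
static void write_sem(unsigned int v) { fake_sem_reg = v; }

static int flush_with_semaphore(void)
{
        int i;

        /* Acquire: poll until the hardware hands over the semaphore. */
        for (i = 0; i < SEM_TIMEOUT; i++) {
                if (read_sem() & 0x1)
                        break;
        }
        if (i >= SEM_TIMEOUT) {
                fprintf(stderr, "timeout waiting for semaphore acquire\n");
                return -1;
        }

        puts("issue invalidate request, wait for acknowledge");

        /* Release: a write of 0 lets power gating proceed again. */
        write_sem(0);
        return 0;
}

int main(void)
{
        return flush_with_semaphore() ? 1 : 0;
}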
diff --git a/queue-5.4/drm-dp_mst-correct-the-bug-in-drm_dp_update_payload_part1.patch b/queue-5.4/drm-dp_mst-correct-the-bug-in-drm_dp_update_payload_part1.patch
new file mode 100644 (file)
index 0000000..a1423ad
--- /dev/null
@@ -0,0 +1,60 @@
+From e5a6ca27eb72c67533ddfc11c06df84beaa167fa Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Tue, 3 Dec 2019 12:24:23 +0800
+Subject: drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit e5a6ca27eb72c67533ddfc11c06df84beaa167fa upstream.
+
+[Why]
+If the payload_state is DP_PAYLOAD_DELETE_LOCAL for consecutive payloads,
+the current code doesn't delete the payload at the current index and just
+moves the index to the next one after shuffling payloads.
+
+[How]
+Drop the i++ increment from the for loop head and decide whether
+to increase the index according to the payload_state of the current
+payload.
+
+Changes since v1:
+* Refine the code to make it easier to read
+* Amend the commit message to match the way the code is now modified.
+
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Fixes: 706246c761dd ("drm/dp_mst: Refactor drm_dp_update_payload_part1()")
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Juston Li <juston.li@intel.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: Sean Paul <sean@poorly.run>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.1+
+[Added cc for stable]
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191203042423.5961-1-Wayne.Lin@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2465,9 +2465,11 @@ int drm_dp_update_payload_part1(struct d
+                       drm_dp_mst_topology_put_port(port);
+       }
+-      for (i = 0; i < mgr->max_payloads; i++) {
+-              if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
++      for (i = 0; i < mgr->max_payloads; /* do nothing */) {
++              if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
++                      i++;
+                       continue;
++              }
+               DRM_DEBUG_KMS("removing payload %d\n", i);
+               for (j = i; j < mgr->max_payloads - 1; j++) {
diff --git a/queue-5.4/drm-i915-fbc-disable-fbc-by-default-on-all-glk.patch b/queue-5.4/drm-i915-fbc-disable-fbc-by-default-on-all-glk.patch
new file mode 100644 (file)
index 0000000..7c6b6e0
--- /dev/null
@@ -0,0 +1,45 @@
+From 0eb8e74f7202a4a98bbc0c1adeed3986cf50b66a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 27 Nov 2019 22:12:09 +0200
+Subject: drm/i915/fbc: Disable fbc by default on all glk+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 0eb8e74f7202a4a98bbc0c1adeed3986cf50b66a upstream.
+
+We're missing a workaround in the fbc code for all glk+ platforms
+which can cause corruption around the top of the screen. So
+enabling fbc by default is a bad idea. I'm not keen to backport
+the w/a so let's start by disabling fbc by default on all glk+.
+We'll lift the restriction once the w/a is in place.
+
+Cc: stable@vger.kernel.org
+Cc: Daniel Drake <drake@endlessm.com>
+Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Cc: Jian-Hong Pan <jian-hong@endlessm.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191127201222.16669-2-ville.syrjala@linux.intel.com
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+(cherry picked from commit cd8c021b36a66833cefe2c90a79a9e312a2a5690)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_fbc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_fbc.c
++++ b/drivers/gpu/drm/i915/display/intel_fbc.c
+@@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(str
+               return 0;
+       /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+-      if (IS_GEMINILAKE(dev_priv))
++      if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return 0;
+       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
diff --git a/queue-5.4/drm-i915-gvt-fix-cmd-length-check-for-mi_atomic.patch b/queue-5.4/drm-i915-gvt-fix-cmd-length-check-for-mi_atomic.patch
new file mode 100644 (file)
index 0000000..c091a77
--- /dev/null
@@ -0,0 +1,38 @@
+From 92b1aa773fadb4e2a90ed5d3beecb422d568ad9a Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Thu, 21 Nov 2019 13:57:45 +0800
+Subject: drm/i915/gvt: Fix cmd length check for MI_ATOMIC
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 92b1aa773fadb4e2a90ed5d3beecb422d568ad9a upstream.
+
+Correct the valid command length check for MI_ATOMIC: the inline-data
+available field needs to be checked instead of the operand data length
+of the whole command.
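+
+A rough, hypothetical sketch of the idea (placeholder lengths and bit
+position, not the real GVT parser): the expected command length is derived
+from the inline-data flag in the first dword rather than from the operand
+length field.
+
+  #include <stdint.h>
+
+  /* Placeholder dword counts and bit position; the real values come from
+   * the command specification, not from this sketch. */
+  #define MI_ATOMIC_BASE_LEN      3
+  #define MI_ATOMIC_INLINE_LEN    11
+  #define MI_ATOMIC_INLINE_BIT    (1u << 18)
+
+  /* Expected length depends on the inline-data flag in dword 0. */
+  static uint32_t mi_atomic_expected_len(uint32_t dword0)
+  {
+          if (dword0 & MI_ATOMIC_INLINE_BIT)
+                  return MI_ATOMIC_INLINE_LEN;
+
+          return MI_ATOMIC_BASE_LEN;
+  }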
+
+Fixes: 00a33be40634 ("drm/i915/gvt: Add valid length check for MI variable commands")
+Reported-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Gao Fred <fred.gao@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/cmd_parser.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+@@ -1597,9 +1597,9 @@ static int cmd_handler_mi_op_2f(struct p
+       if (!(cmd_val(s, 0) & (1 << 22)))
+               return ret;
+-      /* check if QWORD */
+-      if (DWORD_FIELD(0, 20, 19) == 1)
+-              valid_len += 8;
++      /* check inline data */
++      if (cmd_val(s, 0) & BIT(18))
++              valid_len = CMD_LEN(9);
+       ret = gvt_check_valid_cmd_length(cmd_length(s),
+                       valid_len);
+       if (ret)
diff --git a/queue-5.4/drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch b/queue-5.4/drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch
new file mode 100644 (file)
index 0000000..68ac213
--- /dev/null
@@ -0,0 +1,121 @@
+From 43cb86799ff03e9819c07f37f72f80f8246ad7ed Mon Sep 17 00:00:00 2001
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Date: Sun, 8 Dec 2019 18:18:31 +0100
+Subject: drm: meson: venc: cvbs: fix CVBS mode matching
+
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+commit 43cb86799ff03e9819c07f37f72f80f8246ad7ed upstream.
+
+With commit 222ec1618c3ace ("drm: Add aspect ratio parsing in DRM
+layer") the drm core started honoring the picture_aspect_ratio field
+when comparing two drm_display_modes. Prior to that it was ignored.
+When the CVBS encoder driver was initially submitted there was no aspect
+ratio check.
+
+Switch from drm_mode_equal() to drm_mode_match() without
+DRM_MODE_MATCH_ASPECT_RATIO to fix "kmscube" and X.org output using the
+CVBS connector. When kmscube (for example) sets the output mode while
+using the CVBS connector it passes HDMI_PICTURE_ASPECT_NONE, making
+drm_mode_equal() fail because the comparison includes the aspect ratio.
+
+Prior to this patch kmscube reported:
+  failed to set mode: Invalid argument
+
+The CVBS mode checks in the sun4i (drivers/gpu/drm/sun4i/sun4i_tv.c
+sun4i_tv_mode_to_drm_mode) and ZTE (drivers/gpu/drm/zte/zx_tvenc.c
+tvenc_mode_{pal,ntsc}) drivers don't set the "picture_aspect_ratio" at
+all. The Meson VPU driver does not rely on the aspect ratio for the CVBS
+output so we can safely decouple it from the hdmi_picture_aspect
+setting.
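+
+A minimal standalone sketch of that approach (hypothetical types and
+helpers, not the DRM API): modes are matched on timings only, so the
+caller's aspect-ratio value never affects the lookup.
+
+  #include <stdbool.h>
+  #include <stddef.h>
+
+  struct tv_mode {
+          int hdisplay, vdisplay, vrefresh;
+          int picture_aspect_ratio;       /* deliberately ignored below */
+  };
+
+  /* Match on timings only; callers may pass any aspect-ratio value. */
+  static bool tv_mode_match(const struct tv_mode *a, const struct tv_mode *b)
+  {
+          return a->hdisplay == b->hdisplay &&
+                 a->vdisplay == b->vdisplay &&
+                 a->vrefresh == b->vrefresh;
+  }
+
+  static const struct tv_mode *
+  tv_mode_lookup(const struct tv_mode *table, size_t n,
+                 const struct tv_mode *req)
+  {
+          size_t i;
+
+          for (i = 0; i < n; i++)
+                  if (tv_mode_match(&table[i], req))
+                          return &table[i];
+
+          return NULL;
+  }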
+
+Cc: <stable@vger.kernel.org>
+Fixes: 222ec1618c3ace ("drm: Add aspect ratio parsing in DRM layer")
+Fixes: bbbe775ec5b5da ("drm: Add support for Amlogic Meson Graphic Controller")
+Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Acked-by: Neil Armstrong <narmstrong@baylibre.com>
+[narmstrong: squashed with drm: meson: venc: cvbs: deduplicate the meson_cvbs_mode lookup code]
+Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191208171832.1064772-3-martin.blumenstingl@googlemail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/meson/meson_venc_cvbs.c |   48 ++++++++++++++++++--------------
+ 1 file changed, 27 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
+@@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[
+       },
+ };
++static const struct meson_cvbs_mode *
++meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
++{
++      int i;
++
++      for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
++              struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++
++              if (drm_mode_match(req_mode, &meson_mode->mode,
++                                 DRM_MODE_MATCH_TIMINGS |
++                                 DRM_MODE_MATCH_CLOCK |
++                                 DRM_MODE_MATCH_FLAGS |
++                                 DRM_MODE_MATCH_3D_FLAGS))
++                      return meson_mode;
++      }
++
++      return NULL;
++}
++
+ /* Connector */
+ static void meson_cvbs_connector_destroy(struct drm_connector *connector)
+@@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomi
+                                       struct drm_crtc_state *crtc_state,
+                                       struct drm_connector_state *conn_state)
+ {
+-      int i;
+-
+-      for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+-              struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+-
+-              if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
+-                      return 0;
+-      }
++      if (meson_cvbs_get_mode(&crtc_state->mode))
++              return 0;
+       return -EINVAL;
+ }
+@@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+ {
++      const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
+       struct meson_venc_cvbs *meson_venc_cvbs =
+                                       encoder_to_meson_venc_cvbs(encoder);
+       struct meson_drm *priv = meson_venc_cvbs->priv;
+-      int i;
+-      for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+-              struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++      if (meson_mode) {
++              meson_venci_cvbs_mode_set(priv, meson_mode->enci);
+-              if (drm_mode_equal(mode, &meson_mode->mode)) {
+-                      meson_venci_cvbs_mode_set(priv,
+-                                                meson_mode->enci);
+-
+-                      /* Setup 27MHz vclk2 for ENCI and VDAC */
+-                      meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+-                                       MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+-                                       MESON_VCLK_CVBS, true);
+-                      break;
+-              }
++              /* Setup 27MHz vclk2 for ENCI and VDAC */
++              meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
++                               MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+       }
+ }
diff --git a/queue-5.4/drm-mgag200-add-workaround-for-hw-that-does-not-support-startadd.patch b/queue-5.4/drm-mgag200-add-workaround-for-hw-that-does-not-support-startadd.patch
new file mode 100644 (file)
index 0000000..3e64c63
--- /dev/null
@@ -0,0 +1,115 @@
+From 1591fadf857cdbaf2baa55e421af99a61354713c Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 26 Nov 2019 11:15:29 +0100
+Subject: drm/mgag200: Add workaround for HW that does not support 'startadd'
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 1591fadf857cdbaf2baa55e421af99a61354713c upstream.
+
+There's at least one system that does not interpret the value of
+the device's 'startadd' field correctly, which leads to incorrectly
+displayed scanout buffers. Always placing the active scanout buffer
+at offset 0 works around the problem.
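+
+A hedged illustration of the trick (made-up sizes and helper, not the
+driver code): when a buffer must be aligned to the size of the whole VRAM,
+the only placement that still fits inside VRAM is offset 0.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PAGE_SIZE 4096u
+
+  /* Round a byte count up to whole pages, like the kernel's PFN_UP(). */
+  static uint64_t pfn_up(uint64_t bytes)
+  {
+          return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+  }
+
+  int main(void)
+  {
+          uint64_t vram_size = 16u * 1024 * 1024;  /* assumed 16 MiB of VRAM */
+
+          /*
+           * Requiring the scanout BO to be aligned to vram_size (in pages)
+           * leaves exactly one valid start address inside VRAM: page 0,
+           * i.e. offset 0.
+           */
+          uint64_t pg_align = pfn_up(vram_size);
+
+          printf("required alignment: %llu pages -> start offset 0 only\n",
+                 (unsigned long long)pg_align);
+          return 0;
+  }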
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reported-by: John Donnelly <john.p.donnelly@oracle.com>
+Tested-by: John Donnelly <john.p.donnelly@oracle.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Fixes: 81da87f63a1e ("drm: Replace drm_gem_vram_push_to_system() with kunmap + unpin")
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: "Y.C. Chen" <yc_chen@aspeedtech.com>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: "José Roberto de Souza" <jose.souza@intel.com>
+Cc: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.3+
+Link: https://gitlab.freedesktop.org/drm/misc/issues/7
+Link: https://patchwork.freedesktop.org/patch/msgid/20191126101529.20356-4-tzimmermann@suse.de
+[drop debugfs_init callback - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.c |   35 +++++++++++++++++++++++++++++++++-
+ drivers/gpu/drm/mgag200/mgag200_drv.h |    3 ++
+ 2 files changed, 37 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
+@@ -30,6 +30,8 @@ module_param_named(modeset, mgag200_mode
+ static struct drm_driver driver;
+ static const struct pci_device_id pciidlist[] = {
++      { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
++              G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
+       { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+       { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+       { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+@@ -63,6 +65,35 @@ static const struct file_operations mgag
+       DRM_VRAM_MM_FILE_OPERATIONS
+ };
++static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
++{
++      return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
++}
++
++int mgag200_driver_dumb_create(struct drm_file *file,
++                             struct drm_device *dev,
++                             struct drm_mode_create_dumb *args)
++{
++      struct mga_device *mdev = dev->dev_private;
++      unsigned long pg_align;
++
++      if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
++              return -EINVAL;
++
++      pg_align = 0ul;
++
++      /*
++       * Aligning scanout buffers to the size of the video ram forces
++       * placement at offset 0. Works around a bug where HW does not
++       * respect 'startadd' field.
++       */
++      if (mgag200_pin_bo_at_0(mdev))
++              pg_align = PFN_UP(mdev->mc.vram_size);
++
++      return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
++                                           pg_align, false, args);
++}
++
+ static struct drm_driver driver = {
+       .driver_features = DRIVER_GEM | DRIVER_MODESET,
+       .load = mgag200_driver_load,
+@@ -74,7 +105,9 @@ static struct drm_driver driver = {
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+-      DRM_GEM_VRAM_DRIVER
++      .dumb_create = mgag200_driver_dumb_create,
++      .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
++      .gem_prime_mmap = drm_gem_prime_mmap,
+ };
+ static struct pci_driver mgag200_pci_driver = {
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -159,6 +159,9 @@ enum mga_type {
+       G200_EW3,
+ };
++/* HW does not handle 'startadd' field correct. */
++#define MGAG200_FLAG_HW_BUG_NO_STARTADD       (1ul << 8)
++
+ #define MGAG200_TYPE_MASK     (0x000000ff)
+ #define MGAG200_FLAG_MASK     (0x00ffff00)
diff --git a/queue-5.4/drm-mgag200-extract-device-type-from-flags.patch b/queue-5.4/drm-mgag200-extract-device-type-from-flags.patch
new file mode 100644 (file)
index 0000000..1a6dbc2
--- /dev/null
@@ -0,0 +1,75 @@
+From 3a8a5aba142a44eaeba0cb0ec1b4a8f177b5e59a Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 26 Nov 2019 11:15:27 +0100
+Subject: drm/mgag200: Extract device type from flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 3a8a5aba142a44eaeba0cb0ec1b4a8f177b5e59a upstream.
+
+Adds a conversion function that extracts the device type from the
+PCI id-table flags. Allows for storing additional information in the
+other flag bits.
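+
+A small sketch of the encoding (placeholder masks and type names): the low
+byte of the PCI driver_data carries the device type, leaving the upper bits
+free for feature flags.
+
+  typedef unsigned long kernel_ulong_t;   /* stand-in for the kernel typedef */
+
+  #define TYPE_MASK       0x000000fful    /* device type in the low byte */
+  #define FLAG_MASK       0x00ffff00ul    /* remaining bits: feature flags */
+
+  enum device_type { TYPE_A, TYPE_B, TYPE_C };
+
+  static inline enum device_type type_from_driver_data(kernel_ulong_t data)
+  {
+          return (enum device_type)(data & TYPE_MASK);
+  }
+
+  static inline unsigned long flags_from_driver_data(kernel_ulong_t data)
+  {
+          return data & FLAG_MASK;
+  }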
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 81da87f63a1e ("drm: Replace drm_gem_vram_push_to_system() with kunmap + unpin")
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: John Donnelly <john.p.donnelly@oracle.com>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Emil Velikov <emil.velikov@collabora.com>
+Cc: "Y.C. Chen" <yc_chen@aspeedtech.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: "José Roberto de Souza" <jose.souza@intel.com>
+Cc: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.3+
+Link: https://patchwork.freedesktop.org/patch/msgid/20191126101529.20356-2-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.h  |    7 +++++++
+ drivers/gpu/drm/mgag200/mgag200_main.c |    2 +-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -159,6 +159,8 @@ enum mga_type {
+       G200_EW3,
+ };
++#define MGAG200_TYPE_MASK     (0x000000ff)
++
+ #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+ struct mga_device {
+@@ -188,6 +190,11 @@ struct mga_device {
+       u32 unique_rev_id;
+ };
++static inline enum mga_type
++mgag200_type_from_driver_data(kernel_ulong_t driver_data)
++{
++      return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
++}
+                               /* mgag200_mode.c */
+ int mgag200_modeset_init(struct mga_device *mdev);
+ void mgag200_modeset_fini(struct mga_device *mdev);
+--- a/drivers/gpu/drm/mgag200/mgag200_main.c
++++ b/drivers/gpu/drm/mgag200/mgag200_main.c
+@@ -94,7 +94,7 @@ static int mgag200_device_init(struct dr
+       struct mga_device *mdev = dev->dev_private;
+       int ret, option;
+-      mdev->type = flags;
++      mdev->type = mgag200_type_from_driver_data(flags);
+       /* Hardcode the number of CRTCs to 1 */
+       mdev->num_crtc = 1;
diff --git a/queue-5.4/drm-mgag200-flag-all-g200-se-a-machines-as-broken-wrt-startadd.patch b/queue-5.4/drm-mgag200-flag-all-g200-se-a-machines-as-broken-wrt-startadd.patch
new file mode 100644 (file)
index 0000000..e41bc5f
--- /dev/null
@@ -0,0 +1,61 @@
+From 4adf0b49eea926a55fd956ef7d86750f771435ff Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Fri, 6 Dec 2019 09:19:01 +0100
+Subject: drm/mgag200: Flag all G200 SE A machines as broken wrt <startadd>
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 4adf0b49eea926a55fd956ef7d86750f771435ff upstream.
+
+Several MGA G200 SE machines don't respect the value of the startadd
+register field. After more feedback on affected machines, neither the PCI
+subvendor ID nor the internal ID seems to hint at the bug. All
+affected machines have a PCI ID of 0x0522 (i.e., G200 SE A). It was
+decided to flag all G200 SE A machines as broken.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Fixes: 1591fadf857c ("drm/mgag200: Add workaround for HW that does not support 'startadd'")
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: John Donnelly <john.p.donnelly@oracle.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: "Y.C. Chen" <yc_chen@aspeedtech.com>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: "José Roberto de Souza" <jose.souza@intel.com>
+Cc: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.3+
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Noralf Trønnes" <noralf@tronnes.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191206081901.9938-1-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
+@@ -30,9 +30,8 @@ module_param_named(modeset, mgag200_mode
+ static struct drm_driver driver;
+ static const struct pci_device_id pciidlist[] = {
+-      { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
++      { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
+-      { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+       { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+       { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+       { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
diff --git a/queue-5.4/drm-mgag200-store-flags-from-pci-driver-data-in-device-structure.patch b/queue-5.4/drm-mgag200-store-flags-from-pci-driver-data-in-device-structure.patch
new file mode 100644 (file)
index 0000000..20f7d10
--- /dev/null
@@ -0,0 +1,74 @@
+From d6d437d97d54c85a1a93967b2745e31dff03365a Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 26 Nov 2019 11:15:28 +0100
+Subject: drm/mgag200: Store flags from PCI driver data in device structure
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit d6d437d97d54c85a1a93967b2745e31dff03365a upstream.
+
+The flags field in struct mga_device has been unused so far. We now
+use it to store flag bits from the PCI driver.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Fixes: 81da87f63a1e ("drm: Replace drm_gem_vram_push_to_system() with kunmap + unpin")
+Cc: John Donnelly <john.p.donnelly@oracle.com>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: "Y.C. Chen" <yc_chen@aspeedtech.com>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: "José Roberto de Souza" <jose.souza@intel.com>
+Cc: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.3+
+Link: https://patchwork.freedesktop.org/patch/msgid/20191126101529.20356-3-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.h  |    8 ++++++++
+ drivers/gpu/drm/mgag200/mgag200_main.c |    1 +
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -160,6 +160,7 @@ enum mga_type {
+ };
+ #define MGAG200_TYPE_MASK     (0x000000ff)
++#define MGAG200_FLAG_MASK     (0x00ffff00)
+ #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+@@ -195,6 +196,13 @@ mgag200_type_from_driver_data(kernel_ulo
+ {
+       return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
+ }
++
++static inline unsigned long
++mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
++{
++      return driver_data & MGAG200_FLAG_MASK;
++}
++
+                               /* mgag200_mode.c */
+ int mgag200_modeset_init(struct mga_device *mdev);
+ void mgag200_modeset_fini(struct mga_device *mdev);
+--- a/drivers/gpu/drm/mgag200/mgag200_main.c
++++ b/drivers/gpu/drm/mgag200/mgag200_main.c
+@@ -94,6 +94,7 @@ static int mgag200_device_init(struct dr
+       struct mga_device *mdev = dev->dev_private;
+       int ret, option;
++      mdev->flags = mgag200_flags_from_driver_data(flags);
+       mdev->type = mgag200_type_from_driver_data(flags);
+       /* Hardcode the number of CRTCs to 1 */
diff --git a/queue-5.4/drm-nouveau-kms-nv50-call-outp_atomic_check_view-before-handling-pbn.patch b/queue-5.4/drm-nouveau-kms-nv50-call-outp_atomic_check_view-before-handling-pbn.patch
new file mode 100644 (file)
index 0000000..69a9e25
--- /dev/null
@@ -0,0 +1,92 @@
+From 310d35771ee9040f5744109fc277206ad96ba253 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 15 Nov 2019 16:07:18 -0500
+Subject: drm/nouveau/kms/nv50-: Call outp_atomic_check_view() before handling PBN
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 310d35771ee9040f5744109fc277206ad96ba253 upstream.
+
+Since nv50_outp_atomic_check_view() can set crtc_state->mode_changed, we
+probably should be calling it before handling any PBN changes. Just a
+precaution.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 232c9eec417a ("drm/nouveau: Use atomic VCPI helpers for MST")
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: David Airlie <airlied@redhat.com>
+Cc: Jerry Zuo <Jerry.Zuo@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Juston Li <juston.li@intel.com>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: <stable@vger.kernel.org> # v5.1+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv50/disp.c |   48 +++++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 22 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -770,32 +770,36 @@ nv50_msto_atomic_check(struct drm_encode
+       struct nv50_mstm *mstm = mstc->mstm;
+       struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+       int slots;
++      int ret;
+-      if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+-              /*
+-               * When restoring duplicated states, we need to make sure that
+-               * the bw remains the same and avoid recalculating it, as the
+-               * connector's bpc may have changed after the state was
+-               * duplicated
+-               */
+-              if (!state->duplicated) {
+-                      const int bpp = connector->display_info.bpc * 3;
+-                      const int clock = crtc_state->adjusted_mode.clock;
+-
+-                      asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+-              }
+-
+-              slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+-                                                    mstc->port,
+-                                                    asyh->dp.pbn);
+-              if (slots < 0)
+-                      return slots;
++      ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
++                                        mstc->native);
++      if (ret)
++              return ret;
++
++      if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
++              return 0;
++
++      /*
++       * When restoring duplicated states, we need to make sure that the bw
++       * remains the same and avoid recalculating it, as the connector's bpc
++       * may have changed after the state was duplicated
++       */
++      if (!state->duplicated) {
++              const int bpp = connector->display_info.bpc * 3;
++              const int clock = crtc_state->adjusted_mode.clock;
+-              asyh->dp.tu = slots;
++              asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+       }
+-      return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+-                                         mstc->native);
++      slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
++                                            asyh->dp.pbn);
++      if (slots < 0)
++              return slots;
++
++      asyh->dp.tu = slots;
++
++      return 0;
+ }
+ static void
diff --git a/queue-5.4/drm-nouveau-kms-nv50-limit-mst-bpc-to-8.patch b/queue-5.4/drm-nouveau-kms-nv50-limit-mst-bpc-to-8.patch
new file mode 100644 (file)
index 0000000..ad1940f
--- /dev/null
@@ -0,0 +1,63 @@
+From ae5769d4670982bc483885b120b557a9ffd57527 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 15 Nov 2019 16:07:20 -0500
+Subject: drm/nouveau/kms/nv50-: Limit MST BPC to 8
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit ae5769d4670982bc483885b120b557a9ffd57527 upstream.
+
+Noticed this while working on some unrelated CRC stuff. Currently,
+userspace has very little support for BPCs higher than 8. While this
+doesn't matter for most things, on MST topologies we need to be careful
+about ensuring that we do our best to make any given display
+configuration fit within the bandwidth constraints of the topology, since
+otherwise fewer people's monitor configurations will work.
+
+Allowing for BPC settings higher than 8 dramatically increases the
+required bandwidth for displays in most configurations, and consequently
+makes it a lot less likely that said display configurations will pass
+the atomic check.
+
+In the future we want to fix this correctly by making it so that we
+adjust the bpp for each display in a topology to be as high as possible,
+while making sure to lower the bpp of each display in the event that we
+run out of bandwidth and need to rerun our atomic check. But for now,
+follow the behavior that both i915 and amdgpu are sticking to.
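+
+A rough standalone illustration (made-up clock value; not the real PBN
+formula, which drm_dp_calc_pbn_mode() implements): the per-display payload
+grows linearly with bits per pixel, so clamping bpc at 8 instead of 10 cuts
+the requested bandwidth by 20%.
+
+  #include <stdio.h>
+
+  static unsigned int min_u(unsigned int a, unsigned int b)
+  {
+          return a < b ? a : b;
+  }
+
+  /* Very rough per-display cost estimate: pixel clock (kHz) * bits/pixel. */
+  static unsigned long long payload_cost(unsigned int clock_khz,
+                                         unsigned int bpc)
+  {
+          unsigned int bpp = bpc * 3;     /* RGB */
+
+          return (unsigned long long)clock_khz * bpp;
+  }
+
+  int main(void)
+  {
+          unsigned int clock = 533250;    /* assumed 4K60-ish clock in kHz */
+          unsigned int reported_bpc = 10; /* what the sink reports */
+          unsigned int used_bpc = min_u(reported_bpc, 8);  /* clamp as above */
+
+          printf("10 bpc cost: %llu, clamped 8 bpc cost: %llu\n",
+                 payload_cost(clock, reported_bpc),
+                 payload_cost(clock, used_bpc));
+          return 0;
+  }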
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 232c9eec417a ("drm/nouveau: Use atomic VCPI helpers for MST")
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: David Airlie <airlied@redhat.com>
+Cc: Jerry Zuo <Jerry.Zuo@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Juston Li <juston.li@intel.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: <stable@vger.kernel.org> # v5.1+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv50/disp.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -798,7 +798,14 @@ nv50_msto_atomic_check(struct drm_encode
+       if (!state->duplicated) {
+               const int clock = crtc_state->adjusted_mode.clock;
+-              asyh->or.bpc = connector->display_info.bpc;
++              /*
++               * XXX: Since we don't use HDR in userspace quite yet, limit
++               * the bpc to 8 to save bandwidth on the topology. In the
++               * future, we'll want to properly fix this by dynamically
++               * selecting the highest possible bpc that would fit in the
++               * topology
++               */
++              asyh->or.bpc = min(connector->display_info.bpc, 8U);
+               asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+       }
diff --git a/queue-5.4/drm-nouveau-kms-nv50-store-the-bpc-we-re-using-in-nv50_head_atom.patch b/queue-5.4/drm-nouveau-kms-nv50-store-the-bpc-we-re-using-in-nv50_head_atom.patch
new file mode 100644 (file)
index 0000000..fe15e5b
--- /dev/null
@@ -0,0 +1,186 @@
+From ac2d9275f371346922b31a388bbaa6a54f1154a4 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 15 Nov 2019 16:07:19 -0500
+Subject: drm/nouveau/kms/nv50-: Store the bpc we're using in nv50_head_atom
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit ac2d9275f371346922b31a388bbaa6a54f1154a4 upstream.
+
+In order to be able to use bpc values that are different from what the
+connector reports, we want to be able to store the bpc value we decide
+on using for an atomic state in nv50_head_atom and refer to that instead
+of simply using the value that the connector reports throughout the
+whole atomic check phase and commit phase. This will let us (eventually)
+implement the max bpc connector property, and will also be needed for
+limiting the bpc we use on MST displays to 8 in the next commit.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 232c9eec417a ("drm/nouveau: Use atomic VCPI helpers for MST")
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: David Airlie <airlied@redhat.com>
+Cc: Jerry Zuo <Jerry.Zuo@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Juston Li <juston.li@intel.com>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: <stable@vger.kernel.org> # v5.1+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv50/atom.h |    1 
+ drivers/gpu/drm/nouveau/dispnv50/disp.c |   57 ++++++++++++++++++--------------
+ drivers/gpu/drm/nouveau/dispnv50/head.c |    5 +-
+ 3 files changed, 36 insertions(+), 27 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
+@@ -114,6 +114,7 @@ struct nv50_head_atom {
+               u8 nhsync:1;
+               u8 nvsync:1;
+               u8 depth:4;
++              u8 bpc;
+       } or;
+       /* Currently only used for MST */
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encode
+                      struct drm_crtc_state *crtc_state,
+                      struct drm_connector_state *conn_state)
+ {
+-      struct nouveau_connector *nv_connector =
+-              nouveau_connector(conn_state->connector);
+-      return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+-                                         nv_connector->native_mode);
++      struct drm_connector *connector = conn_state->connector;
++      struct nouveau_connector *nv_connector = nouveau_connector(connector);
++      struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
++      int ret;
++
++      ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
++                                        nv_connector->native_mode);
++      if (ret)
++              return ret;
++
++      if (crtc_state->mode_changed || crtc_state->connectors_changed)
++              asyh->or.bpc = connector->display_info.bpc;
++
++      return 0;
+ }
+ /******************************************************************************
+@@ -786,10 +796,10 @@ nv50_msto_atomic_check(struct drm_encode
+        * may have changed after the state was duplicated
+        */
+       if (!state->duplicated) {
+-              const int bpp = connector->display_info.bpc * 3;
+               const int clock = crtc_state->adjusted_mode.clock;
+-              asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
++              asyh->or.bpc = connector->display_info.bpc;
++              asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+       }
+       slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
+@@ -802,6 +812,17 @@ nv50_msto_atomic_check(struct drm_encode
+       return 0;
+ }
++static u8
++nv50_dp_bpc_to_depth(unsigned int bpc)
++{
++      switch (bpc) {
++      case  6: return 0x2;
++      case  8: return 0x5;
++      case 10: /* fall-through */
++      default: return 0x6;
++      }
++}
++
+ static void
+ nv50_msto_enable(struct drm_encoder *encoder)
+ {
+@@ -812,7 +833,7 @@ nv50_msto_enable(struct drm_encoder *enc
+       struct nv50_mstm *mstm = NULL;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+-      u8 proto, depth;
++      u8 proto;
+       bool r;
+       drm_connector_list_iter_begin(encoder->dev, &conn_iter);
+@@ -841,14 +862,8 @@ nv50_msto_enable(struct drm_encoder *enc
+       else
+               proto = 0x9;
+-      switch (mstc->connector.display_info.bpc) {
+-      case  6: depth = 0x2; break;
+-      case  8: depth = 0x5; break;
+-      case 10:
+-      default: depth = 0x6; break;
+-      }
+-
+-      mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
++      mstm->outp->update(mstm->outp, head->base.index, armh, proto,
++                         nv50_dp_bpc_to_depth(armh->or.bpc));
+       msto->head = head;
+       msto->mstc = mstc;
+@@ -1502,20 +1517,14 @@ nv50_sor_enable(struct drm_encoder *enco
+                                       lvds.lvds.script |= 0x0200;
+                       }
+-                      if (nv_connector->base.display_info.bpc == 8)
++                      if (asyh->or.bpc == 8)
+                               lvds.lvds.script |= 0x0200;
+               }
+               nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
+               break;
+       case DCB_OUTPUT_DP:
+-              if (nv_connector->base.display_info.bpc == 6)
+-                      depth = 0x2;
+-              else
+-              if (nv_connector->base.display_info.bpc == 8)
+-                      depth = 0x5;
+-              else
+-                      depth = 0x6;
++              depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
+               if (nv_encoder->link & 1)
+                       proto = 0x8;
+@@ -1666,7 +1675,7 @@ nv50_pior_enable(struct drm_encoder *enc
+       nv50_outp_acquire(nv_encoder);
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+-      switch (nv_connector->base.display_info.bpc) {
++      switch (asyh->or.bpc) {
+       case 10: asyh->or.depth = 0x6; break;
+       case  8: asyh->or.depth = 0x5; break;
+       case  6: asyh->or.depth = 0x2; break;
+--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
+@@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv5
+                             struct nv50_head_atom *asyh,
+                             struct nouveau_conn_atom *asyc)
+ {
+-      struct drm_connector *connector = asyc->state.connector;
+       u32 mode = 0x00;
+       if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+-              if (asyh->base.depth > connector->display_info.bpc * 3)
++              if (asyh->base.depth > asyh->or.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = asyc->dither.mode;
+       }
+       if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+-              if (connector->display_info.bpc >= 8)
++              if (asyh->or.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= asyc->dither.depth;
diff --git a/queue-5.4/drm-panfrost-fix-a-bo-leak-in-panfrost_ioctl_mmap_bo.patch b/queue-5.4/drm-panfrost-fix-a-bo-leak-in-panfrost_ioctl_mmap_bo.patch
new file mode 100644 (file)
index 0000000..3ceeed8
--- /dev/null
@@ -0,0 +1,47 @@
+From 3bb69dbcb9e8430e0cc9990cff427ca3ae25ffdc Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Fri, 29 Nov 2019 14:59:03 +0100
+Subject: drm/panfrost: Fix a BO leak in panfrost_ioctl_mmap_bo()
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit 3bb69dbcb9e8430e0cc9990cff427ca3ae25ffdc upstream.
+
+We should release the reference we grabbed when an error occurs.
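+
+A generic sketch of the error-path pattern (hypothetical object and
+helpers, not the panfrost API): once the reference has been taken, every
+exit path, including the rejection of heap objects, must funnel through
+the single point that drops it.
+
+  #include <errno.h>
+
+  struct object { int refcount; int is_heap; };
+
+  static void object_get(struct object *obj) { obj->refcount++; }
+  static void object_put(struct object *obj) { obj->refcount--; }
+
+  static int do_map(struct object *obj) { (void)obj; return 0; }
+
+  static int mmap_object(struct object *obj)
+  {
+          int ret;
+
+          object_get(obj);                /* reference taken here */
+
+          if (obj->is_heap) {
+                  ret = -EINVAL;          /* reject, but don't leak the ref */
+                  goto out;
+          }
+
+          ret = do_map(obj);
+  out:
+          object_put(obj);                /* single release point */
+          return ret;
+  }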
+
+Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191129135908.2439529-4-boris.brezillon@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/panfrost/panfrost_drv.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -303,14 +303,17 @@ static int panfrost_ioctl_mmap_bo(struct
+       }
+       /* Don't allow mmapping of heap objects as pages are not pinned. */
+-      if (to_panfrost_bo(gem_obj)->is_heap)
+-              return -EINVAL;
++      if (to_panfrost_bo(gem_obj)->is_heap) {
++              ret = -EINVAL;
++              goto out;
++      }
+       ret = drm_gem_create_mmap_offset(gem_obj);
+       if (ret == 0)
+               args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+-      drm_gem_object_put_unlocked(gem_obj);
++out:
++      drm_gem_object_put_unlocked(gem_obj);
+       return ret;
+ }
diff --git a/queue-5.4/drm-panfrost-fix-a-race-in-panfrost_gem_free_object.patch b/queue-5.4/drm-panfrost-fix-a-race-in-panfrost_gem_free_object.patch
new file mode 100644 (file)
index 0000000..8c1ae3e
--- /dev/null
@@ -0,0 +1,58 @@
+From aed44cbeae2b7674cd155ba5cc6506aafe46a94e Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Fri, 29 Nov 2019 14:59:04 +0100
+Subject: drm/panfrost: Fix a race in panfrost_gem_free_object()
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit aed44cbeae2b7674cd155ba5cc6506aafe46a94e upstream.
+
+panfrost_gem_shrinker_scan() might purge a BO (release the sgt and
+kill the GPU mapping) that's being freed by panfrost_gem_free_object()
+if we don't remove the BO from the shrinker list at the beginning of
+panfrost_gem_free_object().
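+
+A kernel-style sketch of the ordering (hypothetical structure; assumes the
+usual <linux/list.h> and <linux/mutex.h> primitives): unlink the object
+from the shrinker's list under the lock before any teardown, so a
+concurrent shrinker scan can no longer reach it.
+
+  #include <linux/list.h>
+  #include <linux/mutex.h>
+
+  /* Hypothetical object shared between a free path and a shrinker. */
+  struct shrinkable_obj {
+          struct list_head madv_list;     /* on the shrinker list if purgeable */
+  };
+
+  static void obj_free(struct shrinkable_obj *obj, struct mutex *shrinker_lock)
+  {
+          /*
+           * Make the object unreachable from the shrinker first;
+           * list_del_init() is safe even if it was never added to the list.
+           */
+          mutex_lock(shrinker_lock);
+          list_del_init(&obj->madv_list);
+          mutex_unlock(shrinker_lock);
+
+          /* Only now is it safe to free sgts, mappings and the object. */
+  }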
+
+Fixes: 013b65101315 ("drm/panfrost: Add madvise and shrinker support")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191129135908.2439529-5-boris.brezillon@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/panfrost/panfrost_gem.c |   15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -19,6 +19,16 @@ static void panfrost_gem_free_object(str
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+       struct panfrost_device *pfdev = obj->dev->dev_private;
++      /*
++       * Make sure the BO is no longer inserted in the shrinker list before
++       * taking care of the destruction itself. If we don't do that we have a
++       * race condition between this function and what's done in
++       * panfrost_gem_shrinker_scan().
++       */
++      mutex_lock(&pfdev->shrinker_lock);
++      list_del_init(&bo->base.madv_list);
++      mutex_unlock(&pfdev->shrinker_lock);
++
+       if (bo->sgts) {
+               int i;
+               int n_sgt = bo->base.base.size / SZ_2M;
+@@ -33,11 +43,6 @@ static void panfrost_gem_free_object(str
+               kfree(bo->sgts);
+       }
+-      mutex_lock(&pfdev->shrinker_lock);
+-      if (!list_empty(&bo->base.madv_list))
+-              list_del(&bo->base.madv_list);
+-      mutex_unlock(&pfdev->shrinker_lock);
+-
+       drm_gem_shmem_free_object(obj);
+ }
diff --git a/queue-5.4/drm-panfrost-fix-a-race-in-panfrost_ioctl_madvise.patch b/queue-5.4/drm-panfrost-fix-a-race-in-panfrost_ioctl_madvise.patch
new file mode 100644 (file)
index 0000000..8c0de6f
--- /dev/null
@@ -0,0 +1,56 @@
+From 70cc77952efebf6722d483cb83cfb563ac9768db Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Fri, 29 Nov 2019 14:59:02 +0100
+Subject: drm/panfrost: Fix a race in panfrost_ioctl_madvise()
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit 70cc77952efebf6722d483cb83cfb563ac9768db upstream.
+
+If two threads change the MADVISE property of the same BO in parallel, we
+might end up with a shmem->madv value that's inconsistent with the
+presence of the BO in the shrinker list.
+
+The easiest solution to fix that is to protect the
+drm_gem_shmem_madvise() call with the shrinker lock.
+
+Fixes: 013b65101315 ("drm/panfrost: Add madvise and shrinker support")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191129135908.2439529-3-boris.brezillon@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/panfrost/panfrost_drv.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -347,20 +347,19 @@ static int panfrost_ioctl_madvise(struct
+               return -ENOENT;
+       }
++      mutex_lock(&pfdev->shrinker_lock);
+       args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+       if (args->retained) {
+               struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+-              mutex_lock(&pfdev->shrinker_lock);
+-
+               if (args->madv == PANFROST_MADV_DONTNEED)
+-                      list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
++                      list_add_tail(&bo->base.madv_list,
++                                    &pfdev->shrinker_list);
+               else if (args->madv == PANFROST_MADV_WILLNEED)
+                       list_del_init(&bo->base.madv_list);
+-
+-              mutex_unlock(&pfdev->shrinker_lock);
+       }
++      mutex_unlock(&pfdev->shrinker_lock);
+       drm_gem_object_put_unlocked(gem_obj);
+       return 0;
diff --git a/queue-5.4/drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch b/queue-5.4/drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch
new file mode 100644 (file)
index 0000000..df4f5a0
--- /dev/null
@@ -0,0 +1,51 @@
+From 008037d4d972c9c47b273e40e52ae34f9d9e33e7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 26 Nov 2019 09:41:46 -0500
+Subject: drm/radeon: fix r1xx/r2xx register checker for POT textures
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 008037d4d972c9c47b273e40e52ae34f9d9e33e7 upstream.
+
+Shift and mask were reversed.  Noticed by chance.
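+
+The swap only matters because these masks are defined in place (already
+shifted up to the field's bit position). A tiny standalone illustration
+with made-up field definitions:
+
+  #include <assert.h>
+  #include <stdint.h>
+
+  #define WIDTH_SHIFT     8
+  #define WIDTH_MASK      (0xf << WIDTH_SHIFT)    /* in-place mask, bits 11:8 */
+
+  int main(void)
+  {
+          uint32_t reg = 0xa << WIDTH_SHIFT;      /* field value 10 at bits 11:8 */
+
+          /* Correct: mask first, then shift the field down. */
+          uint32_t good = (reg & WIDTH_MASK) >> WIDTH_SHIFT;
+
+          /* Wrong: shifting first and masking with an in-place mask. */
+          uint32_t bad = (reg >> WIDTH_SHIFT) & WIDTH_MASK;
+
+          assert(good == 0xa);
+          assert(bad == 0);               /* 0xa & 0xf00 == 0 */
+          return 0;
+  }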
+
+Tested-by: Meelis Roos <mroos@linux.ee>
+Reviewed-by: Michel Dänzer <mdaenzer@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/r100.c |    4 ++--
+ drivers/gpu/drm/radeon/r200.c |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -1826,8 +1826,8 @@ static int r100_packet0_check(struct rad
+                       track->textures[i].use_pitch = 1;
+               } else {
+                       track->textures[i].use_pitch = 0;
+-                      track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+-                      track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++                      track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++                      track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+               }
+               if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+                       track->textures[i].tex_coord_type = 2;
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_
+                       track->textures[i].use_pitch = 1;
+               } else {
+                       track->textures[i].use_pitch = 0;
+-                      track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+-                      track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++                      track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++                      track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+               }
+               if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+                       track->textures[i].lookup_disable = true;
diff --git a/queue-5.4/gfs2-fix-glock-reference-problem-in-gfs2_trans_remove_revoke.patch b/queue-5.4/gfs2-fix-glock-reference-problem-in-gfs2_trans_remove_revoke.patch
new file mode 100644 (file)
index 0000000..f70e366
--- /dev/null
@@ -0,0 +1,83 @@
+From fe5e7ba11fcf1d75af8173836309e8562aefedef Mon Sep 17 00:00:00 2001
+From: Bob Peterson <rpeterso@redhat.com>
+Date: Thu, 14 Nov 2019 09:49:11 -0500
+Subject: gfs2: fix glock reference problem in gfs2_trans_remove_revoke
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+commit fe5e7ba11fcf1d75af8173836309e8562aefedef upstream.
+
+Commit 9287c6452d2b fixed a situation in which gfs2 could use a glock
+after it had been freed. To do that, it temporarily added a new glock
+reference by calling gfs2_glock_hold in function gfs2_add_revoke.
+However, if the bd element was removed by gfs2_trans_remove_revoke, it
+failed to drop the additional reference.
+
+This patch adds logic to gfs2_trans_remove_revoke to properly drop the
+additional glock reference.
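+
+A generic sketch of the refcount rule (hypothetical names, not the gfs2
+API): whichever path removes the queued element must also drop the extra
+reference that was taken when the element was queued.
+
+  #include <stdlib.h>
+
+  struct lock_obj { int refcount; };
+
+  struct revoke_entry {
+          struct lock_obj *lock;  /* pins one reference while queued */
+  };
+
+  static void lock_hold(struct lock_obj *l) { l->refcount++; }
+  static void lock_put(struct lock_obj *l)  { l->refcount--; }
+
+  static struct revoke_entry *revoke_add(struct lock_obj *l)
+  {
+          struct revoke_entry *e = malloc(sizeof(*e));
+
+          if (!e)
+                  return NULL;
+          e->lock = l;
+          lock_hold(l);           /* extra reference taken when queued */
+          return e;
+  }
+
+  /* Every removal path must drop the reference taken in revoke_add(). */
+  static void revoke_remove(struct revoke_entry *e)
+  {
+          if (e->lock)
+                  lock_put(e->lock);
+          free(e);
+  }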
+
+Fixes: 9287c6452d2b ("gfs2: Fix occasional glock use-after-free")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/log.c   |    8 ++++++++
+ fs/gfs2/log.h   |    1 +
+ fs/gfs2/lops.c  |    5 +----
+ fs/gfs2/trans.c |    2 ++
+ 4 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -609,6 +609,14 @@ void gfs2_add_revoke(struct gfs2_sbd *sd
+       list_add(&bd->bd_list, &sdp->sd_log_revokes);
+ }
++void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
++{
++      if (atomic_dec_return(&gl->gl_revokes) == 0) {
++              clear_bit(GLF_LFLUSH, &gl->gl_flags);
++              gfs2_glock_queue_put(gl);
++      }
++}
++
+ void gfs2_write_revokes(struct gfs2_sbd *sdp)
+ {
+       struct gfs2_trans *tr;
+--- a/fs/gfs2/log.h
++++ b/fs/gfs2/log.h
+@@ -77,6 +77,7 @@ extern void gfs2_ail1_flush(struct gfs2_
+ extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
+ extern int gfs2_logd(void *data);
+ extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
++extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+ extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
+ #endif /* __LOG_DOT_H__ */
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -882,10 +882,7 @@ static void revoke_lo_after_commit(struc
+               bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+               list_del_init(&bd->bd_list);
+               gl = bd->bd_gl;
+-              if (atomic_dec_return(&gl->gl_revokes) == 0) {
+-                      clear_bit(GLF_LFLUSH, &gl->gl_flags);
+-                      gfs2_glock_queue_put(gl);
+-              }
++              gfs2_glock_remove_revoke(gl);
+               kmem_cache_free(gfs2_bufdata_cachep, bd);
+       }
+ }
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -262,6 +262,8 @@ void gfs2_trans_remove_revoke(struct gfs
+                       list_del_init(&bd->bd_list);
+                       gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
+                       sdp->sd_log_num_revoke--;
++                      if (bd->bd_gl)
++                              gfs2_glock_remove_revoke(bd->bd_gl);
+                       kmem_cache_free(gfs2_bufdata_cachep, bd);
+                       tr->tr_num_revoke--;
+                       if (--n == 0)
diff --git a/queue-5.4/gfs2-multi-block-allocations-in-gfs2_page_mkwrite.patch b/queue-5.4/gfs2-multi-block-allocations-in-gfs2_page_mkwrite.patch
new file mode 100644 (file)
index 0000000..4c1abef
--- /dev/null
@@ -0,0 +1,68 @@
+From f53056c43063257ae4159d83c425eaeb772bcd71 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Thu, 7 Nov 2019 18:06:14 +0000
+Subject: gfs2: Multi-block allocations in gfs2_page_mkwrite
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit f53056c43063257ae4159d83c425eaeb772bcd71 upstream.
+
+In gfs2_page_mkwrite's gfs2_allocate_page_backing helper, try to
+allocate as many blocks at once as we need.  Pass in the size of the
+requested allocation.
+
+Fixes: 35af80aef99b ("gfs2: don't use buffer_heads in gfs2_allocate_page_backing")
+Cc: stable@vger.kernel.org # v5.3+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/file.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -381,27 +381,28 @@ static void gfs2_size_hint(struct file *
+ /**
+  * gfs2_allocate_page_backing - Allocate blocks for a write fault
+  * @page: The (locked) page to allocate backing for
++ * @length: Size of the allocation
+  *
+  * We try to allocate all the blocks required for the page in one go.  This
+  * might fail for various reasons, so we keep trying until all the blocks to
+  * back this page are allocated.  If some of the blocks are already allocated,
+  * that is ok too.
+  */
+-static int gfs2_allocate_page_backing(struct page *page)
++static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+ {
+       u64 pos = page_offset(page);
+-      u64 size = PAGE_SIZE;
+       do {
+               struct iomap iomap = { };
+-              if (gfs2_iomap_get_alloc(page->mapping->host, pos, 1, &iomap))
++              if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
+                       return -EIO;
+-              iomap.length = min(iomap.length, size);
+-              size -= iomap.length;
++              if (length < iomap.length)
++                      iomap.length = length;
++              length -= iomap.length;
+               pos += iomap.length;
+-      } while (size > 0);
++      } while (length > 0);
+       return 0;
+ }
+@@ -501,7 +502,7 @@ static vm_fault_t gfs2_page_mkwrite(stru
+       if (gfs2_is_stuffed(ip))
+               ret = gfs2_unstuff_dinode(ip, page);
+       if (ret == 0)
+-              ret = gfs2_allocate_page_backing(page);
++              ret = gfs2_allocate_page_backing(page, PAGE_SIZE);
+ out_trans_end:
+       if (ret)
diff --git a/queue-5.4/rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch b/queue-5.4/rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch
new file mode 100644 (file)
index 0000000..fc99de7
--- /dev/null
@@ -0,0 +1,79 @@
+From c3dadc19b7564c732598b30d637c6f275c3b77b6 Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Fri, 4 Oct 2019 15:27:01 -0700
+Subject: rpmsg: glink: Don't send pending rx_done during remove
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit c3dadc19b7564c732598b30d637c6f275c3b77b6 upstream.
+
+Attempting to transmit rx_done messages while the GLINK instance is
+being torn down will cause use-after-free bugs and memory leaks. So cancel
+the intent_work and free up the pending intents.
+
+With this there are no concurrent accessors of the channel left during
+qcom_glink_native_remove(), and there is therefore no need to hold the
+spinlock during this operation - which would prohibit the use of
+cancel_work_sync() in the release function. So remove the locking.
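+
+A kernel-style sketch of the teardown order (hypothetical channel layout;
+assumes the standard workqueue, list and spinlock primitives): cancel the
+deferred rx_done work first, then free whatever that work would have
+consumed.
+
+  #include <linux/list.h>
+  #include <linux/slab.h>
+  #include <linux/spinlock.h>
+  #include <linux/workqueue.h>
+
+  /* Hypothetical per-channel state, loosely modelled on a GLINK channel. */
+  struct chan {
+          struct work_struct intent_work; /* sends rx_done for done_intents */
+          struct list_head done_intents;
+          spinlock_t intent_lock;
+  };
+
+  struct intent {
+          struct list_head node;
+          void *data;
+          bool reuse;
+  };
+
+  static void chan_release(struct chan *ch)
+  {
+          struct intent *in, *tmp;
+          unsigned long flags;
+
+          /* No rx_done may be sent once teardown has started. */
+          cancel_work_sync(&ch->intent_work);
+
+          /* Free the non-reusable intents the work would have handled. */
+          spin_lock_irqsave(&ch->intent_lock, flags);
+          list_for_each_entry_safe(in, tmp, &ch->done_intents, node) {
+                  if (!in->reuse) {
+                          list_del(&in->node);
+                          kfree(in->data);
+                          kfree(in);
+                  }
+          }
+          spin_unlock_irqrestore(&ch->intent_lock, flags);
+  }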
+
+Fixes: 1d2ea36eead9 ("rpmsg: glink: Add rx done command")
+Cc: stable@vger.kernel.org
+Acked-by: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -241,11 +241,23 @@ static void qcom_glink_channel_release(s
+ {
+       struct glink_channel *channel = container_of(ref, struct glink_channel,
+                                                    refcount);
++      struct glink_core_rx_intent *intent;
+       struct glink_core_rx_intent *tmp;
+       unsigned long flags;
+       int iid;
++      /* cancel pending rx_done work */
++      cancel_work_sync(&channel->intent_work);
++
+       spin_lock_irqsave(&channel->intent_lock, flags);
++      /* Free all non-reuse intents pending rx_done work */
++      list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
++              if (!intent->reuse) {
++                      kfree(intent->data);
++                      kfree(intent);
++              }
++      }
++
+       idr_for_each_entry(&channel->liids, tmp, iid) {
+               kfree(tmp->data);
+               kfree(tmp);
+@@ -1625,7 +1637,6 @@ void qcom_glink_native_remove(struct qco
+       struct glink_channel *channel;
+       int cid;
+       int ret;
+-      unsigned long flags;
+       disable_irq(glink->irq);
+       cancel_work_sync(&glink->rx_work);
+@@ -1634,7 +1645,6 @@ void qcom_glink_native_remove(struct qco
+       if (ret)
+               dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
+-      spin_lock_irqsave(&glink->idr_lock, flags);
+       /* Release any defunct local channels, waiting for close-ack */
+       idr_for_each_entry(&glink->lcids, channel, cid)
+               kref_put(&channel->refcount, qcom_glink_channel_release);
+@@ -1645,7 +1655,6 @@ void qcom_glink_native_remove(struct qco
+       idr_destroy(&glink->lcids);
+       idr_destroy(&glink->rcids);
+-      spin_unlock_irqrestore(&glink->idr_lock, flags);
+       mbox_free_channel(glink->mbox_chan);
+ }
+ EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
diff --git a/queue-5.4/rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch b/queue-5.4/rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch
new file mode 100644 (file)
index 0000000..443b38c
--- /dev/null
@@ -0,0 +1,49 @@
+From b85f6b601407347f5425c4c058d1b7871f5bf4f0 Mon Sep 17 00:00:00 2001
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:57 -0700
+Subject: rpmsg: glink: Fix reuse intents memory leak issue
+
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+
+commit b85f6b601407347f5425c4c058d1b7871f5bf4f0 upstream.
+
+Memory allocated for re-usable intents is not freed during channel
+cleanup, which causes a memory leak in the system.
+
+Check and free all re-usable memory to avoid the memory leak.
+
+Fixes: 933b45da5d1d ("rpmsg: glink: Add support for TX intents")
+Cc: stable@vger.kernel.org
+Acked-By: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Reported-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -241,10 +241,19 @@ static void qcom_glink_channel_release(s
+ {
+       struct glink_channel *channel = container_of(ref, struct glink_channel,
+                                                    refcount);
++      struct glink_core_rx_intent *tmp;
+       unsigned long flags;
++      int iid;
+       spin_lock_irqsave(&channel->intent_lock, flags);
++      idr_for_each_entry(&channel->liids, tmp, iid) {
++              kfree(tmp->data);
++              kfree(tmp);
++      }
+       idr_destroy(&channel->liids);
++
++      idr_for_each_entry(&channel->riids, tmp, iid)
++              kfree(tmp);
+       idr_destroy(&channel->riids);
+       spin_unlock_irqrestore(&channel->intent_lock, flags);
diff --git a/queue-5.4/rpmsg-glink-fix-rpmsg_register_device-err-handling.patch b/queue-5.4/rpmsg-glink-fix-rpmsg_register_device-err-handling.patch
new file mode 100644 (file)
index 0000000..f59fddc
--- /dev/null
@@ -0,0 +1,44 @@
+From f7e714988edaffe6ac578318e99501149b067ba0 Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:27:00 -0700
+Subject: rpmsg: glink: Fix rpmsg_register_device err handling
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit f7e714988edaffe6ac578318e99501149b067ba0 upstream.
+
+The device release function is set before registering with rpmsg. If
+rpmsg registration fails, the framework will call device_put(), which
+invokes the release function. The channel create logic does not need to
+free rpdev if rpmsg_register_device() fails and release is called.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1423,15 +1423,13 @@ static int qcom_glink_rx_open(struct qco
+               ret = rpmsg_register_device(rpdev);
+               if (ret)
+-                      goto free_rpdev;
++                      goto rcid_remove;
+               channel->rpdev = rpdev;
+       }
+       return 0;
+-free_rpdev:
+-      kfree(rpdev);
+ rcid_remove:
+       spin_lock_irqsave(&glink->idr_lock, flags);
+       idr_remove(&glink->rcids, channel->rcid);
diff --git a/queue-5.4/rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch b/queue-5.4/rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch
new file mode 100644 (file)
index 0000000..b1631e3
--- /dev/null
@@ -0,0 +1,44 @@
+From ac74ea01860170699fb3b6ea80c0476774c8e94f Mon Sep 17 00:00:00 2001
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:58 -0700
+Subject: rpmsg: glink: Fix use after free in open_ack TIMEOUT case
+
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+
+commit ac74ea01860170699fb3b6ea80c0476774c8e94f upstream.
+
+An extra channel reference put when the remote sends OPEN_ACK after a
+timeout causes a use-after-free while handling the next remote CLOSE command.
+
+Remove the extra reference put in the timeout case to avoid the use-after-free.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1103,13 +1103,12 @@ static int qcom_glink_create_remote(stru
+ close_link:
+       /*
+        * Send a close request to "undo" our open-ack. The close-ack will
+-       * release the last reference.
++       * release qcom_glink_send_open_req() reference and the last reference
++       * will be relesed after receiving remote_close or transport unregister
++       * by calling qcom_glink_native_remove().
+        */
+       qcom_glink_send_close_req(glink, channel);
+-      /* Release qcom_glink_send_open_req() reference */
+-      kref_put(&channel->refcount, qcom_glink_channel_release);
+-
+       return ret;
+ }
diff --git a/queue-5.4/rpmsg-glink-free-pending-deferred-work-on-remove.patch b/queue-5.4/rpmsg-glink-free-pending-deferred-work-on-remove.patch
new file mode 100644 (file)
index 0000000..93b9fad
--- /dev/null
@@ -0,0 +1,53 @@
+From 278bcb7300f61785dba63840bd2a8cf79f14554c Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Fri, 4 Oct 2019 15:27:02 -0700
+Subject: rpmsg: glink: Free pending deferred work on remove
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 278bcb7300f61785dba63840bd2a8cf79f14554c upstream.
+
+By just cancelling the deferred rx worker during GLINK instance teardown,
+any pending deferred commands are leaked, so free them.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Acked-by: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1562,6 +1562,18 @@ static void qcom_glink_work(struct work_
+       }
+ }
++static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
++{
++      struct glink_defer_cmd *dcmd;
++      struct glink_defer_cmd *tmp;
++
++      /* cancel any pending deferred rx_work */
++      cancel_work_sync(&glink->rx_work);
++
++      list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
++              kfree(dcmd);
++}
++
+ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
+                                          unsigned long features,
+                                          struct qcom_glink_pipe *rx,
+@@ -1639,7 +1651,7 @@ void qcom_glink_native_remove(struct qco
+       int ret;
+       disable_irq(glink->irq);
+-      cancel_work_sync(&glink->rx_work);
++      qcom_glink_cancel_rx_work(glink);
+       ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
+       if (ret)
diff --git a/queue-5.4/rpmsg-glink-put-an-extra-reference-during-cleanup.patch b/queue-5.4/rpmsg-glink-put-an-extra-reference-during-cleanup.patch
new file mode 100644 (file)
index 0000000..644da60
--- /dev/null
@@ -0,0 +1,39 @@
+From b646293e272816dd0719529dcebbd659de0722f7 Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:59 -0700
+Subject: rpmsg: glink: Put an extra reference during cleanup
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit b646293e272816dd0719529dcebbd659de0722f7 upstream.
+
+In a remote processor crash scenario, there is no guarantee the remote
+processor sent close requests before it went into a bad state. Remove
+the reference that is normally handled by the close command so the
+channel resources can be released.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Reported-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1641,6 +1641,10 @@ void qcom_glink_native_remove(struct qco
+       idr_for_each_entry(&glink->lcids, channel, cid)
+               kref_put(&channel->refcount, qcom_glink_channel_release);
++      /* Release any defunct local channels, waiting for close-req */
++      idr_for_each_entry(&glink->rcids, channel, cid)
++              kref_put(&channel->refcount, qcom_glink_channel_release);
++
+       idr_destroy(&glink->lcids);
+       idr_destroy(&glink->rcids);
+       spin_unlock_irqrestore(&glink->idr_lock, flags);
diff --git a/queue-5.4/rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch b/queue-5.4/rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch
new file mode 100644 (file)
index 0000000..5c36e4b
--- /dev/null
@@ -0,0 +1,33 @@
+From 4623e8bf1de0b86e23a56cdb39a72f054e89c3bd Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Wed, 27 Jun 2018 18:19:57 -0700
+Subject: rpmsg: glink: Set tail pointer to 0 at end of FIFO
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit 4623e8bf1de0b86e23a56cdb39a72f054e89c3bd upstream.
+
+When wrapping around the FIFO, the remote expects the tail pointer to
+be reset to 0 on the edge case where the tail equals the FIFO length.
+
+Fixes: caf989c350e8 ("rpmsg: glink: Introduce glink smem based transport")
+Cc: stable@vger.kernel.org
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_smem.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rpmsg/qcom_glink_smem.c
++++ b/drivers/rpmsg/qcom_glink_smem.c
+@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct
+       tail = le32_to_cpu(*pipe->tail);
+       tail += count;
+-      if (tail > pipe->native.length)
++      if (tail >= pipe->native.length)
+               tail -= pipe->native.length;
+       *pipe->tail = cpu_to_le32(tail);
diff --git a/queue-5.4/scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch b/queue-5.4/scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch
new file mode 100644 (file)
index 0000000..9532a17
--- /dev/null
@@ -0,0 +1,119 @@
+From 5480e299b5ae57956af01d4839c9fc88a465eeab Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Mon, 9 Dec 2019 09:34:57 -0800
+Subject: scsi: iscsi: Fix a potential deadlock in the timeout handler
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 5480e299b5ae57956af01d4839c9fc88a465eeab upstream.
+
+Some time ago the block layer was modified such that timeout handlers are
+called from thread context instead of interrupt context. Make it safe to
+run the iSCSI timeout handler in thread context. This patch fixes the
+following lockdep complaint:
+
+================================
+WARNING: inconsistent lock state
+5.5.1-dbg+ #11 Not tainted
+--------------------------------
+inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+kworker/7:1H/206 [HC0[0]:SC0[0]:HE1:SE1] takes:
+ffff88802d9827e8 (&(&session->frwd_lock)->rlock){+.?.}, at: iscsi_eh_cmd_timed_out+0xa6/0x6d0 [libiscsi]
+{IN-SOFTIRQ-W} state was registered at:
+  lock_acquire+0x106/0x240
+  _raw_spin_lock+0x38/0x50
+  iscsi_check_transport_timeouts+0x3e/0x210 [libiscsi]
+  call_timer_fn+0x132/0x470
+  __run_timers.part.0+0x39f/0x5b0
+  run_timer_softirq+0x63/0xc0
+  __do_softirq+0x12d/0x5fd
+  irq_exit+0xb3/0x110
+  smp_apic_timer_interrupt+0x131/0x3d0
+  apic_timer_interrupt+0xf/0x20
+  default_idle+0x31/0x230
+  arch_cpu_idle+0x13/0x20
+  default_idle_call+0x53/0x60
+  do_idle+0x38a/0x3f0
+  cpu_startup_entry+0x24/0x30
+  start_secondary+0x222/0x290
+  secondary_startup_64+0xa4/0xb0
+irq event stamp: 1383705
+hardirqs last  enabled at (1383705): [<ffffffff81aace5c>] _raw_spin_unlock_irq+0x2c/0x50
+hardirqs last disabled at (1383704): [<ffffffff81aacb98>] _raw_spin_lock_irq+0x18/0x50
+softirqs last  enabled at (1383690): [<ffffffffa0e2efea>] iscsi_queuecommand+0x76a/0xa20 [libiscsi]
+softirqs last disabled at (1383682): [<ffffffffa0e2e998>] iscsi_queuecommand+0x118/0xa20 [libiscsi]
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+       CPU0
+       ----
+  lock(&(&session->frwd_lock)->rlock);
+  <Interrupt>
+    lock(&(&session->frwd_lock)->rlock);
+
+ *** DEADLOCK ***
+
+2 locks held by kworker/7:1H/206:
+ #0: ffff8880d57bf928 ((wq_completion)kblockd){+.+.}, at: process_one_work+0x472/0xab0
+ #1: ffff88802b9c7de8 ((work_completion)(&q->timeout_work)){+.+.}, at: process_one_work+0x476/0xab0
+
+stack backtrace:
+CPU: 7 PID: 206 Comm: kworker/7:1H Not tainted 5.5.1-dbg+ #11
+Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+Workqueue: kblockd blk_mq_timeout_work
+Call Trace:
+ dump_stack+0xa5/0xe6
+ print_usage_bug.cold+0x232/0x23b
+ mark_lock+0x8dc/0xa70
+ __lock_acquire+0xcea/0x2af0
+ lock_acquire+0x106/0x240
+ _raw_spin_lock+0x38/0x50
+ iscsi_eh_cmd_timed_out+0xa6/0x6d0 [libiscsi]
+ scsi_times_out+0xf4/0x440 [scsi_mod]
+ scsi_timeout+0x1d/0x20 [scsi_mod]
+ blk_mq_check_expired+0x365/0x3a0
+ bt_iter+0xd6/0xf0
+ blk_mq_queue_tag_busy_iter+0x3de/0x650
+ blk_mq_timeout_work+0x1af/0x380
+ process_one_work+0x56d/0xab0
+ worker_thread+0x7a/0x5d0
+ kthread+0x1bc/0x210
+ ret_from_fork+0x24/0x30
+
+Fixes: 287922eb0b18 ("block: defer timeouts to a workqueue")
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Keith Busch <keith.busch@intel.com>
+Cc: Lee Duncan <lduncan@suse.com>
+Cc: Chris Leech <cleech@redhat.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191209173457.187370-1-bvanassche@acm.org
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/libiscsi.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_ti
+       ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
+-      spin_lock(&session->frwd_lock);
++      spin_lock_bh(&session->frwd_lock);
+       task = (struct iscsi_task *)sc->SCp.ptr;
+       if (!task) {
+               /*
+@@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_ti
+ done:
+       if (task)
+               task->last_timeout = jiffies;
+-      spin_unlock(&session->frwd_lock);
++      spin_unlock_bh(&session->frwd_lock);
+       ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+                    "timer reset" : "shutdown or nh");
+       return rc;
diff --git a/queue-5.4/scsi-qla2xxx-added-support-for-mpi-and-pep-regions-for-isp28xx.patch b/queue-5.4/scsi-qla2xxx-added-support-for-mpi-and-pep-regions-for-isp28xx.patch
new file mode 100644 (file)
index 0000000..5820911
--- /dev/null
@@ -0,0 +1,81 @@
+From a530bf691f0e4691214562c165e6c8889dc51e57 Mon Sep 17 00:00:00 2001
+From: Michael Hernandez <mhernandez@marvell.com>
+Date: Tue, 3 Dec 2019 14:36:56 -0800
+Subject: scsi: qla2xxx: Added support for MPI and PEP regions for ISP28XX
+
+From: Michael Hernandez <mhernandez@marvell.com>
+
+commit a530bf691f0e4691214562c165e6c8889dc51e57 upstream.
+
+This patch adds support for MPI/PEP region updates, which is required for
+secure flash updates on ISP28XX.
+
+Fixes: 3f006ac342c0 ("scsi: qla2xxx: Secure flash update support for ISP28XX")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191203223657.22109-3-hmadhani@marvell.com
+Signed-off-by: Michael Hernandez <mhernandez@marvell.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_fw.h  |    4 ++++
+ drivers/scsi/qla2xxx/qla_sup.c |   27 ++++++++++++++++++++++-----
+ 2 files changed, 26 insertions(+), 5 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_fw.h
++++ b/drivers/scsi/qla2xxx/qla_fw.h
+@@ -1523,6 +1523,10 @@ struct qla_flt_header {
+ #define FLT_REG_NVRAM_SEC_28XX_1      0x10F
+ #define FLT_REG_NVRAM_SEC_28XX_2      0x111
+ #define FLT_REG_NVRAM_SEC_28XX_3      0x113
++#define FLT_REG_MPI_PRI_28XX          0xD3
++#define FLT_REG_MPI_SEC_28XX          0xF0
++#define FLT_REG_PEP_PRI_28XX          0xD1
++#define FLT_REG_PEP_SEC_28XX          0xF1
+ struct qla_flt_region {
+       uint16_t code;
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -2725,8 +2725,11 @@ qla28xx_write_flash_data(scsi_qla_host_t
+               ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                   "Region %x is secure\n", region.code);
+-              if (region.code == FLT_REG_FW ||
+-                  region.code == FLT_REG_FW_SEC_27XX) {
++              switch (region.code) {
++              case FLT_REG_FW:
++              case FLT_REG_FW_SEC_27XX:
++              case FLT_REG_MPI_PRI_28XX:
++              case FLT_REG_MPI_SEC_28XX:
+                       fw_array = dwptr;
+                       /* 1st fw array */
+@@ -2757,9 +2760,23 @@ qla28xx_write_flash_data(scsi_qla_host_t
+                               buf_size_without_sfub += risc_size;
+                               fw_array += risc_size;
+                       }
+-              } else {
+-                      ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+-                          "Secure region %x not supported\n",
++                      break;
++
++              case FLT_REG_PEP_PRI_28XX:
++              case FLT_REG_PEP_SEC_28XX:
++                      fw_array = dwptr;
++
++                      /* 1st fw array */
++                      risc_size = be32_to_cpu(fw_array[3]);
++                      risc_attr = be32_to_cpu(fw_array[9]);
++
++                      buf_size_without_sfub = risc_size;
++                      fw_array += risc_size;
++                      break;
++
++              default:
++                      ql_log(ql_log_warn + ql_dbg_verbose, vha,
++                          0xffff, "Secure region %x not supported\n",
+                           region.code);
+                       rval = QLA_COMMAND_ERROR;
+                       goto done;
diff --git a/queue-5.4/scsi-qla2xxx-change-discovery-state-before-plogi.patch b/queue-5.4/scsi-qla2xxx-change-discovery-state-before-plogi.patch
new file mode 100644 (file)
index 0000000..e103033
--- /dev/null
@@ -0,0 +1,40 @@
+From 58e39a2ce4be08162c0368030cdc405f7fd849aa Mon Sep 17 00:00:00 2001
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+Date: Mon, 25 Nov 2019 19:56:54 +0300
+Subject: scsi: qla2xxx: Change discovery state before PLOGI
+
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+
+commit 58e39a2ce4be08162c0368030cdc405f7fd849aa upstream.
+
+When a port sends PLOGI, the discovery state should be changed to login
+pending, otherwise the RELOGIN_NEEDED bit is set in
+qla24xx_handle_plogi_done_event(). RELOGIN_NEEDED triggers another PLOGI,
+and it never exits the loop until the login timer expires.
+
+Fixes: 8777e4314d397 ("scsi: qla2xxx: Migrate NVME N2N handling into state machine")
+Fixes: 8b5292bcfcacf ("scsi: qla2xxx: Fix Relogin to prevent modifying scan_state flag")
+Cc: Quinn Tran <qutran@marvell.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191125165702.1013-6-r.bolshakov@yadro.com
+Acked-by: Himanshu Madhani <hmadhani@marvell.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Tested-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_init.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -534,6 +534,7 @@ static int qla_post_els_plogi_work(struc
+       e->u.fcport.fcport = fcport;
+       fcport->flags |= FCF_ASYNC_ACTIVE;
++      fcport->disc_state = DSC_LOGIN_PEND;
+       return qla2x00_post_work(vha, e);
+ }
diff --git a/queue-5.4/scsi-qla2xxx-correctly-retrieve-and-interpret-active-flash-region.patch b/queue-5.4/scsi-qla2xxx-correctly-retrieve-and-interpret-active-flash-region.patch
new file mode 100644 (file)
index 0000000..815d733
--- /dev/null
@@ -0,0 +1,71 @@
+From 4e71dcae0c4cd1e9d19b8b3d80214a4bcdca5a42 Mon Sep 17 00:00:00 2001
+From: Himanshu Madhani <hmadhani@marvell.com>
+Date: Tue, 3 Dec 2019 14:36:55 -0800
+Subject: scsi: qla2xxx: Correctly retrieve and interpret active flash region
+
+From: Himanshu Madhani <hmadhani@marvell.com>
+
+commit 4e71dcae0c4cd1e9d19b8b3d80214a4bcdca5a42 upstream.
+
+ISP27XX/28XX supports multiple flash regions. This patch fixes an issue
+where the active flash region was not interpreted correctly during the
+secure flash update process.
+
+[mkp: typo]
+
+Fixes: 5fa8774c7f38c ("scsi: qla2xxx: Add 28xx flash primary/secondary status/image mechanism")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191203223657.22109-2-hmadhani@marvell.com
+Signed-off-by: Michael Hernandez <mhernandez@marvell.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_attr.c |    1 +
+ drivers/scsi/qla2xxx/qla_bsg.c  |    2 +-
+ drivers/scsi/qla2xxx/qla_sup.c  |    6 +++---
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -176,6 +176,7 @@ qla2x00_sysfs_read_nvram(struct file *fi
+       faddr = ha->flt_region_nvram;
+       if (IS_QLA28XX(ha)) {
++              qla28xx_get_aux_images(vha, &active_regions);
+               if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_nvram_sec;
+       }
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -2399,7 +2399,7 @@ qla2x00_get_flash_image_status(struct bs
+       struct qla_active_regions regions = { };
+       struct active_regions active_regions = { };
+-      qla28xx_get_aux_images(vha, &active_regions);
++      qla27xx_get_active_image(vha, &active_regions);
+       regions.global_image = active_regions.global;
+       if (IS_QLA28XX(ha)) {
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -847,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vh
+                               ha->flt_region_img_status_pri = start;
+                       break;
+               case FLT_REG_IMG_SEC_27XX:
+-                      if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
++                      if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               ha->flt_region_img_status_sec = start;
+                       break;
+               case FLT_REG_FW_SEC_27XX:
+-                      if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
++                      if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               ha->flt_region_fw_sec = start;
+                       break;
+               case FLT_REG_BOOTLOAD_SEC_27XX:
+-                      if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
++                      if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               ha->flt_region_boot_sec = start;
+                       break;
+               case FLT_REG_AUX_IMG_PRI_28XX:
diff --git a/queue-5.4/scsi-qla2xxx-fix-incorrect-sfub-length-used-for-secure-flash-update-mb-cmd.patch b/queue-5.4/scsi-qla2xxx-fix-incorrect-sfub-length-used-for-secure-flash-update-mb-cmd.patch
new file mode 100644 (file)
index 0000000..ec9dadb
--- /dev/null
@@ -0,0 +1,34 @@
+From c868907e1ac6a08a17f8fa9ce482c0a496896e9e Mon Sep 17 00:00:00 2001
+From: Michael Hernandez <mhernandez@marvell.com>
+Date: Tue, 3 Dec 2019 14:36:57 -0800
+Subject: scsi: qla2xxx: Fix incorrect SFUB length used for Secure Flash Update MB Cmd
+
+From: Michael Hernandez <mhernandez@marvell.com>
+
+commit c868907e1ac6a08a17f8fa9ce482c0a496896e9e upstream.
+
+SFUB length should be in DWORDs when passed to FW.
+
+Fixes: 3f006ac342c03 ("scsi: qla2xxx: Secure flash update support for ISP28XX")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191203223657.22109-4-hmadhani@marvell.com
+Signed-off-by: Michael Hernandez <mhernandez@marvell.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_sup.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -2897,7 +2897,7 @@ qla28xx_write_flash_data(scsi_qla_host_t
+                           "Sending Secure Flash MB Cmd\n");
+                       rval = qla28xx_secure_flash_update(vha, 0, region.code,
+                               buf_size_without_sfub, sfub_dma,
+-                              sizeof(struct secure_flash_update_block));
++                              sizeof(struct secure_flash_update_block) >> 2);
+                       if (rval != QLA_SUCCESS) {
+                               ql_log(ql_log_warn, vha, 0xffff,
+                                   "Secure Flash MB Cmd failed %x.", rval);
diff --git a/queue-5.4/scsi-qla2xxx-ignore-null-pointer-in-tcm_qla2xxx_free_mcmd.patch b/queue-5.4/scsi-qla2xxx-ignore-null-pointer-in-tcm_qla2xxx_free_mcmd.patch
new file mode 100644 (file)
index 0000000..41f0de3
--- /dev/null
@@ -0,0 +1,91 @@
+From f2c9ee54a56995a293efef290657d8a1d80e14ab Mon Sep 17 00:00:00 2001
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+Date: Mon, 25 Nov 2019 19:56:50 +0300
+Subject: scsi: qla2xxx: Ignore NULL pointer in tcm_qla2xxx_free_mcmd
+
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+
+commit f2c9ee54a56995a293efef290657d8a1d80e14ab upstream.
+
+If ABTS cannot be completed in target mode, the driver attempts to free
+the related management command and crashes:
+
+  NIP [d000000019181ee8] tcm_qla2xxx_free_mcmd+0x40/0x80 [tcm_qla2xxx]
+  LR [d00000001dc1e6f8] qlt_response_pkt+0x190/0xa10 [qla2xxx]
+  Call Trace:
+  [c000003fff27bb50] [c000003fff27bc10] 0xc000003fff27bc10 (unreliable)
+  [c000003fff27bb70] [d00000001dc1e6f8] qlt_response_pkt+0x190/0xa10 [qla2xxx]
+  [c000003fff27bc10] [d00000001dbc2be0] qla24xx_process_response_queue+0x5d8/0xbd0 [qla2xxx]
+  [c000003fff27bd50] [d00000001dbc632c] qla24xx_msix_rsp_q+0x64/0x150 [qla2xxx]
+  [c000003fff27bde0] [c000000000187200] __handle_irq_event_percpu+0x90/0x310
+  [c000003fff27bea0] [c0000000001874b8] handle_irq_event_percpu+0x38/0x90
+  [c000003fff27bee0] [c000000000187574] handle_irq_event+0x64/0xb0
+  [c000003fff27bf10] [c00000000018cd38] handle_fasteoi_irq+0xe8/0x280
+  [c000003fff27bf40] [c000000000185ccc] generic_handle_irq+0x4c/0x70
+  [c000003fff27bf60] [c000000000016cec] __do_irq+0x7c/0x1d0
+  [c000003fff27bf90] [c00000000002a530] call_do_irq+0x14/0x24
+  [c00000207d2cba90] [c000000000016edc] do_IRQ+0x9c/0x130
+  [c00000207d2cbae0] [c000000000008bf4] hardware_interrupt_common+0x114/0x120
+  --- interrupt: 501 at arch_local_irq_restore+0x74/0x90
+      LR = arch_local_irq_restore+0x74/0x90
+  [c00000207d2cbdd0] [c0000000001c64fc] tick_broadcast_oneshot_control+0x4c/0x60 (unreliable)
+  [c00000207d2cbdf0] [c0000000007ac840] cpuidle_enter_state+0xf0/0x450
+  [c00000207d2cbe50] [c00000000016b81c] call_cpuidle+0x4c/0x90
+  [c00000207d2cbe70] [c00000000016bc30] do_idle+0x2b0/0x330
+  [c00000207d2cbec0] [c00000000016beec] cpu_startup_entry+0x3c/0x50
+  [c00000207d2cbef0] [c00000000004a06c] start_secondary+0x63c/0x670
+  [c00000207d2cbf90] [c00000000000aa6c] start_secondary_prolog+0x10/0x14
+
+The crash can be triggered by ACL deletion when there's active I/O.
+
+During ACL deletion, qla2xxx performs an implicit LOGO that is invisible
+to the initiator. Only the driver and firmware are aware of the logout.
+Therefore the initiator continues to send SCSI commands and the target
+always responds with SAM STATUS BUSY as it can't find the session.
+
+The command times out after a while and initiator invokes ABORT TASK TMF
+for the command. The TMF is mapped to ABTS-LS in FCP. The target can't find
+a session for the S_ID originating the ABTS-LS, so it never allocates an
+mcmd. And since the N_Port handle was deleted after the LOGO, it is no
+longer valid and the ABTS Response IOCB is returned from firmware with
+status 31. Then free_mcmd is invoked on a NULL pointer and the kernel crashes.
+
+[ 7734.578642] qla2xxx [0000:00:0c.0]-e837:6: ABTS_RECV_24XX: instance 0
+[ 7734.578644] qla2xxx [0000:00:0c.0]-f811:6: qla_target(0): task abort (s_id=1:2:0, tag=1209504, param=0)
+[ 7734.578645] find_sess_by_s_id: 0x010200
+[ 7734.578645] Unable to locate s_id: 0x010200
+[ 7734.578646] qla2xxx [0000:00:0c.0]-f812:6: qla_target(0): task abort for non-existent session
+[ 7734.578648] qla2xxx [0000:00:0c.0]-e806:6: Sending task mgmt ABTS response (ha=c0000000d5819000, atio=c0000000d3fd4700, status=4
+[ 7734.578730] qla2xxx [0000:00:0c.0]-e838:6: ABTS_RESP_24XX: compl_status 31
+[ 7734.578732] qla2xxx [0000:00:0c.0]-e863:6: qla_target(0): ABTS_RESP_24XX failed 31 (subcode 19:a)
+[ 7734.578740] Unable to handle kernel paging request for data at address 0x00000200
+
+Fixes: 6b0431d6fa20b ("scsi: qla2xxx: Fix out of order Termination and ABTS response")
+Cc: Quinn Tran <qutran@marvell.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: Thomas Abraham <tabraham@suse.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191125165702.1013-2-r.bolshakov@yadro.com
+Acked-by: Himanshu Madhani <hmadhani@marvell.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Tested-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/tcm_qla2xxx.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(st
+  */
+ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+ {
++      if (!mcmd)
++              return;
+       INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+       queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+ }
diff --git a/queue-5.4/scsi-qla2xxx-initialize-free_work-before-flushing-it.patch b/queue-5.4/scsi-qla2xxx-initialize-free_work-before-flushing-it.patch
new file mode 100644 (file)
index 0000000..f7dca65
--- /dev/null
@@ -0,0 +1,82 @@
+From 4c86b037a6db3ad2922ef3ba8a8989eb7794e040 Mon Sep 17 00:00:00 2001
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+Date: Mon, 25 Nov 2019 19:56:52 +0300
+Subject: scsi: qla2xxx: Initialize free_work before flushing it
+
+From: Roman Bolshakov <r.bolshakov@yadro.com>
+
+commit 4c86b037a6db3ad2922ef3ba8a8989eb7794e040 upstream.
+
+Target creation triggers a new BUG_ON introduced in commit 4d43d395fed1
+("workqueue: Try to catch flush_work() without INIT_WORK().").  The BUG_ON
+reveals an attempt to flush free_work in qla24xx_do_nack_work before it's
+initialized in qlt_unreg_sess:
+
+  WARNING: CPU: 7 PID: 211 at kernel/workqueue.c:3031 __flush_work.isra.38+0x40/0x2e0
+  CPU: 7 PID: 211 Comm: kworker/7:1 Kdump: loaded Tainted: G            E     5.3.0-rc7-vanilla+ #2
+  Workqueue: qla2xxx_wq qla2x00_iocb_work_fn [qla2xxx]
+  NIP:  c000000000159620 LR: c0080000009d91b0 CTR: c0000000001598c0
+  REGS: c000000005f3f730 TRAP: 0700   Tainted: G            E      (5.3.0-rc7-vanilla+)
+  MSR:  800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE>  CR: 24002222  XER: 00000000
+  CFAR: c0000000001598d0 IRQMASK: 0
+  GPR00: c0080000009d91b0 c000000005f3f9c0 c000000001670a00 c0000003f8655ca8
+  GPR04: c0000003f8655c00 000000000000ffff 0000000000000011 ffffffffffffffff
+  GPR08: c008000000949228 0000000000000000 0000000000000001 c0080000009e7780
+  GPR12: 0000000000002200 c00000003fff6200 c000000000161bc8 0000000000000004
+  GPR16: c0000003f9d68280 0000000002000000 0000000000000005 0000000000000003
+  GPR20: 0000000000000002 000000000000ffff 0000000000000000 fffffffffffffef7
+  GPR24: c000000004f73848 c000000004f73838 c000000004f73f28 c000000005f3fb60
+  GPR28: c000000004f73e48 c000000004f73c80 c000000004f73818 c0000003f9d68280
+  NIP [c000000000159620] __flush_work.isra.38+0x40/0x2e0
+  LR [c0080000009d91b0] qla24xx_do_nack_work+0x88/0x180 [qla2xxx]
+  Call Trace:
+  [c000000005f3f9c0] [c000000000159644] __flush_work.isra.38+0x64/0x2e0 (unreliable)
+  [c000000005f3fa50] [c0080000009d91a0] qla24xx_do_nack_work+0x78/0x180 [qla2xxx]
+  [c000000005f3fae0] [c0080000009496ec] qla2x00_do_work+0x604/0xb90 [qla2xxx]
+  [c000000005f3fc40] [c008000000949cd8] qla2x00_iocb_work_fn+0x60/0xe0 [qla2xxx]
+  [c000000005f3fc80] [c000000000157bb8] process_one_work+0x2c8/0x5b0
+  [c000000005f3fd10] [c000000000157f28] worker_thread+0x88/0x660
+  [c000000005f3fdb0] [c000000000161d64] kthread+0x1a4/0x1b0
+  [c000000005f3fe20] [c00000000000b960] ret_from_kernel_thread+0x5c/0x7c
+  Instruction dump:
+  3d22001d 892966b1 7d908026 91810008 f821ff71 69290001 0b090000 2e290000
+  40920200 e9230018 7d2a0074 794ad182 <0b0a0000> 2fa90000 419e01e8 7c0802a6
+  ---[ end trace 5ccf335d4f90fcb8 ]---
+
+Fixes: 1021f0bc2f3d6 ("scsi: qla2xxx: allow session delete to finish before create.")
+Cc: Quinn Tran <qutran@marvell.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191125165702.1013-4-r.bolshakov@yadro.com
+Acked-by: Himanshu Madhani <hmadhani@marvell.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Tested-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_init.c   |    1 +
+ drivers/scsi/qla2xxx/qla_target.c |    1 -
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -4847,6 +4847,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vh
+       }
+       INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
++      INIT_WORK(&fcport->free_work, qlt_free_session_done);
+       INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
+       INIT_LIST_HEAD(&fcport->gnl_entry);
+       INIT_LIST_HEAD(&fcport->list);
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1160,7 +1160,6 @@ void qlt_unreg_sess(struct fc_port *sess
+       sess->last_rscn_gen = sess->rscn_gen;
+       sess->last_login_gen = sess->login_gen;
+-      INIT_WORK(&sess->free_work, qlt_free_session_done);
+       queue_work(sess->vha->hw->wq, &sess->free_work);
+ }
+ EXPORT_SYMBOL(qlt_unreg_sess);
diff --git a/queue-5.4/scsi-ufs-disable-autohibern8-feature-in-cadence-ufs.patch b/queue-5.4/scsi-ufs-disable-autohibern8-feature-in-cadence-ufs.patch
new file mode 100644 (file)
index 0000000..a09546e
--- /dev/null
@@ -0,0 +1,41 @@
+From d168001d14eccfda229b4a41a2c31a21e3c379da Mon Sep 17 00:00:00 2001
+From: sheebab <sheebab@cadence.com>
+Date: Tue, 3 Dec 2019 11:07:15 +0100
+Subject: scsi: ufs: Disable autohibern8 feature in Cadence UFS
+
+From: sheebab <sheebab@cadence.com>
+
+commit d168001d14eccfda229b4a41a2c31a21e3c379da upstream.
+
+This patch disables the autohibern8 feature in Cadence UFS. The autohibern8
+feature has issues that cause unexpected interrupt triggers.
+After the interrupt issue is sorted out, the autohibern8 feature will be
+re-enabled.
+
+Link: https://lore.kernel.org/r/1575367635-22662-1-git-send-email-sheebab@cadence.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: sheebab <sheebab@cadence.com>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Tested-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ufs/cdns-pltfrm.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/scsi/ufs/cdns-pltfrm.c
++++ b/drivers/scsi/ufs/cdns-pltfrm.c
+@@ -99,6 +99,12 @@ static int cdns_ufs_link_startup_notify(
+        */
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
++      /*
++       * Disabling Autohibern8 feature in cadence UFS
++       * to mask unexpected interrupt trigger.
++       */
++      hba->ahit = 0;
++
+       return 0;
+ }
index c789ebb73017be791a6acb8c60dfe614ffc963c2..bdfe6e915bccdbf1b85e1fcd1948bf057b8b08fb 100644 (file)
@@ -12,3 +12,67 @@ pci-do-not-use-bus-number-zero-from-ea-capability.patch
 pci-rcar-fix-missing-macctlr-register-setting-in-initialization-sequence.patch
 pci-apply-cavium-acs-quirk-to-thunderx2-and-thunderx3.patch
 pm-qos-redefine-freq_qos_max_default_value-to-s32_max.patch
+block-fix-check-bi_size-overflow-before-merge.patch
+xtensa-use-memblock_alloc_anywhere-for-kasan-shadow-map.patch
+gfs2-multi-block-allocations-in-gfs2_page_mkwrite.patch
+gfs2-fix-glock-reference-problem-in-gfs2_trans_remove_revoke.patch
+xtensa-fix-tlb-sanity-checker.patch
+xtensa-fix-syscall_set_return_value.patch
+rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch
+rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch
+rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch
+rpmsg-glink-put-an-extra-reference-during-cleanup.patch
+rpmsg-glink-fix-rpmsg_register_device-err-handling.patch
+rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch
+rpmsg-glink-free-pending-deferred-work-on-remove.patch
+cifs-smbd-return-eagain-when-transport-is-reconnecting.patch
+cifs-smbd-only-queue-work-for-error-recovery-on-memory-registration.patch
+cifs-smbd-add-messages-on-rdma-session-destroy-and-reconnection.patch
+cifs-smbd-return-einval-when-the-number-of-iovs-exceeds-smbdirect_max_sge.patch
+cifs-smbd-return-econnaborted-when-trasnport-is-not-in-connected-state.patch
+cifs-don-t-display-rdma-transport-on-reconnect.patch
+cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch
+cifs-close-open-handle-after-interrupted-close.patch
+cifs-do-not-miss-cancelled-open-responses.patch
+cifs-fix-null-pointer-dereference-in-mid-callback.patch
+cifs-fix-retrieval-of-dfs-referrals-in-cifs_mount.patch
+arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch
+arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch
+vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch
+dma-buf-fix-memory-leak-in-sync_file_merge.patch
+drm-panfrost-fix-a-race-in-panfrost_ioctl_madvise.patch
+drm-panfrost-fix-a-bo-leak-in-panfrost_ioctl_mmap_bo.patch
+drm-panfrost-fix-a-race-in-panfrost_gem_free_object.patch
+drm-mgag200-extract-device-type-from-flags.patch
+drm-mgag200-store-flags-from-pci-driver-data-in-device-structure.patch
+drm-mgag200-add-workaround-for-hw-that-does-not-support-startadd.patch
+drm-mgag200-flag-all-g200-se-a-machines-as-broken-wrt-startadd.patch
+drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch
+dm-mpath-remove-harmful-bio-based-optimization.patch
+dm-btree-increase-rebalance-threshold-in-__rebalance2.patch
+dm-clone-metadata-track-exact-changes-per-transaction.patch
+dm-clone-metadata-use-a-two-phase-commit.patch
+dm-clone-flush-destination-device-before-committing-metadata.patch
+dm-thin-metadata-add-support-for-a-pre-commit-callback.patch
+dm-thin-flush-data-device-before-committing-metadata.patch
+scsi-ufs-disable-autohibern8-feature-in-cadence-ufs.patch
+scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch
+scsi-qla2xxx-ignore-null-pointer-in-tcm_qla2xxx_free_mcmd.patch
+scsi-qla2xxx-initialize-free_work-before-flushing-it.patch
+scsi-qla2xxx-added-support-for-mpi-and-pep-regions-for-isp28xx.patch
+scsi-qla2xxx-change-discovery-state-before-plogi.patch
+scsi-qla2xxx-correctly-retrieve-and-interpret-active-flash-region.patch
+scsi-qla2xxx-fix-incorrect-sfub-length-used-for-secure-flash-update-mb-cmd.patch
+drm-nouveau-kms-nv50-call-outp_atomic_check_view-before-handling-pbn.patch
+drm-nouveau-kms-nv50-store-the-bpc-we-re-using-in-nv50_head_atom.patch
+drm-nouveau-kms-nv50-limit-mst-bpc-to-8.patch
+drm-i915-fbc-disable-fbc-by-default-on-all-glk.patch
+drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch
+drm-dp_mst-correct-the-bug-in-drm_dp_update_payload_part1.patch
+drm-amd-display-re-enable-wait-in-pipelock-but-add-timeout.patch
+drm-amd-display-add-default-clocks-if-not-able-to-fetch-them.patch
+drm-amdgpu-initialize-vm_inv_eng0_sem-for-gfxhub-and-mmhub.patch
+drm-amdgpu-invalidate-mmhub-semaphore-workaround-in-gmc9-gmc10.patch
+drm-amdgpu-gfx10-explicitly-wait-for-cp-idle-after-halt-unhalt.patch
+drm-amdgpu-gfx10-re-init-clear-state-buffer-after-gpu-reset.patch
+drm-i915-gvt-fix-cmd-length-check-for-mi_atomic.patch
diff --git a/queue-5.4/vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch b/queue-5.4/vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch
new file mode 100644 (file)
index 0000000..63b5b74
--- /dev/null
@@ -0,0 +1,55 @@
+From d567fb8819162099035e546b11a736e29c2af0ea Mon Sep 17 00:00:00 2001
+From: Jiang Yi <giangyi@amazon.com>
+Date: Wed, 27 Nov 2019 17:49:10 +0100
+Subject: vfio/pci: call irq_bypass_unregister_producer() before freeing irq
+
+From: Jiang Yi <giangyi@amazon.com>
+
+commit d567fb8819162099035e546b11a736e29c2af0ea upstream.
+
+Since irq_bypass_register_producer() is called after request_irq(), we
+should do tear-down in reverse order: irq_bypass_unregister_producer()
+then free_irq().
+
+Specifically free_irq() may release resources required by the
+irqbypass del_producer() callback.  Notably an example provided by
+Marc Zyngier on arm64 with GICv4 that he indicates has the potential
+to wedge the hardware:
+
+ free_irq(irq)
+   __free_irq(irq)
+     irq_domain_deactivate_irq(irq)
+       its_irq_domain_deactivate()
+         [unmap the VLPI from the ITS]
+
+ kvm_arch_irq_bypass_del_producer(cons, prod)
+   kvm_vgic_v4_unset_forwarding(kvm, irq, ...)
+     its_unmap_vlpi(irq)
+       [Unmap the VLPI from the ITS (again), remap the original LPI]
+
+Signed-off-by: Jiang Yi <giangyi@amazon.com>
+Cc: stable@vger.kernel.org # v4.4+
+Fixes: 6d7425f109d26 ("vfio: Register/unregister irq_bypass_producer")
+Link: https://lore.kernel.org/kvm/20191127164910.15888-1-giangyi@amazon.com
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+[aw: commit log]
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/pci/vfio_pci_intrs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -294,8 +294,8 @@ static int vfio_msi_set_vector_signal(st
+       irq = pci_irq_vector(pdev, vector);
+       if (vdev->ctx[vector].trigger) {
+-              free_irq(irq, vdev->ctx[vector].trigger);
+               irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
++              free_irq(irq, vdev->ctx[vector].trigger);
+               kfree(vdev->ctx[vector].name);
+               eventfd_ctx_put(vdev->ctx[vector].trigger);
+               vdev->ctx[vector].trigger = NULL;
diff --git a/queue-5.4/xtensa-fix-syscall_set_return_value.patch b/queue-5.4/xtensa-fix-syscall_set_return_value.patch
new file mode 100644 (file)
index 0000000..7481887
--- /dev/null
@@ -0,0 +1,31 @@
+From c2d9aa3b6e56de56c7f1ed9026ca6ec7cfbeef19 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Thu, 14 Nov 2019 15:05:40 -0800
+Subject: xtensa: fix syscall_set_return_value
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit c2d9aa3b6e56de56c7f1ed9026ca6ec7cfbeef19 upstream.
+
+syscall return value is in the register a2, not a0.
+
+Cc: stable@vger.kernel.org # v5.0+
+Fixes: 9f24f3c1067c ("xtensa: implement tracehook functions and enable HAVE_ARCH_TRACEHOOK")
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/include/asm/syscall.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/xtensa/include/asm/syscall.h
++++ b/arch/xtensa/include/asm/syscall.h
+@@ -51,7 +51,7 @@ static inline void syscall_set_return_va
+                                           struct pt_regs *regs,
+                                           int error, long val)
+ {
+-      regs->areg[0] = (long) error ? error : val;
++      regs->areg[2] = (long) error ? error : val;
+ }
+ #define SYSCALL_MAX_ARGS 6
diff --git a/queue-5.4/xtensa-fix-tlb-sanity-checker.patch b/queue-5.4/xtensa-fix-tlb-sanity-checker.patch
new file mode 100644 (file)
index 0000000..9fadddf
--- /dev/null
@@ -0,0 +1,47 @@
+From 36de10c4788efc6efe6ff9aa10d38cb7eea4c818 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 13 Nov 2019 13:18:31 -0800
+Subject: xtensa: fix TLB sanity checker
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 36de10c4788efc6efe6ff9aa10d38cb7eea4c818 upstream.
+
+Virtual and translated addresses retrieved by the xtensa TLB sanity
+checker must be consistent, i.e. correspond to the same state of the
+checked TLB entry. KASAN shadow memory is mapped dynamically using
+auto-refill TLB entries and thus may change TLB state between the
+virtual and translated address retrieval, resulting in a false TLB
+insanity report.
+Move read_xtlb_translation close to read_xtlb_virtual to make sure that
+read values are consistent.
+
+Cc: stable@vger.kernel.org
+Fixes: a99e07ee5e88 ("xtensa: check TLB sanity on return to userspace")
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/mm/tlb.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/mm/tlb.c
++++ b/arch/xtensa/mm/tlb.c
+@@ -216,6 +216,8 @@ static int check_tlb_entry(unsigned w, u
+       unsigned tlbidx = w | (e << PAGE_SHIFT);
+       unsigned r0 = dtlb ?
+               read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
++      unsigned r1 = dtlb ?
++              read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
+       unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+       unsigned pte = get_pte_for_vaddr(vpn);
+       unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+@@ -231,8 +233,6 @@ static int check_tlb_entry(unsigned w, u
+       }
+       if (tlb_asid == mm_asid) {
+-              unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+-                      read_itlb_translation(tlbidx);
+               if ((pte ^ r1) & PAGE_MASK) {
+                       pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+                                       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
diff --git a/queue-5.4/xtensa-use-memblock_alloc_anywhere-for-kasan-shadow-map.patch b/queue-5.4/xtensa-use-memblock_alloc_anywhere-for-kasan-shadow-map.patch
new file mode 100644 (file)
index 0000000..490bb14
--- /dev/null
@@ -0,0 +1,37 @@
+From e64681b487c897ec871465083bf0874087d47b66 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 13 Nov 2019 16:06:42 -0800
+Subject: xtensa: use MEMBLOCK_ALLOC_ANYWHERE for KASAN shadow map
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit e64681b487c897ec871465083bf0874087d47b66 upstream.
+
+The KASAN shadow map doesn't need to be accessible through the linear kernel
+mapping; allocate its pages with MEMBLOCK_ALLOC_ANYWHERE so that high
+memory can be used. This frees up to ~100MB of low memory on xtensa
+configurations with KASAN and high memory.
+
+Cc: stable@vger.kernel.org # v5.1+
+Fixes: f240ec09bb8a ("memblock: replace memblock_alloc_base(ANYWHERE) with memblock_phys_alloc")
+Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/mm/kasan_init.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/xtensa/mm/kasan_init.c
++++ b/arch/xtensa/mm/kasan_init.c
+@@ -56,7 +56,9 @@ static void __init populate(void *start,
+               for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+                       phys_addr_t phys =
+-                              memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
++                              memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
++                                                        0,
++                                                        MEMBLOCK_ALLOC_ANYWHERE);
+                       if (!phys)
+                               panic("Failed to allocate page table page\n");