--- /dev/null
+From f2d57038ffe626ce22f60918f4402be0ddecfa59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 20:21:26 +0800
+Subject: drm/amdgpu: fix gfx hang during suspend with video playback (v2)
+
+From: Prike Liang <Prike.Liang@amd.com>
+
+[ Upstream commit 487eca11a321ef33bcf4ca5adb3c0c4954db1b58 ]
+
+The system hangs during S3 suspend because the SMU is left pending when
+the GC does not respond to the CP_HQD_ACTIVE register access request.
+The root cause is accessing a GC register while GFX CGPG is entered;
+fix it by disabling GFX CGPG before performing the suspend.
+
+v2: Disable GFX CGPG instead of using the RLC safe mode guard.
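+
+A minimal sketch of the resulting ordering (illustrative; the real change
+is in the diff below). The CG/PG ungate now happens once in
+amdgpu_device_suspend(), before suspend paths that may access GC
+registers such as CP_HQD_ACTIVE:
+
+  /* amdgpu_device_suspend(), abbreviated */
+  amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+  amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+  amdgpu_amdkfd_suspend(adev);    /* may touch CP_HQD_ACTIVE */
+  amdgpu_ras_suspend(adev);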
+
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Tested-by: Mengbing Wang <Mengbing.Wang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9a8a1c6ca3210..7d340c9ec3037 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2259,8 +2259,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+@@ -3242,6 +3240,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ }
+ }
+
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
++
+ amdgpu_amdkfd_suspend(adev);
+
+ amdgpu_ras_suspend(adev);
+--
+2.20.1
+
--- /dev/null
+From d6ae8ef2607cb45748d0dfc2412d6ade5af90fc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2020 16:57:44 +0100
+Subject: drm/bridge: analogix-anx78xx: Fix drm_dp_link helper removal
+
+From: Torsten Duwe <duwe@lst.de>
+
+[ Upstream commit 3e138a63d6674a4567a018a31e467567c40b14d5 ]
+
+drm_dp_link_rate_to_bw_code and ...bw_code_to_link_rate simply divide by
+and multiply with 27000, respectively. Avoid an overflow in the u8 dpcd[0]
+and the multiply+divide altogether.
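+
+A userspace sketch of the overflow being avoided (the 27000 factors are
+from the helpers named above; the stand-in functions below are
+assumptions for illustration, not the drm code):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  static int bw_code_to_link_rate(uint8_t code) { return code * 27000; }
+  static uint8_t link_rate_to_bw_code(int rate) { return rate / 27000; }
+
+  int main(void)
+  {
+          uint8_t dpcd0 = bw_code_to_link_rate(0x14); /* 540000 truncates to 96 */
+          /* round-trip yields bw code 0 instead of 0x14 */
+          printf("%d\n", link_rate_to_bw_code(dpcd0));
+          return 0;
+  }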
+
+Signed-off-by: Torsten Duwe <duwe@lst.de>
+Fixes: ff1e8fb68ea0 ("drm/bridge: analogix-anx78xx: Avoid drm_dp_link helpers")
+Cc: Thierry Reding <treding@nvidia.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Andrzej Hajda <a.hajda@samsung.com>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+Cc: Jonas Karlman <jonas@kwiboo.se>
+Cc: Jernej Skrabec <jernej.skrabec@siol.net>
+Cc: <stable@vger.kernel.org> # v5.5+
+Reviewed-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200218155744.9675368BE1@verein.lst.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/analogix-anx78xx.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
+index 274989f96a916..914263a1afab4 100644
+--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
++++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
+@@ -866,10 +866,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
+ if (err)
+ return err;
+
+- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
++ SP_DP_MAIN_LINK_BW_SET_REG,
++ anx78xx->dpcd[DP_MAX_LINK_RATE]);
+ if (err)
+ return err;
+
+--
+2.20.1
+
--- /dev/null
+From 18353ac0aaa73e71f88dfa40e4118837da71e61f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 14:43:20 -0500
+Subject: drm/dp_mst: Fix clearing payload state on topology disable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lyude Paul <lyude@redhat.com>
+
+[ Upstream commit 8732fe46b20c951493bfc4dba0ad08efdf41de81 ]
+
+The issues caused by:
+
+commit 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology
+mgr")
+
+Prompted me to take a closer look at how we clear the payload state in
+general when disabling the topology, and it turns out there are actually
+two subtle issues here.
+
+The first is that we're not grabbing &mgr.payload_lock when clearing the
+payloads in drm_dp_mst_topology_mgr_set_mst(). Seeing as the canonical
+lock order is &mgr.payload_lock -> &mgr.lock (because we always want
+&mgr.lock to be the inner-most lock so topology validation always
+works), this makes perfect sense. It also means that -technically- there
+could be racing between someone calling
+drm_dp_mst_topology_mgr_set_mst() to disable the topology, along with a
+modeset occurring that's modifying the payload state at the same time.
+
+The second is the more obvious issue that Wayne Lin discovered, that
+we're not clearing proposed_payloads when disabling the topology.
+
+I actually can't see any obvious places where the racing caused by the
+first issue would break something, and it could be that some of our
+higher-level locks already prevent this by happenstance, but better safe
+than sorry. So, let's make it so that drm_dp_mst_topology_mgr_set_mst()
+first grabs &mgr.payload_lock followed by &mgr.lock so that we never
+race when modifying the payload state. Then, we also clear
+proposed_payloads to fix the original issue of enabling a new topology
+with a dirty payload state. This doesn't clear any of the drm_dp_vcpi
+structures, but those are getting destroyed along with the ports anyway.
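+
+As a sketch, the nesting this enforces (outer to inner; &mgr.lock stays
+innermost so topology validation keeps working):
+
+  mutex_lock(&mgr->payload_lock);
+  mutex_lock(&mgr->lock);
+  /* ... clear payloads and proposed_vcpis ... */
+  mutex_unlock(&mgr->lock);
+  mutex_unlock(&mgr->payload_lock);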
+
+Changes since v1:
+* Use sizeof(mgr->payloads[0])/sizeof(mgr->proposed_vcpis[0]) instead -
+ vsyrjala
+
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200122194321.14953-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index c4b692dff5956..c9dd41175853e 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3439,6 +3439,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
++ mutex_lock(&mgr->payload_lock);
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+@@ -3497,7 +3498,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
++ memset(mgr->payloads, 0,
++ mgr->max_payloads * sizeof(mgr->payloads[0]));
++ memset(mgr->proposed_vcpis, 0,
++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ mgr->vcpi_mask = 0;
+@@ -3505,6 +3509,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+
+ out_unlock:
+ mutex_unlock(&mgr->lock);
++ mutex_unlock(&mgr->payload_lock);
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
+ return ret;
+--
+2.20.1
+
--- /dev/null
+From 3f8aaf7d5e039d3d6279f1215c1ccc3282357b3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2020 18:22:44 +0300
+Subject: drm/i915/icl+: Don't enable DDI IO power on a TypeC port in TBT mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+The DDI IO power well must not be enabled for a TypeC port in TBT mode,
+ensure this during driver loading/system resume.
+
+This gets rid of error messages like
+[drm] *ERROR* power well DDI E TC2 IO state mismatch (refcount 1/enabled 0)
+
+and avoids leaking the power ref when disabling the output.
+
+Cc: <stable@vger.kernel.org> # v5.4+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200330152244.11316-1-imre.deak@intel.com
+(cherry picked from commit f77a2db27f26c3ccba0681f7e89fef083718f07f)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 1488822398fed..4872c357eb6da 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -2235,7 +2235,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ return;
+
+ dig_port = enc_to_dig_port(&encoder->base);
+- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
++
++ if (!intel_phy_is_tc(dev_priv, phy) ||
++ dig_port->tc_mode != TC_PORT_TBT_ALT)
++ intel_display_power_get(dev_priv,
++ dig_port->ddi_io_power_domain);
+
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+--
+2.20.1
+
--- /dev/null
+From 661b510df6cd8f045e98014f27c5c023aed7c359 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 16:21:49 +0530
+Subject: mmc: sdhci: Convert sdhci_set_timeout_irq() to non-static
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+[ Upstream commit 7907ebe741a7f14ed12889ebe770438a4ff47613 ]
+
+Export sdhci_set_data_timeout_irq() so that it is accessible from platform drivers.
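+
+For instance, a platform driver that handles data timeouts itself could
+mask the hardware data-timeout interrupt (hypothetical driver code, not
+part of this patch):
+
+  static void foo_set_timeout(struct sdhci_host *host,
+                              struct mmc_command *cmd)
+  {
+          /* rely on a software timer instead of the HW timeout irq */
+          sdhci_set_data_timeout_irq(host, false);
+  }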
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200116105154.7685-6-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci.c | 3 ++-
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 659a9459ace34..29c854e48bc69 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -992,7 +992,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+ {
+ if (enable)
+ host->ier |= SDHCI_INT_DATA_TIMEOUT;
+@@ -1001,6 +1001,7 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
+
+ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index fe83ece6965b1..4613d71b3cd6e 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -795,5 +795,6 @@ void sdhci_end_tuning(struct sdhci_host *host);
+ void sdhci_reset_tuning(struct sdhci_host *host);
+ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+
+ #endif /* __SDHCI_HW_H */
+--
+2.20.1
+
--- /dev/null
+From e521173455782d018f11f576e1ade8847a4e9e09 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 16:21:50 +0530
+Subject: mmc: sdhci: Refactor sdhci_set_timeout()
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+[ Upstream commit 7d76ed77cfbd39468ae58d419f537d35ca892d83 ]
+
+Refactor sdhci_set_timeout() such that platform drivers can do some
+functionality in a set_timeout() callback and then call
+__sdhci_set_timeout() to complete the operation.
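+
+A hypothetical platform ->set_timeout() hook would then look like:
+
+  static void foo_set_timeout(struct sdhci_host *host,
+                              struct mmc_command *cmd)
+  {
+          foo_apply_timeout_quirk(host);   /* platform-specific part */
+          __sdhci_set_timeout(host, cmd);  /* common sdhci handling */
+  }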
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200116105154.7685-7-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci.c | 38 ++++++++++++++++++++------------------
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 21 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 29c854e48bc69..1c9ca6864be36 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1003,27 +1003,29 @@ void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
+
+-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+- u8 count;
+-
+- if (host->ops->set_timeout) {
+- host->ops->set_timeout(host, cmd);
+- } else {
+- bool too_big = false;
+-
+- count = sdhci_calc_timeout(host, cmd, &too_big);
++ bool too_big = false;
++ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
++
++ if (too_big &&
++ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
++ sdhci_calc_sw_timeout(host, cmd);
++ sdhci_set_data_timeout_irq(host, false);
++ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
++ sdhci_set_data_timeout_irq(host, true);
++ }
+
+- if (too_big &&
+- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+- sdhci_calc_sw_timeout(host, cmd);
+- sdhci_set_data_timeout_irq(host, false);
+- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+- sdhci_set_data_timeout_irq(host, true);
+- }
++ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
++}
++EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
+
+- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+- }
++static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
++{
++ if (host->ops->set_timeout)
++ host->ops->set_timeout(host, cmd);
++ else
++ __sdhci_set_timeout(host, cmd);
+ }
+
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 4613d71b3cd6e..76e69288632db 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -796,5 +796,6 @@ void sdhci_reset_tuning(struct sdhci_host *host);
+ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+ void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
+
+ #endif /* __SDHCI_HW_H */
+--
+2.20.1
+
--- /dev/null
+From d7d2120a3ae5520dd447496272c44757409d2303 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2020 10:34:36 -0400
+Subject: NFS: finish_automount() requires us to hold 2 refs to the mount
+ record
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 75da98586af75eb80664714a67a9895bf0a5517e ]
+
+We must not return from nfs_d_automount() without holding 2 references
+to the mount record. Doing so will trigger the BUG() in finish_automount().
+Also ensure that we don't try to reschedule the automount timer with
+a negative or zero timeout value.
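+
+A sketch of the reference contract (the submount call below is a
+hypothetical stand-in, not the exact helper):
+
+  /* inside nfs_d_automount(), abbreviated */
+  mnt = do_nfs_submount(...);  /* ref #1 from creating the mount */
+  mntget(mnt);                 /* ref #2, now taken unconditionally */
+  /* finish_automount() BUG()s if fewer than 2 refs are held */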
+
+Fixes: 22a1ae9a93fb ("NFS: If nfs_mountpoint_expiry_timeout < 0, do not expire submounts")
+Cc: stable@vger.kernel.org # v5.5+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/namespace.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 5e0e9d29f5c57..0c5db17607411 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -143,6 +143,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
+ struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
+ struct nfs_fh *fh = NULL;
+ struct nfs_fattr *fattr = NULL;
++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+
+ if (IS_ROOT(path->dentry))
+ return ERR_PTR(-ESTALE);
+@@ -157,12 +158,12 @@ struct vfsmount *nfs_d_automount(struct path *path)
+ if (IS_ERR(mnt))
+ goto out;
+
+- if (nfs_mountpoint_expiry_timeout < 0)
++ mntget(mnt); /* prevent immediate expiration */
++ if (timeout <= 0)
+ goto out;
+
+- mntget(mnt); /* prevent immediate expiration */
+ mnt_set_expiry(mnt, &nfs_automount_list);
+- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
++ schedule_delayed_work(&nfs_automount_task, timeout);
+
+ out:
+ nfs_free_fattr(fattr);
+@@ -201,10 +202,11 @@ const struct inode_operations nfs_referral_inode_operations = {
+ static void nfs_expire_automounts(struct work_struct *work)
+ {
+ struct list_head *list = &nfs_automount_list;
++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+
+ mark_mounts_for_expiry(list);
+- if (!list_empty(list))
+- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
++ if (!list_empty(list) && timeout > 0)
++ schedule_delayed_work(&nfs_automount_task, timeout);
+ }
+
+ void nfs_release_automount_timer(void)
+--
+2.20.1
+
--- /dev/null
+From 3af2c3b2a10b8d4287918dcbe7d6163468443192 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2020 20:33:37 +0100
+Subject: perf/core: Fix event cgroup tracking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 33238c50451596be86db1505ab65fee5172844d0 ]
+
+Song reports that installing cgroup events is broken since:
+
+ db0503e4f675 ("perf/core: Optimize perf_install_in_event()")
+
+The problem is that cgroup events try to track cpuctx->cgrp even
+for disabled events, which is pointless and actively harmful since the
+above commit. Rework the code to have explicit enable/disable hooks
+for cgroup events, such that we can limit cgroup tracking to active
+events.
+
+More specifically, since the above commit, disabled events are no
+longer added to their context from the 'right' CPU, and we can't
+access things like the current cgroup for a remote CPU.
+
+Cc: <stable@vger.kernel.org> # v5.5+
+Fixes: db0503e4f675 ("perf/core: Optimize perf_install_in_event()")
+Reported-by: Song Liu <songliubraving@fb.com>
+Tested-by: Song Liu <songliubraving@fb.com>
+Reviewed-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20200318193337.GB20760@hirez.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 70 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 43 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b3d4f485bcfa6..8d8e52a0922f4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -935,16 +935,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+ event->shadow_ctx_time = now - t->timestamp;
+ }
+
+-/*
+- * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+- * cleared when last cgroup event is removed.
+- */
+ static inline void
+-list_update_cgroup_event(struct perf_event *event,
+- struct perf_event_context *ctx, bool add)
++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+ {
+ struct perf_cpu_context *cpuctx;
+- struct list_head *cpuctx_entry;
+
+ if (!is_cgroup_event(event))
+ return;
+@@ -961,28 +955,41 @@ list_update_cgroup_event(struct perf_event *event,
+ * because if the first would mismatch, the second would not try again
+ * and we would leave cpuctx->cgrp unset.
+ */
+- if (add && !cpuctx->cgrp) {
++ if (ctx->is_active && !cpuctx->cgrp) {
+ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+ cpuctx->cgrp = cgrp;
+ }
+
+- if (add && ctx->nr_cgroups++)
++ if (ctx->nr_cgroups++)
+ return;
+- else if (!add && --ctx->nr_cgroups)
++
++ list_add(&cpuctx->cgrp_cpuctx_entry,
++ per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
++}
++
++static inline void
++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
++{
++ struct perf_cpu_context *cpuctx;
++
++ if (!is_cgroup_event(event))
+ return;
+
+- /* no cgroup running */
+- if (!add)
++ /*
++ * Because cgroup events are always per-cpu events,
++ * @ctx == &cpuctx->ctx.
++ */
++ cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
++
++ if (--ctx->nr_cgroups)
++ return;
++
++ if (ctx->is_active && cpuctx->cgrp)
+ cpuctx->cgrp = NULL;
+
+- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+- if (add)
+- list_add(cpuctx_entry,
+- per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
+- else
+- list_del(cpuctx_entry);
++ list_del(&cpuctx->cgrp_cpuctx_entry);
+ }
+
+ #else /* !CONFIG_CGROUP_PERF */
+@@ -1048,11 +1055,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
+ }
+
+ static inline void
+-list_update_cgroup_event(struct perf_event *event,
+- struct perf_event_context *ctx, bool add)
++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+ {
+ }
+
++static inline void
++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
++{
++}
+ #endif
+
+ /*
+@@ -1682,13 +1692,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ add_event_to_groups(event, ctx);
+ }
+
+- list_update_cgroup_event(event, ctx, true);
+-
+ list_add_rcu(&event->event_entry, &ctx->event_list);
+ ctx->nr_events++;
+ if (event->attr.inherit_stat)
+ ctx->nr_stat++;
+
++ if (event->state > PERF_EVENT_STATE_OFF)
++ perf_cgroup_event_enable(event, ctx);
++
+ ctx->generation++;
+ }
+
+@@ -1864,8 +1875,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
+
+ event->attach_state &= ~PERF_ATTACH_CONTEXT;
+
+- list_update_cgroup_event(event, ctx, false);
+-
+ ctx->nr_events--;
+ if (event->attr.inherit_stat)
+ ctx->nr_stat--;
+@@ -1882,8 +1891,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
+ * of error state is by explicit re-enabling
+ * of the event
+ */
+- if (event->state > PERF_EVENT_STATE_OFF)
++ if (event->state > PERF_EVENT_STATE_OFF) {
++ perf_cgroup_event_disable(event, ctx);
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
++ }
+
+ ctx->generation++;
+ }
+@@ -2114,6 +2125,7 @@ event_sched_out(struct perf_event *event,
+
+ if (READ_ONCE(event->pending_disable) >= 0) {
+ WRITE_ONCE(event->pending_disable, -1);
++ perf_cgroup_event_disable(event, ctx);
+ state = PERF_EVENT_STATE_OFF;
+ }
+ perf_event_set_state(event, state);
+@@ -2250,6 +2262,7 @@ static void __perf_event_disable(struct perf_event *event,
+ event_sched_out(event, cpuctx, ctx);
+
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
++ perf_cgroup_event_disable(event, ctx);
+ }
+
+ /*
+@@ -2633,7 +2646,7 @@ static int __perf_install_in_context(void *info)
+ }
+
+ #ifdef CONFIG_CGROUP_PERF
+- if (is_cgroup_event(event)) {
++ if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
+ /*
+ * If the current cgroup doesn't match the event's
+ * cgroup, we should not try to schedule it.
+@@ -2793,6 +2806,7 @@ static void __perf_event_enable(struct perf_event *event,
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
+ perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
++ perf_cgroup_event_enable(event, ctx);
+
+ if (!ctx->is_active)
+ return;
+@@ -3447,8 +3461,10 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ }
+
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
+- if (event->attr.pinned)
++ if (event->attr.pinned) {
++ perf_cgroup_event_disable(event, ctx);
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
++ }
+
+ sid->can_add_hw = 0;
+ sid->ctx->rotate_necessary = 1;
+--
+2.20.1
+
--- /dev/null
+From 56bc8c6c2156f62217cd0cf442ddad5648af2596 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Aug 2019 11:45:01 +0200
+Subject: perf/core: Remove 'struct sched_in_data'
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 2c2366c7548ecee65adfd264517ddf50f9e2d029 ]
+
+We can deduce the ctx and cpuctx from the event; there is no need to pass
+them along. Remove the structure and pass in can_add_hw directly.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 36 +++++++++++-------------------------
+ 1 file changed, 11 insertions(+), 25 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8d8e52a0922f4..78068b57cbba2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3437,17 +3437,11 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
+ return 0;
+ }
+
+-struct sched_in_data {
+- struct perf_event_context *ctx;
+- struct perf_cpu_context *cpuctx;
+- int can_add_hw;
+-};
+-
+ static int merge_sched_in(struct perf_event *event, void *data)
+ {
+- struct sched_in_data *sid = data;
+-
+- WARN_ON_ONCE(event->ctx != sid->ctx);
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++ int *can_add_hw = data;
+
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ return 0;
+@@ -3455,8 +3449,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ if (!event_filter_match(event))
+ return 0;
+
+- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+- if (!group_sched_in(event, sid->cpuctx, sid->ctx))
++ if (group_can_go_on(event, cpuctx, *can_add_hw)) {
++ if (!group_sched_in(event, cpuctx, ctx))
+ list_add_tail(&event->active_list, get_event_list(event));
+ }
+
+@@ -3466,8 +3460,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+ }
+
+- sid->can_add_hw = 0;
+- sid->ctx->rotate_necessary = 1;
++ *can_add_hw = 0;
++ ctx->rotate_necessary = 1;
+ }
+
+ return 0;
+@@ -3477,30 +3471,22 @@ static void
+ ctx_pinned_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
+ {
+- struct sched_in_data sid = {
+- .ctx = ctx,
+- .cpuctx = cpuctx,
+- .can_add_hw = 1,
+- };
++ int can_add_hw = 1;
+
+ visit_groups_merge(&ctx->pinned_groups,
+ smp_processor_id(),
+- merge_sched_in, &sid);
++ merge_sched_in, &can_add_hw);
+ }
+
+ static void
+ ctx_flexible_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
+ {
+- struct sched_in_data sid = {
+- .ctx = ctx,
+- .cpuctx = cpuctx,
+- .can_add_hw = 1,
+- };
++ int can_add_hw = 1;
+
+ visit_groups_merge(&ctx->flexible_groups,
+ smp_processor_id(),
+- merge_sched_in, &sid);
++ merge_sched_in, &can_add_hw);
+ }
+
+ static void
+--
+2.20.1
+
--- /dev/null
+From 9eba3c51c186338f9abc92f66ae8a2e11ce23c97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Aug 2019 11:17:00 +0200
+Subject: perf/core: Unify {pinned,flexible}_sched_in()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ab6f824cfdf7363b5e529621cbc72ae6519c78d1 ]
+
+Less is more; unify the two very nearly identical functions.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 58 ++++++++++++++++----------------------------
+ 1 file changed, 21 insertions(+), 37 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index fdb7f7ef380c4..b3d4f485bcfa6 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
+ return 1;
+ }
+
++static inline struct list_head *get_event_list(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
++}
++
+ static void perf_group_detach(struct perf_event *event)
+ {
+ struct perf_event *sibling, *tmp;
+@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
+ if (!RB_EMPTY_NODE(&event->group_node)) {
+ add_event_to_groups(sibling, event->ctx);
+
+- if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
+- struct list_head *list = sibling->attr.pinned ?
+- &ctx->pinned_active : &ctx->flexible_active;
+-
+- list_add_tail(&sibling->active_list, list);
+- }
++ if (sibling->state == PERF_EVENT_STATE_ACTIVE)
++ list_add_tail(&sibling->active_list, get_event_list(sibling));
+ }
+
+ WARN_ON_ONCE(sibling->ctx != event->ctx);
+@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
+ {
+ int ret = 0;
+
++ WARN_ON_ONCE(event->ctx != ctx);
++
+ lockdep_assert_held(&ctx->lock);
+
+ if (event->state <= PERF_EVENT_STATE_OFF)
+@@ -3425,10 +3429,12 @@ struct sched_in_data {
+ int can_add_hw;
+ };
+
+-static int pinned_sched_in(struct perf_event *event, void *data)
++static int merge_sched_in(struct perf_event *event, void *data)
+ {
+ struct sched_in_data *sid = data;
+
++ WARN_ON_ONCE(event->ctx != sid->ctx);
++
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ return 0;
+
+@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
+
+ if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+ if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+- list_add_tail(&event->active_list, &sid->ctx->pinned_active);
++ list_add_tail(&event->active_list, get_event_list(event));
+ }
+
+- /*
+- * If this pinned group hasn't been scheduled,
+- * put it in error state.
+- */
+- if (event->state == PERF_EVENT_STATE_INACTIVE)
+- perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+-
+- return 0;
+-}
+-
+-static int flexible_sched_in(struct perf_event *event, void *data)
+-{
+- struct sched_in_data *sid = data;
+-
+- if (event->state <= PERF_EVENT_STATE_OFF)
+- return 0;
+-
+- if (!event_filter_match(event))
+- return 0;
++ if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ if (event->attr.pinned)
++ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+- int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+- if (ret) {
+- sid->can_add_hw = 0;
+- sid->ctx->rotate_necessary = 1;
+- return 0;
+- }
+- list_add_tail(&event->active_list, &sid->ctx->flexible_active);
++ sid->can_add_hw = 0;
++ sid->ctx->rotate_necessary = 1;
+ }
+
+ return 0;
+@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
+
+ visit_groups_merge(&ctx->pinned_groups,
+ smp_processor_id(),
+- pinned_sched_in, &sid);
++ merge_sched_in, &sid);
+ }
+
+ static void
+@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
+
+ visit_groups_merge(&ctx->flexible_groups,
+ smp_processor_id(),
+- flexible_sched_in, &sid);
++ merge_sched_in, &sid);
+ }
+
+ static void
+--
+2.20.1
+
--- /dev/null
+From a71c2839470e438907b99a4c0d583e267c99061e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 15:09:40 +0000
+Subject: powerpc/kasan: Fix kasan_remap_early_shadow_ro()
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+[ Upstream commit af92bad615be75c6c0d1b1c5b48178360250a187 ]
+
+At the moment kasan_remap_early_shadow_ro() does nothing, because
+k_end is 0 and the unsigned comparison k_cur < k_end, i.e. k_cur < 0,
+is always false.
+
+Change the test to k_cur != k_end, as done in
+kasan_init_shadow_page_tables().
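+
+A userspace demonstration of the two loop conditions (assumption: k_end
+has wrapped to 0 at the top of the address space):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long k_cur, k_end = 0;
+          int pages = 0;
+
+          for (k_cur = -4096UL; k_cur < k_end; k_cur += 4096)
+                  pages++;       /* never runs: k_cur < 0 is false */
+          printf("with '<':  %d\n", pages);
+
+          for (k_cur = -4096UL; k_cur != k_end; k_cur += 4096)
+                  pages++;       /* runs once, then k_cur wraps to 0 */
+          printf("with '!=': %d\n", pages);
+          return 0;
+  }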
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Fixes: cbd18991e24f ("powerpc/mm: Fix an Oops in kasan_mmu_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/4e7b56865e01569058914c991143f5961b5d4719.1583507333.git.christophe.leroy@c-s.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/kasan/kasan_init_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index 0e6ed4413eeac..1cfe57b51d7e3 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -117,7 +117,7 @@ static void __init kasan_remap_early_shadow_ro(void)
+
+ kasan_populate_pte(kasan_early_shadow_pte, prot);
+
+- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
++ for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+--
+2.20.1
+
--- /dev/null
+From 51b34c081dcdc36a462d5623bd073059e8783364 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 22:08:22 -0400
+Subject: Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
+
+[ Upstream commit a86675968e2300fb567994459da3dbc4cd1b322a ]
+
+This reverts commit 64e62bdf04ab8529f45ed0a85122c703035dec3a.
+
+This commit ends up causing some lockdep splats due to trying to grab the
+payload lock while holding the mgr's lock:
+
+[ 54.010099]
+[ 54.011765] ======================================================
+[ 54.018670] WARNING: possible circular locking dependency detected
+[ 54.025577] 5.5.0-rc6-02274-g77381c23ee63 #47 Not tainted
+[ 54.031610] ------------------------------------------------------
+[ 54.038516] kworker/1:6/1040 is trying to acquire lock:
+[ 54.044354] ffff888272af3228 (&mgr->payload_lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.054957]
+[ 54.054957] but task is already holding lock:
+[ 54.061473] ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.071193]
+[ 54.071193] which lock already depends on the new lock.
+[ 54.071193]
+[ 54.080334]
+[ 54.080334] the existing dependency chain (in reverse order) is:
+[ 54.088697]
+[ 54.088697] -> #1 (&mgr->lock){+.+.}:
+[ 54.094440] __mutex_lock+0xc3/0x498
+[ 54.099015] drm_dp_mst_topology_get_port_validated+0x25/0x80
+[ 54.106018] drm_dp_update_payload_part1+0xa2/0x2e2
+[ 54.112051] intel_mst_pre_enable_dp+0x144/0x18f
+[ 54.117791] intel_encoders_pre_enable+0x63/0x70
+[ 54.123532] hsw_crtc_enable+0xa1/0x722
+[ 54.128396] intel_update_crtc+0x50/0x194
+[ 54.133455] skl_commit_modeset_enables+0x40c/0x540
+[ 54.139485] intel_atomic_commit_tail+0x5f7/0x130d
+[ 54.145418] intel_atomic_commit+0x2c8/0x2d8
+[ 54.150770] drm_atomic_helper_set_config+0x5a/0x70
+[ 54.156801] drm_mode_setcrtc+0x2ab/0x833
+[ 54.161862] drm_ioctl+0x2e5/0x424
+[ 54.166242] vfs_ioctl+0x21/0x2f
+[ 54.170426] do_vfs_ioctl+0x5fb/0x61e
+[ 54.175096] ksys_ioctl+0x55/0x75
+[ 54.179377] __x64_sys_ioctl+0x1a/0x1e
+[ 54.184146] do_syscall_64+0x5c/0x6d
+[ 54.188721] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 54.194946]
+[ 54.194946] -> #0 (&mgr->payload_lock){+.+.}:
+[ 54.201463]
+[ 54.201463] other info that might help us debug this:
+[ 54.201463]
+[ 54.210410] Possible unsafe locking scenario:
+[ 54.210410]
+[ 54.217025] CPU0 CPU1
+[ 54.222082] ---- ----
+[ 54.227138] lock(&mgr->lock);
+[ 54.230643] lock(&mgr->payload_lock);
+[ 54.237742] lock(&mgr->lock);
+[ 54.244062] lock(&mgr->payload_lock);
+[ 54.248346]
+[ 54.248346] *** DEADLOCK ***
+[ 54.248346]
+[ 54.254959] 7 locks held by kworker/1:6/1040:
+[ 54.259822] #0: ffff888275c4f528 ((wq_completion)events){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.269451] #1: ffffc9000119beb0
+((work_completion)(&(&dev_priv->hotplug.hotplug_work)->work)){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.282768] #2: ffff888272a403f0 (&dev->mode_config.mutex){+.+.},
+at: i915_hotplug_work_func+0x4b/0x2be
+[ 54.293368] #3: ffffffff824fc6c0 (drm_connector_list_iter){.+.+},
+at: i915_hotplug_work_func+0x17e/0x2be
+[ 54.304061] #4: ffffc9000119bc58 (crtc_ww_class_acquire){+.+.},
+at: drm_helper_probe_detect_ctx+0x40/0xfd
+[ 54.314855] #5: ffff888272a40470 (crtc_ww_class_mutex){+.+.}, at:
+drm_modeset_lock+0x74/0xe2
+[ 54.324385] #6: ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.334597]
+[ 54.334597] stack backtrace:
+[ 54.339464] CPU: 1 PID: 1040 Comm: kworker/1:6 Not tainted
+5.5.0-rc6-02274-g77381c23ee63 #47
+[ 54.348893] Hardware name: Google Fizz/Fizz, BIOS
+Google_Fizz.10139.39.0 01/04/2018
+[ 54.357451] Workqueue: events i915_hotplug_work_func
+[ 54.362995] Call Trace:
+[ 54.365724] dump_stack+0x71/0x9c
+[ 54.369427] check_noncircular+0x91/0xbc
+[ 54.373809] ? __lock_acquire+0xc9e/0xf66
+[ 54.378286] ? __lock_acquire+0xc9e/0xf66
+[ 54.382763] ? lock_acquire+0x175/0x1ac
+[ 54.387048] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.393177] ? __mutex_lock+0xc3/0x498
+[ 54.397362] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.403492] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.409620] ? drm_dp_dpcd_access+0xd9/0x101
+[ 54.414390] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.420517] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.426645] ? intel_digital_port_connected+0x34d/0x35c
+[ 54.432482] ? intel_dp_detect+0x227/0x44e
+[ 54.437056] ? ww_mutex_lock+0x49/0x9a
+[ 54.441242] ? drm_helper_probe_detect_ctx+0x75/0xfd
+[ 54.446789] ? intel_encoder_hotplug+0x4b/0x97
+[ 54.451752] ? intel_ddi_hotplug+0x61/0x2e0
+[ 54.456423] ? mark_held_locks+0x53/0x68
+[ 54.460803] ? _raw_spin_unlock_irqrestore+0x3a/0x51
+[ 54.466347] ? lockdep_hardirqs_on+0x187/0x1a4
+[ 54.471310] ? drm_connector_list_iter_next+0x89/0x9a
+[ 54.476953] ? i915_hotplug_work_func+0x206/0x2be
+[ 54.482208] ? worker_thread+0x4d5/0x6e2
+[ 54.486587] ? worker_thread+0x455/0x6e2
+[ 54.490966] ? queue_work_on+0x64/0x64
+[ 54.495151] ? kthread+0x1e9/0x1f1
+[ 54.498946] ? queue_work_on+0x64/0x64
+[ 54.503130] ? kthread_unpark+0x5e/0x5e
+[ 54.507413] ? ret_from_fork+0x3a/0x50
+
+The proper fix for this is probably to clean up the VCPI allocations when
+we're enabling the topology, or on the first payload allocation. For now
+though, let's just revert.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology mgr")
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Sean Paul <sean@poorly.run>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200117205149.97262-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 4a65ef8d8bff3..c4b692dff5956 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3437,7 +3437,6 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+ {
+ int ret = 0;
+- int i = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+@@ -3498,21 +3497,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- mutex_lock(&mgr->payload_lock);
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+- for (i = 0; i < mgr->max_payloads; i++) {
+- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+-
+- if (vcpi) {
+- vcpi->vcpi = 0;
+- vcpi->num_slots = 0;
+- }
+- mgr->proposed_vcpis[i] = NULL;
+- }
+ mgr->vcpi_mask = 0;
+- mutex_unlock(&mgr->payload_lock);
+ }
+
+ out_unlock:
+--
+2.20.1
+
powerpc-64-prevent-stack-protection-in-early-boot.patch
scsi-mpt3sas-fix-kernel-panic-observed-on-soft-hba-unplug.patch
arm64-always-force-a-branch-protection-mode-when-the-compiler-has-one.patch
+drm-bridge-analogix-anx78xx-fix-drm_dp_link-helper-r.patch
+revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch
+drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch
+drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch
+drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-po.patch
+nfs-finish_automount-requires-us-to-hold-2-refs-to-t.patch
+perf-core-unify-pinned-flexible-_sched_in.patch
+perf-core-fix-event-cgroup-tracking.patch
+perf-core-remove-struct-sched_in_data.patch
+powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch
+mmc-sdhci-convert-sdhci_set_timeout_irq-to-non-stati.patch
+mmc-sdhci-refactor-sdhci_set_timeout.patch