--- /dev/null
+From 77a7799e6a2ec1d29e92df069cbd43289b90684c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 20:21:26 +0800
+Subject: drm/amdgpu: fix gfx hang during suspend with video playback (v2)
+
+From: Prike Liang <Prike.Liang@amd.com>
+
+[ Upstream commit 487eca11a321ef33bcf4ca5adb3c0c4954db1b58 ]
+
+The system hangs during S3 suspend because the SMU is stuck waiting for
+GC, which does not respond to the CP_HQD_ACTIVE register access request.
+The root cause is accessing a GC register after GFX CGPG has been
+entered; fix it by disabling GFX CGPG before performing the suspend.
+
+v2: Disable GFX CGPG instead of using the RLC safe mode guard.
+
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Tested-by: Mengbing Wang <Mengbing.Wang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
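+
+A condensed sketch of the resulting flow (drawn from the hunks below, not a
+literal excerpt): the CG/PG ungate now happens once in amdgpu_device_suspend(),
+ahead of the KFD/RAS suspend calls, instead of in IP suspend phase 1.
+
+	/* amdgpu_device_suspend(), schematic after this patch */
+	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); /* leave GFX CGPG ...     */
+	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); /* ... before touching GC */
+	amdgpu_amdkfd_suspend(adev);
+	amdgpu_ras_suspend(adev);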
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b8975857d60d6..48e2863461b7f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2285,8 +2285,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+@@ -3309,6 +3307,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ }
+ }
+
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
++
+ amdgpu_amdkfd_suspend(adev);
+
+ amdgpu_ras_suspend(adev);
+--
+2.20.1
+
--- /dev/null
+From a03f43318955e8e0066ba9187ef27e8022634ca8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 14:43:20 -0500
+Subject: drm/dp_mst: Fix clearing payload state on topology disable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lyude Paul <lyude@redhat.com>
+
+[ Upstream commit 8732fe46b20c951493bfc4dba0ad08efdf41de81 ]
+
+The issues caused by:
+
+commit 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology
+mgr")
+
+Prompted me to take a closer look at how we clear the payload state in
+general when disabling the topology, and it turns out there are actually
+two subtle issues here.
+
+The first is that we're not grabbing &mgr->payload_lock when clearing the
+payloads in drm_dp_mst_topology_mgr_set_mst(). Seeing as the canonical
+lock order is &mgr->payload_lock -> &mgr->lock (because we always want
+&mgr->lock to be the inner-most lock so topology validation always
+works), this makes perfect sense. It also means that -technically- there
+could be racing between someone calling
+drm_dp_mst_topology_mgr_set_mst() to disable the topology, along with a
+modeset occurring that's modifying the payload state at the same time.
+
+The second is the more obvious issue that Wayne Lin discovered, that
+we're not clearing proposed_payloads when disabling the topology.
+
+I actually can't see any obvious places where the racing caused by the
+first issue would break something, and it could be that some of our
+higher-level locks already prevent this by happenstance, but better safe
+than sorry. So, let's make it so that drm_dp_mst_topology_mgr_set_mst()
+first grabs &mgr->payload_lock followed by &mgr->lock so that we never
+race when modifying the payload state. Then, we also clear
+proposed_payloads to fix the original issue of enabling a new topology
+with a dirty payload state. This doesn't clear any of the drm_dp_vcpi
+structures, but those are getting destroyed along with the ports anyway.
+
+Changes since v1:
+* Use sizeof(mgr->payloads[0])/sizeof(mgr->proposed_vcpis[0]) instead -
+ vsyrjala
+
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200122194321.14953-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
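+
+A condensed sketch of the resulting locking and teardown in
+drm_dp_mst_topology_mgr_set_mst() (drawn from the hunks below, not a literal
+excerpt): payload_lock is the outer lock, mgr->lock the inner one, and both
+payloads[] and proposed_vcpis[] are zeroed while they are held.
+
+	mutex_lock(&mgr->payload_lock);    /* outer */
+	mutex_lock(&mgr->lock);            /* inner, so topology validation still works */
+	...
+	memset(mgr->payloads, 0, mgr->max_payloads * sizeof(mgr->payloads[0]));
+	memset(mgr->proposed_vcpis, 0, mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
+	...
+	mutex_unlock(&mgr->lock);
+	mutex_unlock(&mgr->payload_lock);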
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index e993009fdb763..7b7f0da013467 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3506,6 +3506,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
++ mutex_lock(&mgr->payload_lock);
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+@@ -3564,7 +3565,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
++ memset(mgr->payloads, 0,
++ mgr->max_payloads * sizeof(mgr->payloads[0]));
++ memset(mgr->proposed_vcpis, 0,
++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ mgr->vcpi_mask = 0;
+@@ -3573,6 +3577,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+
+ out_unlock:
+ mutex_unlock(&mgr->lock);
++ mutex_unlock(&mgr->payload_lock);
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
+ return ret;
+--
+2.20.1
+
--- /dev/null
+From 2702b344fcac164c65170bfbce4ce91938b9093c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Feb 2020 10:56:57 -0800
+Subject: drm/i915/ggtt: do not set bits 1-11 in gen12 ptes
+
+From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+
+[ Upstream commit 69edc390a54268d41e45089cb972bf71510f5f07 ]
+
+On TGL, bits 2-4 in the GGTT PTE are not ignored anymore and are
+instead used for some extra VT-d capabilities. We don't (yet?) have
+support for those capabilities, but, given that we shared the pte_encode
+function betweed GGTT and PPGTT, we still set those bits to the PPGTT
+PPAT values. The DMA engine gets very confused when those bits are
+set while the iommu is enabled, leading to errors. E.g. when loading
+the GuC we get:
+
+[ 9.796218] DMAR: DRHD: handling fault status reg 2
+[ 9.796235] DMAR: [DMA Write] Request device [00:02.0] PASID ffffffff fault addr 0 [fault reason 02] Present bit in context entry is clear
+[ 9.899215] [drm:intel_guc_fw_upload [i915]] *ERROR* GuC firmware signature verification failed
+
+To fix this, just have a dedicated gen8_pte_encode function per type of
+gtt. Also, explicitly set vm->pte_encode for gen8_ppgtt, even if we
+don't use it, to make sure we don't accidentally assign it to the GGTT
+one, like we do for gen6_ppgtt, in case we need it in the future.
+
+Reported-by: "Sodhi, Vunny" <vunny.sodhi@intel.com>
+Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200226185657.26445-1-daniele.ceraolospurio@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 26 ++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/gt/intel_ggtt.c | 13 ++++++++++---
+ drivers/gpu/drm/i915/gt/intel_gtt.c | 24 ------------------------
+ drivers/gpu/drm/i915/gt/intel_gtt.h | 4 ----
+ 4 files changed, 36 insertions(+), 31 deletions(-)
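+
+The split, condensed from the hunks below: the GGTT variant sets only the
+present bit, while the PPGTT variant keeps the PPAT cacheability bits that
+ended up in the now-meaningful PTE bits when shared with the GGTT.
+
+	/* GGTT: no PPAT bits (literal from the hunk below) */
+	static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+					enum i915_cache_level level,
+					u32 flags)
+	{
+		return addr | _PAGE_PRESENT;
+	}
+
+	/* PPGTT: keeps PPAT_UNCACHED / PPAT_DISPLAY_ELLC / PPAT_CACHED per cache level */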
+
+diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+index 4d1de2d97d5cf..9aabc5815d388 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+@@ -25,6 +25,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
+ return pde;
+ }
+
++static u64 gen8_pte_encode(dma_addr_t addr,
++ enum i915_cache_level level,
++ u32 flags)
++{
++ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
++
++ if (unlikely(flags & PTE_READ_ONLY))
++ pte &= ~_PAGE_RW;
++
++ switch (level) {
++ case I915_CACHE_NONE:
++ pte |= PPAT_UNCACHED;
++ break;
++ case I915_CACHE_WT:
++ pte |= PPAT_DISPLAY_ELLC;
++ break;
++ default:
++ pte |= PPAT_CACHED;
++ break;
++ }
++
++ return pte;
++}
++
+ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+ {
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+@@ -706,6 +730,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
++ ppgtt->vm.pte_encode = gen8_pte_encode;
++
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index 531d501be01fa..5fd8b3e0cd19c 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -167,6 +167,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+ intel_gtt_chipset_flush();
+ }
+
++static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
++ enum i915_cache_level level,
++ u32 flags)
++{
++ return addr | _PAGE_PRESENT;
++}
++
+ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+ {
+ writeq(pte, addr);
+@@ -182,7 +189,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
++ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
+
+ ggtt->invalidate(ggtt);
+ }
+@@ -195,7 +202,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
++ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+ dma_addr_t addr;
+
+ /*
+@@ -890,7 +897,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+- ggtt->vm.pte_encode = gen8_pte_encode;
++ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
+index 16acdc5d67340..f6fcf05d54f36 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
+@@ -454,30 +454,6 @@ void gtt_write_workarounds(struct intel_gt *gt)
+ }
+ }
+
+-u64 gen8_pte_encode(dma_addr_t addr,
+- enum i915_cache_level level,
+- u32 flags)
+-{
+- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+-
+- if (unlikely(flags & PTE_READ_ONLY))
+- pte &= ~_PAGE_RW;
+-
+- switch (level) {
+- case I915_CACHE_NONE:
+- pte |= PPAT_UNCACHED;
+- break;
+- case I915_CACHE_WT:
+- pte |= PPAT_DISPLAY_ELLC;
+- break;
+- default:
+- pte |= PPAT_CACHED;
+- break;
+- }
+-
+- return pte;
+-}
+-
+ static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+ {
+ /* TGL doesn't support LLC or AGE settings */
+diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
+index 7da7681c20b1b..7db9f3ac9aedb 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
++++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
+@@ -515,10 +515,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+ void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
+
+-u64 gen8_pte_encode(dma_addr_t addr,
+- enum i915_cache_level level,
+- u32 flags);
+-
+ int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+ void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+
+--
+2.20.1
+
--- /dev/null
+From de32f4290a6f3a93a7ecd84b249d5af7c6957c74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2020 16:23:48 +0100
+Subject: drm/i915/gt: Fill all the unused space in the GGTT
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit 0b72a251bf92ca2378530fa1f9b35a71830ab51c ]
+
+When we allocate space in the GGTT we may have to allocate a larger
+region than will be populated by the object to accommodate fencing. Make
+sure that this space beyond the end of the buffer points safely into
+scratch space, in case the HW tries to access it anyway (e.g. fenced
+access to the last tile row).
+
+v2: Preemptively / conservatively guard gen6 ggtt as well.
+
+Reported-by: Imre Deak <imre.deak@intel.com>
+References: https://gitlab.freedesktop.org/drm/intel/-/issues/1554
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200331152348.26946-1-chris@chris-wilson.co.uk
+(cherry picked from commit 4d6c18590870fbac1e65dde5e01e621c8e0ca096)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_ggtt.c | 37 ++++++++++++++++++++--------
+ 1 file changed, 27 insertions(+), 10 deletions(-)
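+
+The core of the change, condensed from the hunks below: PTE writes now track an
+explicit end pointer derived from the node size, and everything between the last
+written entry and that end is pointed at the scratch page.
+
+	gte = (gen8_pte_t __iomem *)ggtt->gsm + vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
+
+	/* allocated but "unused" space (e.g. for fencing) points at scratch */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);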
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index 5fd8b3e0cd19c..d0d35c55170f8 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -199,10 +199,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ enum i915_cache_level level,
+ u32 flags)
+ {
+- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+- struct sgt_iter sgt_iter;
+- gen8_pte_t __iomem *gtt_entries;
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
++ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
++ gen8_pte_t __iomem *gte;
++ gen8_pte_t __iomem *end;
++ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+@@ -210,10 +211,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ * not to allow the user to override access to a read only page.
+ */
+
+- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
+- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+- gen8_set_pte(gtt_entries++, pte_encode | addr);
++ gte = (gen8_pte_t __iomem *)ggtt->gsm;
++ gte += vma->node.start / I915_GTT_PAGE_SIZE;
++ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
++
++ for_each_sgt_daddr(addr, iter, vma->pages)
++ gen8_set_pte(gte++, pte_encode | addr);
++ GEM_BUG_ON(gte > end);
++
++ /* Fill the allocated but "unused" space beyond the end of the buffer */
++ while (gte < end)
++ gen8_set_pte(gte++, vm->scratch[0].encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+@@ -249,13 +257,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ u32 flags)
+ {
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
++ gen6_pte_t __iomem *gte;
++ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
++ gte = (gen6_pte_t __iomem *)ggtt->gsm;
++ gte += vma->node.start / I915_GTT_PAGE_SIZE;
++ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
++
+ for_each_sgt_daddr(addr, iter, vma->pages)
+- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
++ iowrite32(vm->pte_encode(addr, level, flags), gte++);
++ GEM_BUG_ON(gte > end);
++
++ /* Fill the allocated but "unused" space beyond the end of the buffer */
++ while (gte < end)
++ iowrite32(vm->scratch[0].encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+--
+2.20.1
+
--- /dev/null
+From ab2e26406c14a3723679bc651ff405b9d4fde1ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2020 20:33:37 +0100
+Subject: perf/core: Fix event cgroup tracking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 33238c50451596be86db1505ab65fee5172844d0 ]
+
+Song reports that installing cgroup events is broken since:
+
+ db0503e4f675 ("perf/core: Optimize perf_install_in_event()")
+
+The problem being that cgroup events try to track cpuctx->cgrp even
+for disabled events, which is pointless and actively harmful since the
+above commit. Rework the code to have explicit enable/disable hooks
+for cgroup events, such that we can limit cgroup tracking to active
+events.
+
+More specifically, since the above commit, disabled events are no
+longer added to their context from the 'right' CPU, and we can't
+access things like the current cgroup for a remote CPU.
+
+Cc: <stable@vger.kernel.org> # v5.5+
+Fixes: db0503e4f675 ("perf/core: Optimize perf_install_in_event()")
+Reported-by: Song Liu <songliubraving@fb.com>
+Tested-by: Song Liu <songliubraving@fb.com>
+Reviewed-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20200318193337.GB20760@hirez.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 70 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 43 insertions(+), 27 deletions(-)
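+
+A condensed view of the new hooks and where they get called, drawn from the
+hunks below: cgroup tracking now follows the event state transitions rather
+than list add/remove.
+
+	/* when an event is added/enabled with state > PERF_EVENT_STATE_OFF */
+	perf_cgroup_event_enable(event, ctx);   /* may set cpuctx->cgrp, bumps ctx->nr_cgroups */
+
+	/* on disable, ERROR, or pending_disable in event_sched_out() */
+	perf_cgroup_event_disable(event, ctx);  /* drops nr_cgroups, may clear cpuctx->cgrp */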
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 95860901949e7..b816127367ffc 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -935,16 +935,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+ event->shadow_ctx_time = now - t->timestamp;
+ }
+
+-/*
+- * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+- * cleared when last cgroup event is removed.
+- */
+ static inline void
+-list_update_cgroup_event(struct perf_event *event,
+- struct perf_event_context *ctx, bool add)
++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+ {
+ struct perf_cpu_context *cpuctx;
+- struct list_head *cpuctx_entry;
+
+ if (!is_cgroup_event(event))
+ return;
+@@ -961,28 +955,41 @@ list_update_cgroup_event(struct perf_event *event,
+ * because if the first would mismatch, the second would not try again
+ * and we would leave cpuctx->cgrp unset.
+ */
+- if (add && !cpuctx->cgrp) {
++ if (ctx->is_active && !cpuctx->cgrp) {
+ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+ cpuctx->cgrp = cgrp;
+ }
+
+- if (add && ctx->nr_cgroups++)
++ if (ctx->nr_cgroups++)
+ return;
+- else if (!add && --ctx->nr_cgroups)
++
++ list_add(&cpuctx->cgrp_cpuctx_entry,
++ per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
++}
++
++static inline void
++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
++{
++ struct perf_cpu_context *cpuctx;
++
++ if (!is_cgroup_event(event))
+ return;
+
+- /* no cgroup running */
+- if (!add)
++ /*
++ * Because cgroup events are always per-cpu events,
++ * @ctx == &cpuctx->ctx.
++ */
++ cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
++
++ if (--ctx->nr_cgroups)
++ return;
++
++ if (ctx->is_active && cpuctx->cgrp)
+ cpuctx->cgrp = NULL;
+
+- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+- if (add)
+- list_add(cpuctx_entry,
+- per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
+- else
+- list_del(cpuctx_entry);
++ list_del(&cpuctx->cgrp_cpuctx_entry);
+ }
+
+ #else /* !CONFIG_CGROUP_PERF */
+@@ -1048,11 +1055,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
+ }
+
+ static inline void
+-list_update_cgroup_event(struct perf_event *event,
+- struct perf_event_context *ctx, bool add)
++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+ {
+ }
+
++static inline void
++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
++{
++}
+ #endif
+
+ /*
+@@ -1682,13 +1692,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ add_event_to_groups(event, ctx);
+ }
+
+- list_update_cgroup_event(event, ctx, true);
+-
+ list_add_rcu(&event->event_entry, &ctx->event_list);
+ ctx->nr_events++;
+ if (event->attr.inherit_stat)
+ ctx->nr_stat++;
+
++ if (event->state > PERF_EVENT_STATE_OFF)
++ perf_cgroup_event_enable(event, ctx);
++
+ ctx->generation++;
+ }
+
+@@ -1864,8 +1875,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
+
+ event->attach_state &= ~PERF_ATTACH_CONTEXT;
+
+- list_update_cgroup_event(event, ctx, false);
+-
+ ctx->nr_events--;
+ if (event->attr.inherit_stat)
+ ctx->nr_stat--;
+@@ -1882,8 +1891,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
+ * of error state is by explicit re-enabling
+ * of the event
+ */
+- if (event->state > PERF_EVENT_STATE_OFF)
++ if (event->state > PERF_EVENT_STATE_OFF) {
++ perf_cgroup_event_disable(event, ctx);
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
++ }
+
+ ctx->generation++;
+ }
+@@ -2114,6 +2125,7 @@ event_sched_out(struct perf_event *event,
+
+ if (READ_ONCE(event->pending_disable) >= 0) {
+ WRITE_ONCE(event->pending_disable, -1);
++ perf_cgroup_event_disable(event, ctx);
+ state = PERF_EVENT_STATE_OFF;
+ }
+ perf_event_set_state(event, state);
+@@ -2250,6 +2262,7 @@ static void __perf_event_disable(struct perf_event *event,
+ event_sched_out(event, cpuctx, ctx);
+
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
++ perf_cgroup_event_disable(event, ctx);
+ }
+
+ /*
+@@ -2633,7 +2646,7 @@ static int __perf_install_in_context(void *info)
+ }
+
+ #ifdef CONFIG_CGROUP_PERF
+- if (is_cgroup_event(event)) {
++ if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
+ /*
+ * If the current cgroup doesn't match the event's
+ * cgroup, we should not try to schedule it.
+@@ -2793,6 +2806,7 @@ static void __perf_event_enable(struct perf_event *event,
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
+ perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
++ perf_cgroup_event_enable(event, ctx);
+
+ if (!ctx->is_active)
+ return;
+@@ -3447,8 +3461,10 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ }
+
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
+- if (event->attr.pinned)
++ if (event->attr.pinned) {
++ perf_cgroup_event_disable(event, ctx);
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
++ }
+
+ sid->can_add_hw = 0;
+ sid->ctx->rotate_necessary = 1;
+--
+2.20.1
+
--- /dev/null
+From 7758c9b04650ce17ec4ac28c209d1487bb91652b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Aug 2019 11:45:01 +0200
+Subject: perf/core: Remove 'struct sched_in_data'
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 2c2366c7548ecee65adfd264517ddf50f9e2d029 ]
+
+We can deduce the ctx and cpuctx from the event, no need to pass them
+along. Remove the structure and pass in can_add_hw directly.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 36 +++++++++++-------------------------
+ 1 file changed, 11 insertions(+), 25 deletions(-)
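+
+The resulting callback shape, condensed from the hunks below: ctx and cpuctx
+are derived from the event itself, and the only state threaded through the
+iterator is can_add_hw.
+
+	static int merge_sched_in(struct perf_event *event, void *data)
+	{
+		struct perf_event_context *ctx = event->ctx;
+		struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+		int *can_add_hw = data;
+		...
+	}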
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b816127367ffc..243717177f446 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3437,17 +3437,11 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
+ return 0;
+ }
+
+-struct sched_in_data {
+- struct perf_event_context *ctx;
+- struct perf_cpu_context *cpuctx;
+- int can_add_hw;
+-};
+-
+ static int merge_sched_in(struct perf_event *event, void *data)
+ {
+- struct sched_in_data *sid = data;
+-
+- WARN_ON_ONCE(event->ctx != sid->ctx);
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++ int *can_add_hw = data;
+
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ return 0;
+@@ -3455,8 +3449,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ if (!event_filter_match(event))
+ return 0;
+
+- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+- if (!group_sched_in(event, sid->cpuctx, sid->ctx))
++ if (group_can_go_on(event, cpuctx, *can_add_hw)) {
++ if (!group_sched_in(event, cpuctx, ctx))
+ list_add_tail(&event->active_list, get_event_list(event));
+ }
+
+@@ -3466,8 +3460,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+ }
+
+- sid->can_add_hw = 0;
+- sid->ctx->rotate_necessary = 1;
++ *can_add_hw = 0;
++ ctx->rotate_necessary = 1;
+ }
+
+ return 0;
+@@ -3477,30 +3471,22 @@ static void
+ ctx_pinned_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
+ {
+- struct sched_in_data sid = {
+- .ctx = ctx,
+- .cpuctx = cpuctx,
+- .can_add_hw = 1,
+- };
++ int can_add_hw = 1;
+
+ visit_groups_merge(&ctx->pinned_groups,
+ smp_processor_id(),
+- merge_sched_in, &sid);
++ merge_sched_in, &can_add_hw);
+ }
+
+ static void
+ ctx_flexible_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
+ {
+- struct sched_in_data sid = {
+- .ctx = ctx,
+- .cpuctx = cpuctx,
+- .can_add_hw = 1,
+- };
++ int can_add_hw = 1;
+
+ visit_groups_merge(&ctx->flexible_groups,
+ smp_processor_id(),
+- merge_sched_in, &sid);
++ merge_sched_in, &can_add_hw);
+ }
+
+ static void
+--
+2.20.1
+
--- /dev/null
+From d310c34b5c02e3ee69481ad3062da3744c18c53e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Aug 2019 11:17:00 +0200
+Subject: perf/core: Unify {pinned,flexible}_sched_in()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ab6f824cfdf7363b5e529621cbc72ae6519c78d1 ]
+
+Less is more; unify the two very nearly identical functions.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 58 ++++++++++++++++----------------------------
+ 1 file changed, 21 insertions(+), 37 deletions(-)
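+
+The helper that makes the unification possible, as added in the hunks below:
+the only real difference between the two callbacks was which active list the
+event ends up on, and that is now derived from event->attr.pinned.
+
+	static inline struct list_head *get_event_list(struct perf_event *event)
+	{
+		struct perf_event_context *ctx = event->ctx;
+		return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+	}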
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e453589da97ca..95860901949e7 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
+ return 1;
+ }
+
++static inline struct list_head *get_event_list(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
++}
++
+ static void perf_group_detach(struct perf_event *event)
+ {
+ struct perf_event *sibling, *tmp;
+@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
+ if (!RB_EMPTY_NODE(&event->group_node)) {
+ add_event_to_groups(sibling, event->ctx);
+
+- if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
+- struct list_head *list = sibling->attr.pinned ?
+- &ctx->pinned_active : &ctx->flexible_active;
+-
+- list_add_tail(&sibling->active_list, list);
+- }
++ if (sibling->state == PERF_EVENT_STATE_ACTIVE)
++ list_add_tail(&sibling->active_list, get_event_list(sibling));
+ }
+
+ WARN_ON_ONCE(sibling->ctx != event->ctx);
+@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
+ {
+ int ret = 0;
+
++ WARN_ON_ONCE(event->ctx != ctx);
++
+ lockdep_assert_held(&ctx->lock);
+
+ if (event->state <= PERF_EVENT_STATE_OFF)
+@@ -3425,10 +3429,12 @@ struct sched_in_data {
+ int can_add_hw;
+ };
+
+-static int pinned_sched_in(struct perf_event *event, void *data)
++static int merge_sched_in(struct perf_event *event, void *data)
+ {
+ struct sched_in_data *sid = data;
+
++ WARN_ON_ONCE(event->ctx != sid->ctx);
++
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ return 0;
+
+@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
+
+ if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+ if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+- list_add_tail(&event->active_list, &sid->ctx->pinned_active);
++ list_add_tail(&event->active_list, get_event_list(event));
+ }
+
+- /*
+- * If this pinned group hasn't been scheduled,
+- * put it in error state.
+- */
+- if (event->state == PERF_EVENT_STATE_INACTIVE)
+- perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+-
+- return 0;
+-}
+-
+-static int flexible_sched_in(struct perf_event *event, void *data)
+-{
+- struct sched_in_data *sid = data;
+-
+- if (event->state <= PERF_EVENT_STATE_OFF)
+- return 0;
+-
+- if (!event_filter_match(event))
+- return 0;
++ if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ if (event->attr.pinned)
++ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+- int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+- if (ret) {
+- sid->can_add_hw = 0;
+- sid->ctx->rotate_necessary = 1;
+- return 0;
+- }
+- list_add_tail(&event->active_list, &sid->ctx->flexible_active);
++ sid->can_add_hw = 0;
++ sid->ctx->rotate_necessary = 1;
+ }
+
+ return 0;
+@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
+
+ visit_groups_merge(&ctx->pinned_groups,
+ smp_processor_id(),
+- pinned_sched_in, &sid);
++ merge_sched_in, &sid);
+ }
+
+ static void
+@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
+
+ visit_groups_merge(&ctx->flexible_groups,
+ smp_processor_id(),
+- flexible_sched_in, &sid);
++ merge_sched_in, &sid);
+ }
+
+ static void
+--
+2.20.1
+
--- /dev/null
+From 807a85e42bfaf4689df04c697af298c89c156a30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 15:09:40 +0000
+Subject: powerpc/kasan: Fix kasan_remap_early_shadow_ro()
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+[ Upstream commit af92bad615be75c6c0d1b1c5b48178360250a187 ]
+
+At the moment kasan_remap_early_shadow_ro() does nothing, because
+k_end is 0, so the loop condition k_cur < k_end (i.e. k_cur < 0) is
+always false.
+
+Change the test to k_cur != k_end, as done in
+kasan_init_shadow_page_tables().
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Fixes: cbd18991e24f ("powerpc/mm: Fix an Oops in kasan_mmu_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/4e7b56865e01569058914c991143f5961b5d4719.1583507333.git.christophe.leroy@c-s.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/kasan/kasan_init_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
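+
+A small illustration of the wraparound (hypothetical values, not from the
+patch): when the shadow region ends at the top of the address space, k_end
+wraps to 0, so a "<" loop never executes while a "!=" loop still terminates
+once k_cur itself wraps.
+
+	unsigned long k_start = -4096UL, k_end = k_start + 4096; /* k_end == 0 */
+	unsigned long k_cur, n = 0;
+
+	for (k_cur = k_start; k_cur < k_end; k_cur += 4096)
+		n++;            /* never runs: k_cur < 0 is always false */
+
+	for (k_cur = k_start; k_cur != k_end; k_cur += 4096)
+		n++;            /* runs once, stops when k_cur wraps to 0 */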
+
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index d2bed3fcb7194..1169ad1b6730a 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -101,7 +101,7 @@ static void __init kasan_remap_early_shadow_ro(void)
+
+ kasan_populate_pte(kasan_early_shadow_pte, prot);
+
+- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
++ for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+--
+2.20.1
+
--- /dev/null
+From a120b1f37cc57bac36a4ffd83a9239c8fca60083 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jan 2020 15:51:49 -0500
+Subject: Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
+
+From: Lyude Paul <lyude@redhat.com>
+
+[ Upstream commit a86675968e2300fb567994459da3dbc4cd1b322a ]
+
+This reverts commit 64e62bdf04ab8529f45ed0a85122c703035dec3a.
+
+This commit ends up causing some lockdep splats due to trying to grab the
+payload lock while holding the mgr's lock:
+
+[ 54.010099]
+[ 54.011765] ======================================================
+[ 54.018670] WARNING: possible circular locking dependency detected
+[ 54.025577] 5.5.0-rc6-02274-g77381c23ee63 #47 Not tainted
+[ 54.031610] ------------------------------------------------------
+[ 54.038516] kworker/1:6/1040 is trying to acquire lock:
+[ 54.044354] ffff888272af3228 (&mgr->payload_lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.054957]
+[ 54.054957] but task is already holding lock:
+[ 54.061473] ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.071193]
+[ 54.071193] which lock already depends on the new lock.
+[ 54.071193]
+[ 54.080334]
+[ 54.080334] the existing dependency chain (in reverse order) is:
+[ 54.088697]
+[ 54.088697] -> #1 (&mgr->lock){+.+.}:
+[ 54.094440] __mutex_lock+0xc3/0x498
+[ 54.099015] drm_dp_mst_topology_get_port_validated+0x25/0x80
+[ 54.106018] drm_dp_update_payload_part1+0xa2/0x2e2
+[ 54.112051] intel_mst_pre_enable_dp+0x144/0x18f
+[ 54.117791] intel_encoders_pre_enable+0x63/0x70
+[ 54.123532] hsw_crtc_enable+0xa1/0x722
+[ 54.128396] intel_update_crtc+0x50/0x194
+[ 54.133455] skl_commit_modeset_enables+0x40c/0x540
+[ 54.139485] intel_atomic_commit_tail+0x5f7/0x130d
+[ 54.145418] intel_atomic_commit+0x2c8/0x2d8
+[ 54.150770] drm_atomic_helper_set_config+0x5a/0x70
+[ 54.156801] drm_mode_setcrtc+0x2ab/0x833
+[ 54.161862] drm_ioctl+0x2e5/0x424
+[ 54.166242] vfs_ioctl+0x21/0x2f
+[ 54.170426] do_vfs_ioctl+0x5fb/0x61e
+[ 54.175096] ksys_ioctl+0x55/0x75
+[ 54.179377] __x64_sys_ioctl+0x1a/0x1e
+[ 54.184146] do_syscall_64+0x5c/0x6d
+[ 54.188721] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 54.194946]
+[ 54.194946] -> #0 (&mgr->payload_lock){+.+.}:
+[ 54.201463]
+[ 54.201463] other info that might help us debug this:
+[ 54.201463]
+[ 54.210410] Possible unsafe locking scenario:
+[ 54.210410]
+[ 54.217025]        CPU0                    CPU1
+[ 54.222082]        ----                    ----
+[ 54.227138]   lock(&mgr->lock);
+[ 54.230643]                                lock(&mgr->payload_lock);
+[ 54.237742]                                lock(&mgr->lock);
+[ 54.244062]   lock(&mgr->payload_lock);
+[ 54.248346]
+[ 54.248346] *** DEADLOCK ***
+[ 54.248346]
+[ 54.254959] 7 locks held by kworker/1:6/1040:
+[ 54.259822] #0: ffff888275c4f528 ((wq_completion)events){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.269451] #1: ffffc9000119beb0
+((work_completion)(&(&dev_priv->hotplug.hotplug_work)->work)){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.282768] #2: ffff888272a403f0 (&dev->mode_config.mutex){+.+.},
+at: i915_hotplug_work_func+0x4b/0x2be
+[ 54.293368] #3: ffffffff824fc6c0 (drm_connector_list_iter){.+.+},
+at: i915_hotplug_work_func+0x17e/0x2be
+[ 54.304061] #4: ffffc9000119bc58 (crtc_ww_class_acquire){+.+.},
+at: drm_helper_probe_detect_ctx+0x40/0xfd
+[ 54.314855] #5: ffff888272a40470 (crtc_ww_class_mutex){+.+.}, at:
+drm_modeset_lock+0x74/0xe2
+[ 54.324385] #6: ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.334597]
+[ 54.334597] stack backtrace:
+[ 54.339464] CPU: 1 PID: 1040 Comm: kworker/1:6 Not tainted
+5.5.0-rc6-02274-g77381c23ee63 #47
+[ 54.348893] Hardware name: Google Fizz/Fizz, BIOS
+Google_Fizz.10139.39.0 01/04/2018
+[ 54.357451] Workqueue: events i915_hotplug_work_func
+[ 54.362995] Call Trace:
+[ 54.365724] dump_stack+0x71/0x9c
+[ 54.369427] check_noncircular+0x91/0xbc
+[ 54.373809] ? __lock_acquire+0xc9e/0xf66
+[ 54.378286] ? __lock_acquire+0xc9e/0xf66
+[ 54.382763] ? lock_acquire+0x175/0x1ac
+[ 54.387048] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.393177] ? __mutex_lock+0xc3/0x498
+[ 54.397362] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.403492] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.409620] ? drm_dp_dpcd_access+0xd9/0x101
+[ 54.414390] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.420517] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.426645] ? intel_digital_port_connected+0x34d/0x35c
+[ 54.432482] ? intel_dp_detect+0x227/0x44e
+[ 54.437056] ? ww_mutex_lock+0x49/0x9a
+[ 54.441242] ? drm_helper_probe_detect_ctx+0x75/0xfd
+[ 54.446789] ? intel_encoder_hotplug+0x4b/0x97
+[ 54.451752] ? intel_ddi_hotplug+0x61/0x2e0
+[ 54.456423] ? mark_held_locks+0x53/0x68
+[ 54.460803] ? _raw_spin_unlock_irqrestore+0x3a/0x51
+[ 54.466347] ? lockdep_hardirqs_on+0x187/0x1a4
+[ 54.471310] ? drm_connector_list_iter_next+0x89/0x9a
+[ 54.476953] ? i915_hotplug_work_func+0x206/0x2be
+[ 54.482208] ? worker_thread+0x4d5/0x6e2
+[ 54.486587] ? worker_thread+0x455/0x6e2
+[ 54.490966] ? queue_work_on+0x64/0x64
+[ 54.495151] ? kthread+0x1e9/0x1f1
+[ 54.498946] ? queue_work_on+0x64/0x64
+[ 54.503130] ? kthread_unpark+0x5e/0x5e
+[ 54.507413] ? ret_from_fork+0x3a/0x50
+
+The proper fix for this is probably to clean up the VCPI allocations when we're
+enabling the topology, or on the first payload allocation. For now though,
+let's just revert.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology mgr")
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Sean Paul <sean@poorly.run>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200117205149.97262-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 13 -------------
+ 1 file changed, 13 deletions(-)
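+
+The nesting that triggered the splat, condensed from the lines being reverted
+below: payload_lock was taken while mgr->lock was already held in set_mst(),
+whereas the dependency chain in the splat above takes the two locks in the
+opposite order.
+
+	mutex_lock(&mgr->lock);                 /* already held in set_mst() */
+	...
+	mutex_lock(&mgr->payload_lock);         /* reverted: inverts the lock order */
+	...
+	mutex_unlock(&mgr->payload_lock);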
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index ed0fea2ac3223..e993009fdb763 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3504,7 +3504,6 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+ {
+ int ret = 0;
+- int i = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+@@ -3565,22 +3564,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- mutex_lock(&mgr->payload_lock);
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+- for (i = 0; i < mgr->max_payloads; i++) {
+- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+-
+- if (vcpi) {
+- vcpi->vcpi = 0;
+- vcpi->num_slots = 0;
+- }
+- mgr->proposed_vcpis[i] = NULL;
+- }
+ mgr->vcpi_mask = 0;
+- mutex_unlock(&mgr->payload_lock);
+-
+ mgr->payload_id_table_cleared = false;
+ }
+
+--
+2.20.1
+
powerpc-kprobes-ignore-traps-that-happened-in-real-mode.patch
powerpc-64-prevent-stack-protection-in-early-boot.patch
arm64-always-force-a-branch-protection-mode-when-the-compiler-has-one.patch
+btrfs-handle-logged-extent-failure-properly.patch
+revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch
+drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch
+drm-amdgpu-fix-gfx-hang-during-suspend-with-video-pl.patch
+drm-i915-ggtt-do-not-set-bits-1-11-in-gen12-ptes.patch
+drm-i915-gt-fill-all-the-unused-space-in-the-ggtt.patch
+perf-core-unify-pinned-flexible-_sched_in.patch
+perf-core-fix-event-cgroup-tracking.patch
+perf-core-remove-struct-sched_in_data.patch
+powerpc-kasan-fix-kasan_remap_early_shadow_ro.patch