--- /dev/null
+From 87b7ebc2e16c14d32a912f18206a4d6cc9abc3e8 Mon Sep 17 00:00:00 2001
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Date: Wed, 22 Apr 2020 17:43:26 -0400
+Subject: drm/amd/display: Fix green screen issue after suspend
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+commit 87b7ebc2e16c14d32a912f18206a4d6cc9abc3e8 upstream.
+
+[why]
+We have seen a green screen after resume from suspend on a Raven system
+connected to two displays (HDMI and DP) running an X-based system. We
+noticed that this issue is related to bad DCC metadata from user space,
+which may generate hangs and, consequently, an underflow on HUBP. After
+taking a deep look at the code path, we realized that after resume we
+try to restore the commit with the DCC-enabled framebuffer, but that
+framebuffer is no longer valid.
+
+[how]
+This problem was only reported on Raven-based systems, and only after
+suspend; for this reason, this commit adds a new parameter to
+fill_plane_dcc_attributes() to give callers the option of disabling DCC
+programmatically. In summary, to decide whether to disable DCC we first
+verify that it is a Raven system and that it is in suspend; if both
+conditions are true we disable DCC temporarily, otherwise it stays
+enabled.
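+
+The gist, as an illustrative sketch (the real hunks follow below; all
+names are taken from the driver code being patched):
+
+	/* only force-disable DCC on Raven while coming back from suspend */
+	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+					  &plane_info,
+					  &dc_plane_state->address,
+					  force_disable_dcc);
+
+Inside fill_plane_dcc_attributes(), a forced disable simply returns 0
+before any DCC surface parameters are computed, so the plane is
+programmed with DCC off.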
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1099
+Co-developed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 38 ++++++++++++++++------
+ 1 file changed, 29 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3212,7 +3212,8 @@ fill_plane_dcc_attributes(struct amdgpu_
+ const union dc_tiling_info *tiling_info,
+ const uint64_t info,
+ struct dc_plane_dcc_param *dcc,
+- struct dc_plane_address *address)
++ struct dc_plane_address *address,
++ bool force_disable_dcc)
+ {
+ struct dc *dc = adev->dm.dc;
+ struct dc_dcc_surface_param input;
+@@ -3224,6 +3225,9 @@ fill_plane_dcc_attributes(struct amdgpu_
+ memset(&input, 0, sizeof(input));
+ memset(&output, 0, sizeof(output));
+
++ if (force_disable_dcc)
++ return 0;
++
+ if (!offset)
+ return 0;
+
+@@ -3273,7 +3277,8 @@ fill_plane_buffer_attributes(struct amdg
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+- struct dc_plane_address *address)
++ struct dc_plane_address *address,
++ bool force_disable_dcc)
+ {
+ const struct drm_framebuffer *fb = &afb->base;
+ int ret;
+@@ -3379,7 +3384,8 @@ fill_plane_buffer_attributes(struct amdg
+
+ ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
+ plane_size, tiling_info,
+- tiling_flags, dcc, address);
++ tiling_flags, dcc, address,
++ force_disable_dcc);
+ if (ret)
+ return ret;
+ }
+@@ -3471,7 +3477,8 @@ fill_dc_plane_info_and_addr(struct amdgp
+ const struct drm_plane_state *plane_state,
+ const uint64_t tiling_flags,
+ struct dc_plane_info *plane_info,
+- struct dc_plane_address *address)
++ struct dc_plane_address *address,
++ bool force_disable_dcc)
+ {
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
+@@ -3550,7 +3557,8 @@ fill_dc_plane_info_and_addr(struct amdgp
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+- &plane_info->dcc, address);
++ &plane_info->dcc, address,
++ force_disable_dcc);
+ if (ret)
+ return ret;
+
+@@ -3573,6 +3581,7 @@ static int fill_dc_plane_attributes(stru
+ struct dc_plane_info plane_info;
+ uint64_t tiling_flags;
+ int ret;
++ bool force_disable_dcc = false;
+
+ ret = fill_dc_scaling_info(plane_state, &scaling_info);
+ if (ret)
+@@ -3587,9 +3596,11 @@ static int fill_dc_plane_attributes(stru
+ if (ret)
+ return ret;
+
++ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+ &plane_info,
+- &dc_plane_state->address);
++ &dc_plane_state->address,
++ force_disable_dcc);
+ if (ret)
+ return ret;
+
+@@ -5171,6 +5182,7 @@ static int dm_plane_helper_prepare_fb(st
+ uint64_t tiling_flags;
+ uint32_t domain;
+ int r;
++ bool force_disable_dcc = false;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+@@ -5229,11 +5241,13 @@ static int dm_plane_helper_prepare_fb(st
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
++ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ fill_plane_buffer_attributes(
+ adev, afb, plane_state->format, plane_state->rotation,
+ tiling_flags, &plane_state->tiling_info,
+ &plane_state->plane_size, &plane_state->dcc,
+- &plane_state->address);
++ &plane_state->address,
++ force_disable_dcc);
+ }
+
+ return 0;
+@@ -6514,7 +6528,12 @@ static void amdgpu_dm_commit_planes(stru
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ &bundle->plane_infos[planes_count],
+- &bundle->flip_addrs[planes_count].address);
++ &bundle->flip_addrs[planes_count].address,
++ false);
++
++ DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
++ new_plane_state->plane->index,
++ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+@@ -7935,7 +7954,8 @@ dm_determine_update_type_for_commit(stru
+ ret = fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ plane_info,
+- &flip_addr->address);
++ &flip_addr->address,
++ false);
+ if (ret)
+ goto cleanup;
+
--- /dev/null
+From fdf83646c0542ecfb9adc4db8f741a1f43dca058 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Mon, 27 Apr 2020 15:59:22 -0400
+Subject: drm/amdgpu: invalidate L2 before SDMA IBs (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Olšák <marek.olsak@amd.com>
+
+commit fdf83646c0542ecfb9adc4db8f741a1f43dca058 upstream.
+
+This fixes GPU hangs due to cache coherency issues.
+
+v2: Split the version bump to a separate patch
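+
+In sketch form, the fix emits one extra GCR_REQ packet (5 dwords,
+accounted for in the emit_ib_size bump to 5 + 7 + 6) at the head of
+every SDMA IB, asking the engine to write back and invalidate the GL2
+and GLM caches before the IB's commands execute:
+
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
+				 SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB) << 16);
+	amdgpu_ring_write(ring, 0xffffff80);
+	amdgpu_ring_write(ring, 0xffff);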
+
+Signed-off-by: Marek Olšák <marek.olsak@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 14 +++++++++++++-
+ 2 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
++++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+@@ -73,6 +73,22 @@
+ #define SDMA_OP_AQL_COPY 0
+ #define SDMA_OP_AQL_BARRIER_OR 0
+
++#define SDMA_GCR_RANGE_IS_PA (1 << 18)
++#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
++#define SDMA_GCR_GL2_WB (1 << 15)
++#define SDMA_GCR_GL2_INV (1 << 14)
++#define SDMA_GCR_GL2_DISCARD (1 << 13)
++#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
++#define SDMA_GCR_GL2_US (1 << 10)
++#define SDMA_GCR_GL1_INV (1 << 9)
++#define SDMA_GCR_GLV_INV (1 << 8)
++#define SDMA_GCR_GLK_INV (1 << 7)
++#define SDMA_GCR_GLK_WB (1 << 6)
++#define SDMA_GCR_GLM_INV (1 << 5)
++#define SDMA_GCR_GLM_WB (1 << 4)
++#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
++#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
++
+ /*define for op field*/
+ #define SDMA_PKT_HEADER_op_offset 0
+ #define SDMA_PKT_HEADER_op_mask 0x000000FF
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struc
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
+
++ /* Invalidate L2, because if we don't do it, we might get stale cache
++ * lines from previous IBs.
++ */
++ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
++ amdgpu_ring_write(ring, 0);
++ amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
++ SDMA_GCR_GL2_WB |
++ SDMA_GCR_GLM_INV |
++ SDMA_GCR_GLM_WB) << 16);
++ amdgpu_ring_write(ring, 0xffffff80);
++ amdgpu_ring_write(ring, 0xffff);
++
+ /* An IB packet must end on a 8 DW boundary--the next dword
+ * must be on a 8-dword boundary. Our IB packet below is 6
+ * dwords long, thus add x number of NOPs, such that, in
+@@ -1597,7 +1609,7 @@ static const struct amdgpu_ring_funcs sd
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
+ 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
+- .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
++ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
+ .emit_ib = sdma_v5_0_ring_emit_ib,
+ .emit_fence = sdma_v5_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
--- /dev/null
+From 6292b8efe32e6be408af364132f09572aed14382 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Thu, 23 Apr 2020 18:17:43 +0300
+Subject: drm/edid: Fix off-by-one in DispID DTD pixel clock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 6292b8efe32e6be408af364132f09572aed14382 upstream.
+
+The DispID DTD pixel clock is documented as:
+"00 00 00 h → FF FF FF h | Pixel clock ÷ 10,000 | 0.01 → 167,772.16 Mega Pixels per Sec"
+which seems to imply that we need to add one to the raw value.
+
+Reality seems to agree: there are tiled displays in the wild which
+currently show a 10 kHz difference in the pixel clock between the tiles
+(one tile gets its mode from the base EDID, the other from the DispID
+block).
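+
+As a worked example (raw value made up for illustration): a stored
+24-bit value of 53,324 decodes to (53324 + 1) * 10 kHz = 533,250 kHz,
+i.e. 533.25 MHz; without the +1 the mode comes out 10 kHz short, which
+is exactly the per-tile mismatch described above:
+
+	/* the 3-byte little-endian field N encodes (N + 1) x 10 kHz */
+	unsigned pixel_clock = (timings->pixel_clock[0] |
+				(timings->pixel_clock[1] << 8) |
+				(timings->pixel_clock[2] << 16)) + 1;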
+
+Cc: stable@vger.kernel.org
+References: https://gitlab.freedesktop.org/drm/intel/-/issues/27
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200423151743.18767-1-ville.syrjala@linux.intel.com
+Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_edid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5009,7 +5009,7 @@ static struct drm_display_mode *drm_mode
+ struct drm_display_mode *mode;
+ unsigned pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+- (timings->pixel_clock[2] << 16));
++ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
+ unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
+ unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
--- /dev/null
+From f524a774a4ff702bdfbfc094c9dd463ee623252b Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 22 Apr 2020 08:28:05 +0100
+Subject: drm/i915/gem: Hold obj->vma.lock over for_each_ggtt_vma()
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit f524a774a4ff702bdfbfc094c9dd463ee623252b upstream.
+
+While the ggtt vmas are protected by their object's lifetime, the list
+continues until it hits a non-ggtt vma, and that vma is not protected
+and may be freed as we inspect it. Hence, we require the obj->vma.lock
+to protect the list as we iterate.
+
+An example of forgetting to hold the obj->vma.lock is
+
+[1642834.464973] general protection fault, probably for non-canonical address 0xdead000000000122: 0000 [#1] SMP PTI
+[1642834.464977] CPU: 3 PID: 1954 Comm: Xorg Not tainted 5.6.0-300.fc32.x86_64 #1
+[1642834.464979] Hardware name: LENOVO 20ARS25701/20ARS25701, BIOS GJET94WW (2.44 ) 09/14/2017
+[1642834.465021] RIP: 0010:i915_gem_object_set_tiling+0x2c0/0x3e0 [i915]
+[1642834.465024] Code: 8b 84 24 18 01 00 00 f6 c4 80 74 59 49 8b 94 24 a0 00 00 00 49 8b 84 24 e0 00 00 00 49 8b 74 24 10 48 8b 92 30 01 00 00 89 c7 <80> ba 0a 06 00 00 03 0f 87 86 00 00 00 ba 00 00 08 00 b9 00 00 10
+[1642834.465025] RSP: 0018:ffffa98780c77d60 EFLAGS: 00010282
+[1642834.465028] RAX: ffff8d232bfb2578 RBX: 0000000000000002 RCX: ffff8d25873a0000
+[1642834.465029] RDX: dead000000000122 RSI: fffff0af8ac6e408 RDI: 000000002bfb2578
+[1642834.465030] RBP: ffff8d25873a0000 R08: ffff8d252bfb5638 R09: 0000000000000000
+[1642834.465031] R10: 0000000000000000 R11: ffff8d252bfb5640 R12: ffffa987801cb8f8
+[1642834.465032] R13: 0000000000001000 R14: ffff8d233e972e50 R15: ffff8d233e972d00
+[1642834.465034] FS: 00007f6a3d327f00(0000) GS:ffff8d25926c0000(0000) knlGS:0000000000000000
+[1642834.465036] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[1642834.465037] CR2: 00007f6a2064d000 CR3: 00000002fb57c001 CR4: 00000000001606e0
+[1642834.465038] Call Trace:
+[1642834.465083] i915_gem_set_tiling_ioctl+0x122/0x230 [i915]
+[1642834.465121] ? i915_gem_object_set_tiling+0x3e0/0x3e0 [i915]
+[1642834.465151] drm_ioctl_kernel+0x86/0xd0 [drm]
+[1642834.465156] ? avc_has_perm+0x3b/0x160
+[1642834.465178] drm_ioctl+0x206/0x390 [drm]
+[1642834.465216] ? i915_gem_object_set_tiling+0x3e0/0x3e0 [i915]
+[1642834.465221] ? selinux_file_ioctl+0x122/0x1c0
+[1642834.465226] ? __do_munmap+0x24b/0x4d0
+[1642834.465231] ksys_ioctl+0x82/0xc0
+[1642834.465235] __x64_sys_ioctl+0x16/0x20
+[1642834.465238] do_syscall_64+0x5b/0xf0
+[1642834.465243] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[1642834.465245] RIP: 0033:0x7f6a3d7b047b
+[1642834.465247] Code: 0f 1e fa 48 8b 05 1d aa 0c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d ed a9 0c 00 f7 d8 64 89 01 48
+[1642834.465249] RSP: 002b:00007ffe71adba28 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+[1642834.465251] RAX: ffffffffffffffda RBX: 000055f99048fa40 RCX: 00007f6a3d7b047b
+[1642834.465253] RDX: 00007ffe71adba30 RSI: 00000000c0106461 RDI: 000000000000000e
+[1642834.465254] RBP: 0000000000000002 R08: 000055f98f3f1798 R09: 0000000000000002
+[1642834.465255] R10: 0000000000001000 R11: 0000000000000246 R12: 0000000000000080
+[1642834.465257] R13: 000055f98f3f1690 R14: 00000000c0106461 R15: 00007ffe71adba30
+
+Now, to take the spinlock during the list iteration, we need to break
+the operation down into two phases. In the first phase, under the lock,
+we cannot sleep, and so we must defer the actual unbind work to a
+second list, processed under the ggtt->vm.mutex.
+
+We also need to hold the spinlock during creation of a new vma to
+serialise with updates of the tiling on the object.
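+
+The resulting shape of i915_gem_object_fence_prepare() is, as a sketch
+(error handling elided; the full hunk follows):
+
+	LIST_HEAD(unbind);
+
+	mutex_lock(&ggtt->vm.mutex);
+
+	spin_lock(&obj->vma.lock);		/* phase 1: may not sleep */
+	for_each_ggtt_vma(vma, obj) {
+		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
+			continue;
+		list_move(&vma->vm_link, &unbind);
+	}
+	spin_unlock(&obj->vma.lock);
+
+	list_for_each_entry_safe(vma, vn, &unbind, vm_link)
+		ret = __i915_vma_unbind(vma);	/* phase 2: may sleep */
+
+	mutex_unlock(&ggtt->vm.mutex);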
+
+Reported-by: Dave Airlie <airlied@redhat.com>
+Fixes: 2850748ef876 ("drm/i915: Pull i915_vma_pin under the vm->mutex")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: <stable@vger.kernel.org> # v5.5+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200422072805.17340-1-chris@chris-wilson.co.uk
+(cherry picked from commit cb593e5d2b6d3ad489669914d9fd1c64c7a4a6af)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 20 ++++++++++++++++++--
+ drivers/gpu/drm/i915/i915_vma.c | 10 ++++++----
+ 2 files changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+@@ -183,21 +183,35 @@ i915_gem_object_fence_prepare(struct drm
+ int tiling_mode, unsigned int stride)
+ {
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+- struct i915_vma *vma;
++ struct i915_vma *vma, *vn;
++ LIST_HEAD(unbind);
+ int ret = 0;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return 0;
+
+ mutex_lock(&ggtt->vm.mutex);
++
++ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
++ GEM_BUG_ON(vma->vm != &ggtt->vm);
++
+ if (i915_vma_fence_prepare(vma, tiling_mode, stride))
+ continue;
+
++ list_move(&vma->vm_link, &unbind);
++ }
++ spin_unlock(&obj->vma.lock);
++
++ list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
+ ret = __i915_vma_unbind(vma);
+- if (ret)
++ if (ret) {
++ /* Restore the remaining vma on an error */
++ list_splice(&unbind, &ggtt->vm.bound_list);
+ break;
++ }
+ }
++
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return ret;
+@@ -269,6 +283,7 @@ i915_gem_object_set_tiling(struct drm_i9
+ }
+ mutex_unlock(&obj->mm.lock);
+
++ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ vma->fence_size =
+ i915_gem_fence_size(i915, vma->size, tiling, stride);
+@@ -279,6 +294,7 @@ i915_gem_object_set_tiling(struct drm_i9
+ if (vma->fence)
+ vma->fence->dirty = true;
+ }
++ spin_unlock(&obj->vma.lock);
+
+ obj->tiling_and_stride = tiling | stride;
+ i915_gem_object_unlock(obj);
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *o
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
+
++ spin_lock(&obj->vma.lock);
++
+ if (i915_is_ggtt(vm)) {
+ if (unlikely(overflows_type(vma->size, u32)))
+- goto err_vma;
++ goto err_unlock;
+
+ vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ if (unlikely(vma->fence_size < vma->size || /* overflow */
+ vma->fence_size > vm->total))
+- goto err_vma;
++ goto err_unlock;
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
+
+@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *o
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
+ }
+
+- spin_lock(&obj->vma.lock);
+-
+ rb = NULL;
+ p = &obj->vma.tree.rb_node;
+ while (*p) {
+@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *o
+
+ return vma;
+
++err_unlock:
++ spin_unlock(&obj->vma.lock);
+ err_vma:
+ i915_vma_free(vma);
+ return ERR_PTR(-E2BIG);
--- /dev/null
+From 2abaad4eb59d1cdc903ea84c06acb406e2fbb263 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 27 Apr 2020 10:30:38 +0100
+Subject: drm/i915/gt: Check cacheline is valid before acquiring
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 2abaad4eb59d1cdc903ea84c06acb406e2fbb263 upstream.
+
+The hwsp_cacheline pointer from i915_request is very, very flimsy. The
+i915_request.timeline (and the hwsp_cacheline) are lost upon retiring
+(after an RCU grace period). Therefore we need to confirm that, once we
+have the right pointer for the cacheline, it is not in the process of
+being retired and disposed of before we attempt to acquire a reference
+to the cacheline.
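+
+The fix, sketched (full hunk below), is to re-check request completion
+under the RCU read lock before trying to take a reference on the
+cacheline's i915_active:
+
+	rcu_read_lock();
+	cl = rcu_dereference(from->hwsp_cacheline);
+	if (i915_request_completed(from))	/* cl may be retiring */
+		goto unlock;
+	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+		goto unlock;	/* seqno wrapped and completed! */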
+
+<3>[ 547.208237] BUG: KASAN: use-after-free in active_debug_hint+0x6a/0x70 [i915]
+<3>[ 547.208366] Read of size 8 at addr ffff88822a0d2710 by task gem_exec_parall/2536
+
+<4>[ 547.208547] CPU: 3 PID: 2536 Comm: gem_exec_parall Tainted: G U 5.7.0-rc2-ged7a286b5d02d-kasan_117+ #1
+<4>[ 547.208556] Hardware name: Dell Inc. XPS 13 9350/, BIOS 1.4.12 11/30/2016
+<4>[ 547.208564] Call Trace:
+<4>[ 547.208579] dump_stack+0x96/0xdb
+<4>[ 547.208707] ? active_debug_hint+0x6a/0x70 [i915]
+<4>[ 547.208719] print_address_description.constprop.6+0x16/0x310
+<4>[ 547.208841] ? active_debug_hint+0x6a/0x70 [i915]
+<4>[ 547.208963] ? active_debug_hint+0x6a/0x70 [i915]
+<4>[ 547.208975] __kasan_report+0x137/0x190
+<4>[ 547.209106] ? active_debug_hint+0x6a/0x70 [i915]
+<4>[ 547.209127] kasan_report+0x32/0x50
+<4>[ 547.209257] ? i915_gemfs_fini+0x40/0x40 [i915]
+<4>[ 547.209376] active_debug_hint+0x6a/0x70 [i915]
+<4>[ 547.209389] debug_print_object+0xa7/0x220
+<4>[ 547.209405] ? lockdep_hardirqs_on+0x348/0x5f0
+<4>[ 547.209426] debug_object_assert_init+0x297/0x430
+<4>[ 547.209449] ? debug_object_free+0x360/0x360
+<4>[ 547.209472] ? lock_acquire+0x1ac/0x8a0
+<4>[ 547.209592] ? intel_timeline_read_hwsp+0x4f/0x840 [i915]
+<4>[ 547.209737] ? i915_active_acquire_if_busy+0x66/0x120 [i915]
+<4>[ 547.209861] i915_active_acquire_if_busy+0x66/0x120 [i915]
+<4>[ 547.209990] ? __live_alloc.isra.15+0xc0/0xc0 [i915]
+<4>[ 547.210005] ? rcu_read_lock_sched_held+0xd0/0xd0
+<4>[ 547.210017] ? print_usage_bug+0x580/0x580
+<4>[ 547.210153] intel_timeline_read_hwsp+0xbc/0x840 [i915]
+<4>[ 547.210284] __emit_semaphore_wait+0xd5/0x480 [i915]
+<4>[ 547.210415] ? i915_fence_get_timeline_name+0x110/0x110 [i915]
+<4>[ 547.210428] ? lockdep_hardirqs_on+0x348/0x5f0
+<4>[ 547.210442] ? _raw_spin_unlock_irq+0x2a/0x40
+<4>[ 547.210567] ? __await_execution.constprop.51+0x2e0/0x570 [i915]
+<4>[ 547.210706] i915_request_await_dma_fence+0x8f7/0xc70 [i915]
+
+Fixes: 85bedbf191e8 ("drm/i915/gt: Eliminate the trylock for reading a timeline's hwsp")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v5.6+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200427093038.29219-1-chris@chris-wilson.co.uk
+(cherry picked from commit 2759e395358b2b909577928894f856ab75bea41a)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_timeline.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -519,6 +519,8 @@ int intel_timeline_read_hwsp(struct i915
+
+ rcu_read_lock();
+ cl = rcu_dereference(from->hwsp_cacheline);
++ if (i915_request_completed(from)) /* confirm cacheline is valid */
++ goto unlock;
+ if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+ goto unlock; /* seqno wrapped and completed! */
+ if (unlikely(i915_request_completed(from)))
--- /dev/null
+From 85e9b88af1e6164f19ec71381efd5e2bcfc17620 Mon Sep 17 00:00:00 2001
+From: Vasily Averin <vvs@virtuozzo.com>
+Date: Mon, 27 Apr 2020 08:32:46 +0300
+Subject: drm/qxl: qxl_release leak in qxl_draw_dirty_fb()
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+commit 85e9b88af1e6164f19ec71381efd5e2bcfc17620 upstream.
+
+On the error path taken when drawable_set_clipping() fails, ret is left
+at zero, so the allocated struct qxl_release is never released; ret
+should be set to an error code so that the cleanup path frees it.
+
+Cc: stable@vger.kernel.org
+Fixes: 8002db6336dd ("qxl: convert qxl driver to proper use for reservations")
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/22cfd55f-07c8-95d0-a2f7-191b7153c3d4@virtuozzo.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/qxl/qxl_draw.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/qxl/qxl_draw.c
++++ b/drivers/gpu/drm/qxl/qxl_draw.c
+@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device
+ goto out_release_backoff;
+
+ rects = drawable_set_clipping(qdev, num_clips, clips_bo);
+- if (!rects)
++ if (!rects) {
++ ret = -EINVAL;
+ goto out_release_backoff;
+-
++ }
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+ drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
--- /dev/null
+From a65aa9c3676ffccb21361d52fcfedd5b5ff387d7 Mon Sep 17 00:00:00 2001
+From: Vasily Averin <vvs@virtuozzo.com>
+Date: Mon, 27 Apr 2020 08:32:51 +0300
+Subject: drm/qxl: qxl_release leak in qxl_hw_surface_alloc()
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+commit a65aa9c3676ffccb21361d52fcfedd5b5ff387d7 upstream.
+
+Cc: stable@vger.kernel.org
+Fixes: 8002db6336dd ("qxl: convert qxl driver to proper use for reservations")
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/2e5a13ae-9ab2-5401-aa4d-03d5f5593423@virtuozzo.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/qxl/qxl_cmd.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_devi
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+- if (ret)
++ if (ret) {
++ qxl_release_free(qdev, release);
+ return ret;
+-
++ }
+ cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_SURFACE_CMD_CREATE;
+ cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
--- /dev/null
+From 933db73351d359f74b14f4af095808260aff11f9 Mon Sep 17 00:00:00 2001
+From: Vasily Averin <vvs@virtuozzo.com>
+Date: Wed, 29 Apr 2020 12:01:24 +0300
+Subject: drm/qxl: qxl_release use after free
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+commit 933db73351d359f74b14f4af095808260aff11f9 upstream.
+
+qxl_release must not be accessed after the qxl_push_*_ring_release()
+calls: the userspace driver can process the submitted command quickly,
+move the qxl_release into the release ring, generate an interrupt, and
+trigger the garbage collector.
+
+This can lead to crashes in the qxl driver or memory corruption in a
+kmalloc-192 slab object.
+
+Gerd Hoffmann proposes to swap the qxl_release_fence_buffer_objects() +
+qxl_push_{cursor,command}_ring_release() calls to close that race window.
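+
+In sketch form, every submission site changes from
+
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_fence_buffer_objects(release);	/* use after free! */
+
+to
+
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+
+so the release is fully fenced before the command is pushed and the
+release becomes eligible for garbage collection.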
+
+cc: stable@vger.kernel.org
+Fixes: f64122c1f6ad ("drm: add new QXL driver. (v1.4)")
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/fa17b338-66ae-f299-68fe-8d32419d9071@virtuozzo.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/qxl/qxl_cmd.c | 5 ++---
+ drivers/gpu/drm/qxl/qxl_display.c | 6 +++---
+ drivers/gpu/drm/qxl/qxl_draw.c | 2 +-
+ drivers/gpu/drm/qxl/qxl_ioctl.c | 5 +----
+ 4 files changed, 7 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -500,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_devi
+ /* no need to add a release to the fence for this surface bo,
+ since it is only released when we ask to destroy the surface
+ and it would never signal otherwise */
+- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+ surf->hw_surf_alloc = true;
+ spin_lock(&qdev->surf_id_idr_lock);
+@@ -543,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_de
+ cmd->surface_id = id;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+-
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+ return 0;
+ }
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -523,8 +523,8 @@ static int qxl_primary_apply_cursor(stru
+ cmd->u.set.visible = 1;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+ return ret;
+
+@@ -665,8 +665,8 @@ static void qxl_cursor_atomic_update(str
+ cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
+
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+ if (old_cursor_bo != NULL)
+ qxl_bo_unpin(old_cursor_bo);
+@@ -713,8 +713,8 @@ static void qxl_cursor_atomic_disable(st
+ cmd->type = QXL_CURSOR_HIDE;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ }
+
+ static void qxl_update_dumb_head(struct qxl_device *qdev,
+--- a/drivers/gpu/drm/qxl/qxl_draw.c
++++ b/drivers/gpu/drm/qxl/qxl_draw.c
+@@ -243,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device
+ }
+ qxl_bo_kunmap(clips_bo);
+
+- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+ qxl_release_fence_buffer_objects(release);
++ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+
+ out_release_backoff:
+ if (ret)
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -261,11 +261,8 @@ static int qxl_process_single_command(st
+ apply_surf_reloc(qdev, &reloc_info[i]);
+ }
+
++ qxl_release_fence_buffer_objects(release);
+ ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+- if (ret)
+- qxl_release_backoff_reserve_list(release);
+- else
+- qxl_release_fence_buffer_objects(release);
+
+ out_free_bos:
+ out_free_release:
--- /dev/null
+From dff58530c4ca8ce7ee5a74db431c6e35362cf682 Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <olga.kornievskaia@gmail.com>
+Date: Fri, 24 Apr 2020 17:45:50 -0400
+Subject: NFSv4.1: fix handling of backchannel binding in BIND_CONN_TO_SESSION
+
+From: Olga Kornievskaia <olga.kornievskaia@gmail.com>
+
+commit dff58530c4ca8ce7ee5a74db431c6e35362cf682 upstream.
+
+Currently, if the client sends BIND_CONN_TO_SESSION with
+NFS4_CDFC4_FORE_OR_BOTH but only gets NFS4_CDFS4_FORE back, it ignores
+the fact that it wasn't able to enable a backchannel.
+
+To make sure the backchannel is bound, the client sends
+BIND_CONN_TO_SESSION as the first operation on the connection (i.e.,
+before any other session compounds have been sent), and if the client's
+request to bind the backchannel is not satisfied, it resets the
+connection and retries.
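+
+Sketched, the bind completion handler now checks the direction the
+server actually granted and, when the backchannel is missing, forces a
+transport disconnect and a bounded number of retries (full hunks
+below):
+
+	if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
+	    res->dir != NFS4_CDFS4_BOTH) {
+		rpc_task_close_connection(task);	/* reset the connection */
+		if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
+			rpc_restart_call(task);		/* ... and retry */
+	}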
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 8 ++++++++
+ include/linux/nfs_xdr.h | 2 ++
+ include/linux/sunrpc/clnt.h | 5 +++++
+ 3 files changed, 15 insertions(+)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7893,6 +7893,7 @@ static void
+ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
+ {
+ struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
++ struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
+ struct nfs_client *clp = args->client;
+
+ switch (task->tk_status) {
+@@ -7901,6 +7902,12 @@ nfs4_bind_one_conn_to_session_done(struc
+ nfs4_schedule_session_recovery(clp->cl_session,
+ task->tk_status);
+ }
++ if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
++ res->dir != NFS4_CDFS4_BOTH) {
++ rpc_task_close_connection(task);
++ if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
++ rpc_restart_call(task);
++ }
+ }
+
+ static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
+@@ -7923,6 +7930,7 @@ int nfs4_proc_bind_one_conn_to_session(s
+ struct nfs41_bind_conn_to_session_args args = {
+ .client = clp,
+ .dir = NFS4_CDFC4_FORE_OR_BOTH,
++ .retries = 0,
+ };
+ struct nfs41_bind_conn_to_session_res res;
+ struct rpc_message msg = {
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1307,11 +1307,13 @@ struct nfs41_impl_id {
+ struct nfstime4 date;
+ };
+
++#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
+ struct nfs41_bind_conn_to_session_args {
+ struct nfs_client *client;
+ struct nfs4_sessionid sessionid;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
++ int retries;
+ };
+
+ struct nfs41_bind_conn_to_session_res {
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -236,4 +236,9 @@ static inline int rpc_reply_expected(str
+ (task->tk_msg.rpc_proc->p_decode != NULL);
+ }
+
++static inline void rpc_task_close_connection(struct rpc_task *task)
++{
++ if (task->tk_xprt)
++ xprt_force_disconnect(task->tk_xprt);
++}
+ #endif /* _LINUX_SUNRPC_CLNT_H */
drm-scheduler-fix-drm_sched_get_cleanup_job.patch
dma-buf-fix-set_name-ioctl-uapi.patch
+drm-amdgpu-invalidate-l2-before-sdma-ibs-v2.patch
+drm-edid-fix-off-by-one-in-dispid-dtd-pixel-clock.patch
+drm-amd-display-fix-green-screen-issue-after-suspend.patch
+drm-i915-gem-hold-obj-vma.lock-over-for_each_ggtt_vma.patch
+drm-i915-gt-check-cacheline-is-valid-before-acquiring.patch
+drm-qxl-qxl_release-leak-in-qxl_draw_dirty_fb.patch
+drm-qxl-qxl_release-leak-in-qxl_hw_surface_alloc.patch
+drm-qxl-qxl_release-use-after-free.patch
+nfsv4.1-fix-handling-of-backchannel-binding-in-bind_conn_to_session.patch