--- /dev/null
+From 88209a8ecb8b8752322908a3c3362a001bdc3a39 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 9 Sep 2021 09:47:25 -0700
+Subject: drm/i915/guc: Don't drop ce->guc_active.lock when unwinding context
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 88209a8ecb8b8752322908a3c3362a001bdc3a39 upstream.
+
+Don't drop ce->guc_active.lock when unwinding a context after reset.
+At one point we had to drop this because of a lock inversion but that is
+no longer the case. It is much safer to hold the lock so let's do that.
+
+Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-5-matthew.brost@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -814,8 +814,6 @@ __unwind_incomplete_requests(struct inte
+ continue;
+
+ list_del_init(&rq->sched.link);
+- spin_unlock(&ce->guc_active.lock);
+-
+ __i915_request_unsubmit(rq);
+
+ /* Push the request back into the queue for later resubmission. */
+@@ -828,8 +826,6 @@ __unwind_incomplete_requests(struct inte
+
+ list_add_tail(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+-
+- spin_lock(&ce->guc_active.lock);
+ }
+ spin_unlock(&ce->guc_active.lock);
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
--- /dev/null
+From 9888beaaf118b6878347e1fe2b369fc66d756d18 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 9 Sep 2021 09:47:30 -0700
+Subject: drm/i915/guc: Don't enable scheduling on a banned context, guc_id invalid, not registered
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 9888beaaf118b6878347e1fe2b369fc66d756d18 upstream.
+
+When unblocking a context, do not enable scheduling if the context is
+banned, guc_id invalid, or not registered.
+
+v2:
+ (Daniele)
+ - Add helper for unblock
+
+Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-10-matthew.brost@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -148,6 +148,7 @@ static inline void clr_context_registere
+ #define SCHED_STATE_BLOCKED_SHIFT 4
+ #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
+ #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
++
+ static inline void init_sched_state(struct intel_context *ce)
+ {
+ /* Only should be called from guc_lrc_desc_pin() */
+@@ -1549,6 +1550,23 @@ static struct i915_sw_fence *guc_context
+ return &ce->guc_blocked;
+ }
+
++#define SCHED_STATE_MULTI_BLOCKED_MASK \
++ (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
++#define SCHED_STATE_NO_UNBLOCK \
++ (SCHED_STATE_MULTI_BLOCKED_MASK | \
++ SCHED_STATE_PENDING_DISABLE | \
++ SCHED_STATE_BANNED)
++
++static bool context_cant_unblock(struct intel_context *ce)
++{
++ lockdep_assert_held(&ce->guc_state.lock);
++
++ return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
++ context_guc_id_invalid(ce) ||
++ !lrc_desc_registered(ce_to_guc(ce), ce->guc_id) ||
++ !intel_context_is_pinned(ce);
++}
++
+ static void guc_context_unblock(struct intel_context *ce)
+ {
+ struct intel_guc *guc = ce_to_guc(ce);
+@@ -1563,9 +1581,7 @@ static void guc_context_unblock(struct i
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ if (unlikely(submission_disabled(guc) ||
+- !intel_context_is_pinned(ce) ||
+- context_pending_disable(ce) ||
+- context_blocked(ce) > 1)) {
++ context_cant_unblock(ce))) {
+ enable = false;
+ } else {
+ enable = true;
--- /dev/null
+From 669b949c1a44d0cb2bcd18ff6ab4fd0c21e7cf6f Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 9 Sep 2021 09:47:23 -0700
+Subject: drm/i915/guc: Fix outstanding G2H accounting
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 669b949c1a44d0cb2bcd18ff6ab4fd0c21e7cf6f upstream.
+
+There is a small race that could result in incorrect accounting of the
+number of outstanding G2H. Basically, prior to this patch we did not
+increment the number of outstanding G2H if we encountered a GT reset
+while sending a H2G. This was incorrect as the context state had already
+been updated to anticipate a G2H response, thus the counter should be
+incremented.
+
+As part of this change we remove a legacy (now unused) path that was the
+last caller requiring a G2H response that was not guaranteed to loop.
+This allows us to simplify the accounting as we don't need to handle the
+case where the send fails due to the channel being busy.
+
+Also always use helper when decrementing this value.
+
+v2 (Daniele): update GEM_BUG_ON check, pull in dead code removal from
+later patch, remove loop param from context_deregister.
+
+Fixes: f4eb1f3fe946 ("drm/i915/guc: Ensure G2H response has space in buffer")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-3-matthew.brost@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 79 ++++++++++------------
+ 1 file changed, 37 insertions(+), 42 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -352,20 +352,29 @@ static inline void set_lrc_desc_register
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+ }
+
++static void decr_outstanding_submission_g2h(struct intel_guc *guc)
++{
++ if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
++ wake_up_all(&guc->ct.wq);
++}
++
+ static int guc_submission_send_busy_loop(struct intel_guc *guc,
+ const u32 *action,
+ u32 len,
+ u32 g2h_len_dw,
+ bool loop)
+ {
+- int err;
+-
+- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
++ /*
++ * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
++ * so we don't handle the case where we don't get a reply because we
++ * aborted the send due to the channel being busy.
++ */
++ GEM_BUG_ON(g2h_len_dw && !loop);
+
+- if (!err && g2h_len_dw)
++ if (g2h_len_dw)
+ atomic_inc(&guc->outstanding_submission_g2h);
+
+- return err;
++ return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ }
+
+ int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
+@@ -616,7 +625,7 @@ static void scrub_guc_desc_for_outstandi
+ init_sched_state(ce);
+
+ if (pending_enable || destroyed || deregister) {
+- atomic_dec(&guc->outstanding_submission_g2h);
++ decr_outstanding_submission_g2h(guc);
+ if (deregister)
+ guc_signal_context_fence(ce);
+ if (destroyed) {
+@@ -635,7 +644,7 @@ static void scrub_guc_desc_for_outstandi
+ intel_engine_signal_breadcrumbs(ce->engine);
+ }
+ intel_context_sched_disable_unpin(ce);
+- atomic_dec(&guc->outstanding_submission_g2h);
++ decr_outstanding_submission_g2h(guc);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ guc_blocked_fence_complete(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+@@ -1233,8 +1242,7 @@ static int register_context(struct intel
+ }
+
+ static int __guc_action_deregister_context(struct intel_guc *guc,
+- u32 guc_id,
+- bool loop)
++ u32 guc_id)
+ {
+ u32 action[] = {
+ INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
+@@ -1243,16 +1251,16 @@ static int __guc_action_deregister_conte
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_DEREGISTER_CONTEXT,
+- loop);
++ true);
+ }
+
+-static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop)
++static int deregister_context(struct intel_context *ce, u32 guc_id)
+ {
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ trace_intel_context_deregister(ce);
+
+- return __guc_action_deregister_context(guc, guc_id, loop);
++ return __guc_action_deregister_context(guc, guc_id);
+ }
+
+ static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
+@@ -1340,26 +1348,23 @@ static int guc_lrc_desc_pin(struct intel
+ * registering this context.
+ */
+ if (context_registered) {
++ bool disabled;
++ unsigned long flags;
++
+ trace_intel_context_steal_guc_id(ce);
+- if (!loop) {
++ GEM_BUG_ON(!loop);
++
++ /* Seal race with Reset */
++ spin_lock_irqsave(&ce->guc_state.lock, flags);
++ disabled = submission_disabled(guc);
++ if (likely(!disabled)) {
+ set_context_wait_for_deregister_to_register(ce);
+ intel_context_get(ce);
+- } else {
+- bool disabled;
+- unsigned long flags;
+-
+- /* Seal race with Reset */
+- spin_lock_irqsave(&ce->guc_state.lock, flags);
+- disabled = submission_disabled(guc);
+- if (likely(!disabled)) {
+- set_context_wait_for_deregister_to_register(ce);
+- intel_context_get(ce);
+- }
+- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+- if (unlikely(disabled)) {
+- reset_lrc_desc(guc, desc_idx);
+- return 0; /* Will get registered later */
+- }
++ }
++ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
++ if (unlikely(disabled)) {
++ reset_lrc_desc(guc, desc_idx);
++ return 0; /* Will get registered later */
+ }
+
+ /*
+@@ -1367,13 +1372,9 @@ static int guc_lrc_desc_pin(struct intel
+ * context whose guc_id was stolen.
+ */
+ with_intel_runtime_pm(runtime_pm, wakeref)
+- ret = deregister_context(ce, ce->guc_id, loop);
+- if (unlikely(ret == -EBUSY)) {
+- clr_context_wait_for_deregister_to_register(ce);
+- intel_context_put(ce);
+- } else if (unlikely(ret == -ENODEV)) {
++ ret = deregister_context(ce, ce->guc_id);
++ if (unlikely(ret == -ENODEV))
+ ret = 0; /* Will get registered later */
+- }
+ } else {
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ ret = register_context(ce, loop);
+@@ -1730,7 +1731,7 @@ static inline void guc_lrc_desc_unpin(st
+ GEM_BUG_ON(context_enabled(ce));
+
+ clr_context_registered(ce);
+- deregister_context(ce, ce->guc_id, true);
++ deregister_context(ce, ce->guc_id);
+ }
+
+ static void __guc_context_destroy(struct intel_context *ce)
+@@ -2583,12 +2584,6 @@ g2h_context_lookup(struct intel_guc *guc
+ return ce;
+ }
+
+-static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+-{
+- if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+- wake_up_all(&guc->ct.wq);
+-}
+-
+ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
+ const u32 *msg,
+ u32 len)
--- /dev/null
+From c39f51cc980dd918c5b3da61d54c4725785e766e Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 9 Sep 2021 09:47:24 -0700
+Subject: drm/i915/guc: Unwind context requests in reverse order
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit c39f51cc980dd918c5b3da61d54c4725785e766e upstream.
+
+When unwinding requests on a reset context, if other requests in the
+context are in the priority list the requests could be resubmitted out
+of seqno order. Traverse the list of active requests in reverse and
+append to the head of the priority list to fix this.
+
+Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-4-matthew.brost@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -807,9 +807,9 @@ __unwind_incomplete_requests(struct inte
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+ spin_lock(&ce->guc_active.lock);
+- list_for_each_entry_safe(rq, rn,
+- &ce->guc_active.requests,
+- sched.link) {
++ list_for_each_entry_safe_reverse(rq, rn,
++ &ce->guc_active.requests,
++ sched.link) {
+ if (i915_request_completed(rq))
+ continue;
+
+@@ -824,7 +824,7 @@ __unwind_incomplete_requests(struct inte
+ }
+ GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
+
+- list_add_tail(&rq->sched.link, pl);
++ list_add(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ }
+ spin_unlock(&ce->guc_active.lock);
--- /dev/null
+From 1ca36cff0166b0483fe3b99e711e9c800ebbfaa4 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 9 Sep 2021 09:47:27 -0700
+Subject: drm/i915/guc: Workaround reset G2H is received after schedule done G2H
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 1ca36cff0166b0483fe3b99e711e9c800ebbfaa4 upstream.
+
+If the context is reset as a result of the request cancellation the
+context reset G2H is received after schedule disable done G2H which is
+the wrong order. The schedule disable done G2H release the waiting
+request cancellation code which resubmits the context. This races
+with the context reset G2H which also wants to resubmit the context but
+in this case it really should be a NOP as request cancellation code owns
+the resubmit. Use some clever tricks of checking the context state to
+seal this race until the GuC firmware is fixed.
+
+v2:
+ (Checkpatch)
+ - Fix typos
+v3:
+ (Daniele)
+ - State that is a bug in the GuC firmware
+
+Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-7-matthew.brost@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 41 ++++++++++++++++++----
+ 1 file changed, 35 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -838,17 +838,33 @@ __unwind_incomplete_requests(struct inte
+ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ {
+ struct i915_request *rq;
++ unsigned long flags;
+ u32 head;
++ bool skip = false;
+
+ intel_context_get(ce);
+
+ /*
+- * GuC will implicitly mark the context as non-schedulable
+- * when it sends the reset notification. Make sure our state
+- * reflects this change. The context will be marked enabled
+- * on resubmission.
++ * GuC will implicitly mark the context as non-schedulable when it sends
++ * the reset notification. Make sure our state reflects this change. The
++ * context will be marked enabled on resubmission.
++ *
++ * XXX: If the context is reset as a result of the request cancellation
++ * this G2H is received after the schedule disable complete G2H which is
++ * wrong as this creates a race between the request cancellation code
++ * re-submitting the context and this G2H handler. This is a bug in the
++ * GuC but can be worked around in the meantime but converting this to a
++ * NOP if a pending enable is in flight as this indicates that a request
++ * cancellation has occurred.
+ */
+- clr_context_enabled(ce);
++ spin_lock_irqsave(&ce->guc_state.lock, flags);
++ if (likely(!context_pending_enable(ce)))
++ clr_context_enabled(ce);
++ else
++ skip = true;
++ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
++ if (unlikely(skip))
++ goto out_put;
+
+ rq = intel_context_find_active_request(ce);
+ if (!rq) {
+@@ -867,6 +883,7 @@ static void __guc_reset_context(struct i
+ out_replay:
+ guc_reset_state(ce, head, stalled);
+ __unwind_incomplete_requests(ce);
++out_put:
+ intel_context_put(ce);
+ }
+
+@@ -1618,6 +1635,13 @@ static void guc_context_cancel_request(s
+ guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
+ true);
+ }
++
++ /*
++ * XXX: Racey if context is reset, see comment in
++ * __guc_reset_context().
++ */
++ flush_work(&ce_to_guc(ce)->ct.requests.worker);
++
+ guc_context_unblock(ce);
+ }
+ }
+@@ -2732,7 +2756,12 @@ static void guc_handle_context_reset(str
+ {
+ trace_intel_context_reset(ce);
+
+- if (likely(!intel_context_is_banned(ce))) {
++ /*
++ * XXX: Racey if request cancellation has occurred, see comment in
++ * __guc_reset_context().
++ */
++ if (likely(!intel_context_is_banned(ce) &&
++ !context_blocked(ce))) {
+ capture_error_state(guc, ce);
+ guc_context_replay(ce);
+ }
--- /dev/null
+From 5591c8f79db1729d9c5ac7f5b4d3a5c26e262d93 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 25 Oct 2021 13:53:53 +0200
+Subject: drm/udl: fix control-message timeout
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 5591c8f79db1729d9c5ac7f5b4d3a5c26e262d93 upstream.
+
+USB control-message timeouts are specified in milliseconds and should
+specifically not vary with CONFIG_HZ.
+
+Fixes: 5320918b9a87 ("drm/udl: initial UDL driver (v4)")
+Cc: stable@vger.kernel.org # 3.4
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20211025115353.5089-1-johan@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/udl/udl_connector.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/udl/udl_connector.c
++++ b/drivers/gpu/drm/udl/udl_connector.c
+@@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data
+ int bval = (i + block * EDID_LENGTH) << 8;
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 0x02, (0x80 | (0x02 << 5)), bval,
+- 0xA1, read_buff, 2, HZ);
++ 0xA1, read_buff, 2, 1000);
+ if (ret < 1) {
+ DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
+ kfree(read_buff);
mac80211-drop-check-for-dont_reorder-in-__ieee80211_select_queue.patch
drm-amd-display-update-swizzle-mode-enums.patch
drm-amd-display-limit-max-dsc-target-bpp-for-specific-monitors.patch
+drm-i915-guc-fix-outstanding-g2h-accounting.patch
+drm-i915-guc-don-t-enable-scheduling-on-a-banned-context-guc_id-invalid-not-registered.patch
+drm-i915-guc-workaround-reset-g2h-is-received-after-schedule-done-g2h.patch
+drm-i915-guc-don-t-drop-ce-guc_active.lock-when-unwinding-context.patch
+drm-i915-guc-unwind-context-requests-in-reverse-order.patch
+drm-udl-fix-control-message-timeout.patch