--- /dev/null
+From 651dabe27f9638f569f6a794f9d3cc1889cd315e Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 28 Sep 2020 23:15:10 +0100
+Subject: drm/i915/gem: Always test execution status on closing the context
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 651dabe27f9638f569f6a794f9d3cc1889cd315e upstream.
+
+Verify that if a context is active at the time it is closed, it is
+either persistent and preemptible (with hangcheck running) or it is
+removed from execution.
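+
+In rough terms, the close path now always funnels into the kill logic,
+with the ban decision folded into kill_context() itself; a condensed
+sketch of the resulting flow (iteration over the stale engine lists is
+elided, pos stands for each such list), not a literal excerpt:
+
+	static void kill_context(struct i915_gem_context *ctx)
+	{
+		/* Ban only contexts that are not being supervised */
+		bool ban = (!i915_gem_context_is_persistent(ctx) ||
+			    !ctx->i915->params.enable_hangcheck);
+
+		/* Always inspect execution; ban/evict only if required */
+		kill_engines(pos, ban);
+	}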
+
+Fixes: 9a40bddd47ca ("drm/i915/gt: Expose heartbeat interval via sysfs")
+Testcase: igt/gem_ctx_persistence/heartbeat-close
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v5.7+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200928221510.26044-3-chris@chris-wilson.co.uk
+(cherry picked from commit d3bb2f9b5ee66d5e000293edd6b6575e59d11db9)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_context.c | 48 +++++-----------------------
+ 1 file changed, 10 insertions(+), 38 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -390,24 +390,6 @@ __context_engines_static(const struct i9
+ return rcu_dereference_protected(ctx->engines, true);
+ }
+
+-static bool __reset_engine(struct intel_engine_cs *engine)
+-{
+- struct intel_gt *gt = engine->gt;
+- bool success = false;
+-
+- if (!intel_has_reset_engine(gt))
+- return false;
+-
+- if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+- &gt->reset.flags)) {
+- success = intel_engine_reset(engine, NULL) == 0;
+- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+- &gt->reset.flags);
+- }
+-
+- return success;
+-}
+-
+ static void __reset_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+ {
+@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel
+ * kill the banned context, we fallback to doing a local reset
+ * instead.
+ */
+- if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+- !intel_engine_pulse(engine))
+- return true;
+-
+- /* If we are unable to send a pulse, try resetting this engine. */
+- return __reset_engine(engine);
++ return intel_engine_pulse(engine) == 0;
+ }
+
+ static bool
+@@ -493,7 +470,7 @@ static struct intel_engine_cs *active_en
+ return engine;
+ }
+
+-static void kill_engines(struct i915_gem_engines *engines)
++static void kill_engines(struct i915_gem_engines *engines, bool ban)
+ {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+@@ -508,7 +485,7 @@ static void kill_engines(struct i915_gem
+ for_each_gem_engine(ce, engines, it) {
+ struct intel_engine_cs *engine;
+
+- if (intel_context_set_banned(ce))
++ if (ban && intel_context_set_banned(ce))
+ continue;
+
+ /*
+@@ -521,7 +498,7 @@ static void kill_engines(struct i915_gem
+ engine = active_engine(ce);
+
+ /* First attempt to gracefully cancel the context */
+- if (engine && !__cancel_engine(engine))
++ if (engine && !__cancel_engine(engine) && ban)
+ /*
+ * If we are unable to send a preemptive pulse to bump
+ * the context from the GPU, we have to resort to a full
+@@ -531,8 +508,10 @@ static void kill_engines(struct i915_gem
+ }
+ }
+
+-static void kill_stale_engines(struct i915_gem_context *ctx)
++static void kill_context(struct i915_gem_context *ctx)
+ {
++ bool ban = (!i915_gem_context_is_persistent(ctx) ||
++ !ctx->i915->params.enable_hangcheck);
+ struct i915_gem_engines *pos, *next;
+
+ spin_lock_irq(&ctx->stale.lock);
+@@ -545,7 +524,7 @@ static void kill_stale_engines(struct i9
+
+ spin_unlock_irq(&ctx->stale.lock);
+
+- kill_engines(pos);
++ kill_engines(pos, ban);
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+@@ -557,11 +536,6 @@ static void kill_stale_engines(struct i9
+ spin_unlock_irq(&ctx->stale.lock);
+ }
+
+-static void kill_context(struct i915_gem_context *ctx)
+-{
+- kill_stale_engines(ctx);
+-}
+-
+ static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+ {
+@@ -596,7 +570,7 @@ static void engines_idle_release(struct
+
+ kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+- kill_engines(engines);
++ kill_engines(engines, true);
+
+ i915_sw_fence_commit(&engines->fence);
+ }
+@@ -654,9 +628,7 @@ static void context_close(struct i915_ge
+ * case we opt to forcibly kill off all remaining requests on
+ * context close.
+ */
+- if (!i915_gem_context_is_persistent(ctx) ||
+- !ctx->i915->params.enable_hangcheck)
+- kill_context(ctx);
++ kill_context(ctx);
+
+ i915_gem_context_put(ctx);
+ }
--- /dev/null
+From 4caf017ee93703ba1c4504f3d73b50e6bbd4249e Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 15 Sep 2020 10:14:15 +0100
+Subject: drm/i915/gem: Avoid implicit vmap for highmem on x86-32
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 4caf017ee93703ba1c4504f3d73b50e6bbd4249e upstream.
+
+On 32b, highmem uses a finite set of indirect PTEs (i.e. vmap) to provide
+virtual mappings of the high pages. As these are finite, map_new_virtual()
+must wait for some other kmap() to finish when it runs out. If we map a
+large number of objects, there is no method for it to tell us to release
+the mappings, and we deadlock.
+
+However, if we make an explicit vmap of the page, that uses a larger
+vmalloc arena, and also has the ability to tell us to release unwanted
+mappings. Most importantly, it will fail and propagate an error instead
+of waiting forever.
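+
+A condensed sketch of the resulting single-page path (mirroring the
+hunk below, not a literal excerpt):
+
+	if (n_pte == 1 && type == I915_MAP_WB) {
+		struct page *page = sg_page(sgt->sgl);
+
+		/* kmap only pages below the 32b boundary; no vmap arena */
+		if (!PageHighMem(page))
+			return kmap(page);
+
+		/* otherwise fall through to the explicit vmap path */
+	}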
+
+Fixes: fb8621d3bee8 ("drm/i915: Avoid allocating a vmap arena for a single page") #x86-32
+References: e87666b52f00 ("drm/i915/shrinker: Hook up vmap allocation failure notifier")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v4.7+
+Link: https://patchwork.freedesktop.org/patch/msgid/20200915091417.4086-1-chris@chris-wilson.co.uk
+(cherry picked from commit 060bb115c2d664f04db9c7613a104dfaef3fdd98)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_pages.c | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+@@ -255,8 +255,30 @@ static void *i915_gem_object_map(struct
+ return NULL;
+
+ /* A single page can always be kmapped */
+- if (n_pte == 1 && type == I915_MAP_WB)
+- return kmap(sg_page(sgt->sgl));
++ if (n_pte == 1 && type == I915_MAP_WB) {
++ struct page *page = sg_page(sgt->sgl);
++
++ /*
++ * On 32b, highmem uses a finite set of indirect PTEs (i.e.
++ * vmap) to provide virtual mappings of the high pages.
++ * As these are finite, map_new_virtual() must wait for some
++ * other kmap() to finish when it runs out. If we map a large
++ * number of objects, there is no method for it to tell us
++ * to release the mappings, and we deadlock.
++ *
++ * However, if we make an explicit vmap of the page, that
++ * uses a larger vmalloc arena, and also has the ability
++ * to tell us to release unwanted mappings. Most importantly,
++ * it will fail and propagate an error instead of waiting
++ * forever.
++ *
++ * So if the page is beyond the 32b boundary, make an explicit
++ * vmap. On 64b, this check will be optimised away as we can
++ * directly kmap any page on the system.
++ */
++ if (!PageHighMem(page))
++ return kmap(page);
++ }
+
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
--- /dev/null
+From ba2ebf605d5f32a9be0f7b05d3174bbc501b83fe Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 15 Sep 2020 10:14:16 +0100
+Subject: drm/i915/gem: Prevent using pgprot_writecombine() if PAT is not supported
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit ba2ebf605d5f32a9be0f7b05d3174bbc501b83fe upstream.
+
+Let's not try to use PAT attributes for I915_MAP_WC if the CPU doesn't
+support PAT.
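+
+The fix is a single early-out in i915_gem_object_map(); a condensed
+sketch of the guard added below:
+
+	/* I915_MAP_WC depends on pgprot_writecombine(), i.e. on PAT */
+	if (GEM_WARN_ON(type == I915_MAP_WC &&
+			!static_cpu_has(X86_FEATURE_PAT)))
+		return NULL;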
+
+Fixes: 6056e50033d9 ("drm/i915/gem: Support discontiguous lmem object maps")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v5.6+
+Link: https://patchwork.freedesktop.org/patch/msgid/20200915091417.4086-2-chris@chris-wilson.co.uk
+(cherry picked from commit 121ba69ffddc60df11da56f6d5b29bdb45c8eb80)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_pages.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+@@ -254,6 +254,10 @@ static void *i915_gem_object_map(struct
+ if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ return NULL;
+
++ if (GEM_WARN_ON(type == I915_MAP_WC &&
++ !static_cpu_has(X86_FEATURE_PAT)))
++ return NULL;
++
+ /* A single page can always be kmapped */
+ if (n_pte == 1 && type == I915_MAP_WB) {
+ struct page *page = sg_page(sgt->sgl);
--- /dev/null
+From ca65fc0d8e01dca8fc82f0ccf433725469256c71 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 28 Sep 2020 23:15:09 +0100
+Subject: drm/i915/gt: Always send a pulse down the engine after disabling heartbeat
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit ca65fc0d8e01dca8fc82f0ccf433725469256c71 upstream.
+
+Currently, we send a pulse prior to disabling the heartbeat to verify
+that we can change the heartbeat; but since execution may be
+re-evaluated when the heartbeat interval changes, we need another pulse
+afterwards to refresh execution.
+
+v2: Tvrtko asked if we could reduce the double pulse to a single one,
+which opened up a discussion of how we should handle a pulse error
+after attempting to change the property, and of the desire to serialise
+adjustment of the property with its validating pulse and to unwind upon
+failure.
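+
+The resulting sequence in intel_engine_set_heartbeat(), condensed from
+the hunk below (locking and error paths abbreviated):
+
+	mutex_lock(&ce->timeline->mutex); /* serialise with the pulse */
+	if (delay != engine->props.heartbeat_interval_ms) {
+		unsigned long saved = set_heartbeat(engine, delay);
+
+		/* recheck current execution under the new interval */
+		if (intel_engine_has_preemption(engine)) {
+			err = __intel_engine_pulse(engine);
+			if (err)
+				set_heartbeat(engine, saved); /* unwind */
+		}
+	}
+	mutex_unlock(&ce->timeline->mutex);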
+
+Fixes: 9a40bddd47ca ("drm/i915/gt: Expose heartbeat interval via sysfs")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v5.7+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200928221510.26044-2-chris@chris-wilson.co.uk
+(cherry picked from commit 3dd66a94de59d7792e7917eb3075342e70f06f44)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 106 ++++++++++++++---------
+ 1 file changed, 67 insertions(+), 39 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+@@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+ }
+
++static int __intel_engine_pulse(struct intel_engine_cs *engine)
++{
++ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
++ struct intel_context *ce = engine->kernel_context;
++ struct i915_request *rq;
++
++ lockdep_assert_held(&ce->timeline->mutex);
++ GEM_BUG_ON(!intel_engine_has_preemption(engine));
++ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
++
++ intel_context_enter(ce);
++ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
++ intel_context_exit(ce);
++ if (IS_ERR(rq))
++ return PTR_ERR(rq);
++
++ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
++ idle_pulse(engine, rq);
++
++ __i915_request_commit(rq);
++ __i915_request_queue(rq, &attr);
++ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
++
++ return 0;
++}
++
++static unsigned long set_heartbeat(struct intel_engine_cs *engine,
++ unsigned long delay)
++{
++ unsigned long old;
++
++ old = xchg(&engine->props.heartbeat_interval_ms, delay);
++ if (delay)
++ intel_engine_unpark_heartbeat(engine);
++ else
++ intel_engine_park_heartbeat(engine);
++
++ return old;
++}
++
+ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+ {
+- int err;
++ struct intel_context *ce = engine->kernel_context;
++ int err = 0;
+
+- /* Send one last pulse before to cleanup persistent hogs */
+- if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+- err = intel_engine_pulse(engine);
+- if (err)
+- return err;
+- }
++ if (!delay && !intel_engine_has_preempt_reset(engine))
++ return -ENODEV;
++
++ intel_engine_pm_get(engine);
++
++ err = mutex_lock_interruptible(&ce->timeline->mutex);
++ if (err)
++ goto out_rpm;
+
+- WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
++ if (delay != engine->props.heartbeat_interval_ms) {
++ unsigned long saved = set_heartbeat(engine, delay);
+
+- if (intel_engine_pm_get_if_awake(engine)) {
+- if (delay)
+- intel_engine_unpark_heartbeat(engine);
+- else
+- intel_engine_park_heartbeat(engine);
+- intel_engine_pm_put(engine);
++ /* recheck current execution */
++ if (intel_engine_has_preemption(engine)) {
++ err = __intel_engine_pulse(engine);
++ if (err)
++ set_heartbeat(engine, saved);
++ }
+ }
+
+- return 0;
++ mutex_unlock(&ce->timeline->mutex);
++
++out_rpm:
++ intel_engine_pm_put(engine);
++ return err;
+ }
+
+ int intel_engine_pulse(struct intel_engine_cs *engine)
+ {
+- struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+- struct i915_request *rq;
+ int err;
+
+ if (!intel_engine_has_preemption(engine))
+@@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engi
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+- if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+- err = -EINTR;
+- goto out_rpm;
++ err = -EINTR;
++ if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
++ err = __intel_engine_pulse(engine);
++ mutex_unlock(&ce->timeline->mutex);
+ }
+
+- intel_context_enter(ce);
+- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+- intel_context_exit(ce);
+- if (IS_ERR(rq)) {
+- err = PTR_ERR(rq);
+- goto out_unlock;
+- }
+-
+- __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+- idle_pulse(engine, rq);
+-
+- __i915_request_commit(rq);
+- __i915_request_queue(rq, &attr);
+- GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+- err = 0;
+-
+-out_unlock:
+- mutex_unlock(&ce->timeline->mutex);
+-out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+ }
--- /dev/null
+drm-i915-gem-avoid-implicit-vmap-for-highmem-on-x86-32.patch
+drm-i915-gem-prevent-using-pgprot_writecombine-if-pat-is-not-supported.patch
+drm-i915-gem-always-test-execution-status-on-closing-the-context.patch
+drm-i915-gt-always-send-a-pulse-down-the-engine-after-disabling-heartbeat.patch