From f00ff6bc30dcf47caa902d00a4b73a4fbc6ab089 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 27 Jan 2022 16:19:33 +0100
Subject: [PATCH] 4.19-stable patches

added patches:
	drm-i915-flush-tlbs-before-releasing-backing-store.patch

---
 ...-tlbs-before-releasing-backing-store.patch | 204 ++++++++++++++++++
 queue-4.19/series                             |   1 +
 2 files changed, 205 insertions(+)
 create mode 100644 queue-4.19/drm-i915-flush-tlbs-before-releasing-backing-store.patch
 create mode 100644 queue-4.19/series

diff --git a/queue-4.19/drm-i915-flush-tlbs-before-releasing-backing-store.patch b/queue-4.19/drm-i915-flush-tlbs-before-releasing-backing-store.patch
new file mode 100644
index 00000000000..06195b77fcd
--- /dev/null
+++ b/queue-4.19/drm-i915-flush-tlbs-before-releasing-backing-store.patch
@@ -0,0 +1,204 @@
+From 7938d61591d33394a21bdd7797a245b65428f44c Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin
+Date: Tue, 19 Oct 2021 13:27:10 +0100
+Subject: drm/i915: Flush TLBs before releasing backing store
+
+From: Tvrtko Ursulin
+
+commit 7938d61591d33394a21bdd7797a245b65428f44c upstream.
+
+We need to flush TLBs before releasing backing store, otherwise userspace
+is able to encounter stale entries if a) it is not declaring access to
+certain buffers and b) it races with the backing store release from such
+an undeclared execution already executing on the GPU in parallel.
+
+The approach taken is to mark any buffer objects which were ever bound
+to the GPU and to trigger a serialized TLB flush when their backing
+store is released.
+
+Alternatively the flushing could be done on VMA unbind, at which point
+we would be able to ascertain whether there is potentially a parallel GPU
+execution (which could race), but essentially it boils down to paying
+the cost of TLB flushes potentially needlessly at VMA unbind time (when
+the backing store is not known to be going away so not needed for
+safety), versus potentially needlessly at backing store release time
+(since at that point we cannot tell whether there is anything executing
+on the GPU which uses that object).
+
+Therefore simplicity of implementation has been chosen for now, with
+scope to benchmark and refine later as required.
+
+Signed-off-by: Tvrtko Ursulin
+Reported-by: Sushma Venkatesh Reddy
+Reviewed-by: Daniel Vetter
+Acked-by: Dave Airlie
+Cc: Daniel Vetter
+Cc: Jon Bloomfield
+Cc: Joonas Lahtinen
+Cc: Jani Nikula
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/gpu/drm/i915/i915_drv.h        |  2
+ drivers/gpu/drm/i915/i915_gem.c        | 83 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/i915_gem_object.h |  1
+ drivers/gpu/drm/i915/i915_reg.h        |  6 ++
+ drivers/gpu/drm/i915/i915_vma.c        |  4 +
+ 5 files changed, 96 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1595,6 +1595,8 @@ struct drm_i915_private {
+ 
+ 	struct intel_uncore uncore;
+ 
++	struct mutex tlb_invalidate_lock;
++
+ 	struct i915_virtual_gpu vgpu;
+ 
+ 	struct intel_gvt *gvt;
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2446,6 +2446,78 @@ static void __i915_gem_object_reset_page
+ 	rcu_read_unlock();
+ }
+ 
++struct reg_and_bit {
++	i915_reg_t reg;
++	u32 bit;
++};
++
++static struct reg_and_bit
++get_reg_and_bit(const struct intel_engine_cs *engine,
++		const i915_reg_t *regs, const unsigned int num)
++{
++	const unsigned int class = engine->class;
++	struct reg_and_bit rb = { .bit = 1 };
++
++	if (WARN_ON_ONCE(class >= num || !regs[class].reg))
++		return rb;
++
++	rb.reg = regs[class];
++	if (class == VIDEO_DECODE_CLASS)
++		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
++
++	return rb;
++}
++
++static void invalidate_tlbs(struct drm_i915_private *dev_priv)
++{
++	static const i915_reg_t gen8_regs[] = {
++		[RENDER_CLASS]			= GEN8_RTCR,
++		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
++		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
++		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
++	};
++	const unsigned int num = ARRAY_SIZE(gen8_regs);
++	const i915_reg_t *regs = gen8_regs;
++	struct intel_engine_cs *engine;
++	enum intel_engine_id id;
++
++	if (INTEL_GEN(dev_priv) < 8)
++		return;
++
++	GEM_TRACE("\n");
++
++	assert_rpm_wakelock_held(dev_priv);
++
++	mutex_lock(&dev_priv->tlb_invalidate_lock);
++	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
++
++	for_each_engine(engine, dev_priv, id) {
++		/*
++		 * HW architecture suggests typical invalidation time at 40us,
++		 * with pessimistic cases up to 100us and a recommendation to
++		 * cap at 1ms. We go a bit higher just in case.
++		 */
++		const unsigned int timeout_us = 100;
++		const unsigned int timeout_ms = 4;
++		struct reg_and_bit rb;
++
++		rb = get_reg_and_bit(engine, regs, num);
++		if (!i915_mmio_reg_offset(rb.reg))
++			continue;
++
++		I915_WRITE_FW(rb.reg, rb.bit);
++		if (__intel_wait_for_register_fw(dev_priv,
++						 rb.reg, rb.bit, 0,
++						 timeout_us, timeout_ms,
++						 NULL))
++			DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
++					      engine->name, timeout_ms);
++	}
++
++	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++	mutex_unlock(&dev_priv->tlb_invalidate_lock);
++}
++
+ static struct sg_table *
+ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+ {
+@@ -2475,6 +2547,15 @@ __i915_gem_object_unset_pages(struct drm
+ 	__i915_gem_object_reset_page_iter(obj);
+ 	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ 
++	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
++		struct drm_i915_private *i915 = to_i915(obj->base.dev);
++
++		if (intel_runtime_pm_get_if_in_use(i915)) {
++			invalidate_tlbs(i915);
++			intel_runtime_pm_put(i915);
++		}
++	}
++
+ 	return pages;
+ }
+ 
+@@ -5792,6 +5873,8 @@ int i915_gem_init_early(struct drm_i915_
+ 
+ 	spin_lock_init(&dev_priv->fb_tracking.lock);
+ 
++	mutex_init(&dev_priv->tlb_invalidate_lock);
++
+ 	err = i915_gemfs_init(dev_priv);
+ 	if (err)
+ 		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
+--- a/drivers/gpu/drm/i915/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/i915_gem_object.h
+@@ -136,6 +136,7 @@ struct drm_i915_gem_object {
+ 	 * activity?
+ 	 */
+ #define I915_BO_ACTIVE_REF 0
++#define I915_BO_WAS_BOUND_BIT 1
+ 
+ 	/*
+ 	 * Is the object to be mapped as read-only to the GPU
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2431,6 +2431,12 @@ enum i915_power_well_id {
+ #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1 << 28)
+ #define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT	(1 << 24)
+ 
++#define GEN8_RTCR	_MMIO(0x4260)
++#define GEN8_M1TCR	_MMIO(0x4264)
++#define GEN8_M2TCR	_MMIO(0x4268)
++#define GEN8_BTCR	_MMIO(0x426c)
++#define GEN8_VTCR	_MMIO(0x4270)
++
+ #if 0
+ #define PRB0_TAIL	_MMIO(0x2030)
+ #define PRB0_HEAD	_MMIO(0x2034)
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -335,6 +335,10 @@ int i915_vma_bind(struct i915_vma *vma,
+ 		return ret;
+ 
+ 	vma->flags |= bind_flags;
++
++	if (vma->obj)
++		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
++
+ 	return 0;
+ }
+ 
diff --git a/queue-4.19/series b/queue-4.19/series
new file mode 100644
index 00000000000..4f89d40aca8
--- /dev/null
+++ b/queue-4.19/series
@@ -0,0 +1 @@
+drm-i915-flush-tlbs-before-releasing-backing-store.patch
-- 
2.47.2
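
For anyone who wants the shape of the fix at a glance, here is a minimal
standalone C sketch of the mark-and-flush pattern the patch implements:
flag an object the first time it is bound to the GPU, then pay the
lock-serialized TLB flush only when a flagged object's backing store is
released. Every name below is an illustrative stand-in rather than an
i915 symbol, and forcewake, runtime PM and the per-engine TCR register
writes are deliberately elided.

	/*
	 * Illustrative sketch only -- not i915 code. Models: set a
	 * "was bound" bit at bind time, test-and-clear it at release
	 * time, and flush TLBs (serialized by a lock) only if it was set.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t tlb_invalidate_lock = PTHREAD_MUTEX_INITIALIZER;

	struct gem_object {
		bool was_bound;		/* models I915_BO_WAS_BOUND_BIT */
	};

	static void gem_bind(struct gem_object *obj)
	{
		/* i915 sets the bit in i915_vma_bind(); after this the
		 * GPU may hold TLB entries for the object's pages. */
		obj->was_bound = true;
	}

	static void invalidate_tlbs(void)
	{
		/* i915 walks every engine here and waits on its TCR
		 * register, capped at a few ms per engine. */
		pthread_mutex_lock(&tlb_invalidate_lock);
		puts("TLBs invalidated");
		pthread_mutex_unlock(&tlb_invalidate_lock);
	}

	static void gem_release_pages(struct gem_object *obj)
	{
		/* Flush only if the object was ever bound, so objects
		 * never seen by the GPU pay nothing on release. */
		if (obj->was_bound) {
			obj->was_bound = false;
			invalidate_tlbs();
		}
		/* ... hand the pages back to the system ... */
	}

	int main(void)
	{
		struct gem_object obj = { .was_bound = false };

		gem_bind(&obj);
		gem_release_pages(&obj);	/* flushes */
		gem_release_pages(&obj);	/* no flush needed */
		return 0;
	}

The trade-off the commit message describes is visible in this shape:
moving the flush into an unbind path would run it even when the pages
are not about to be returned to the system, while flushing at release
time runs at most once per object teardown.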