drm/i915/gvt: Stop using intel_runtime_pm_put_unchecked()
author    Ville Syrjälä <ville.syrjala@linux.intel.com>
          Tue, 11 Feb 2025 00:01:34 +0000 (02:01 +0200)
committer Ville Syrjälä <ville.syrjala@linux.intel.com>
          Tue, 25 Mar 2025 14:53:24 +0000 (16:53 +0200)
intel_runtime_pm_put_unchecked() is not meant to be used
outside the runtime pm implementation, so don't.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250211000135.6096-4-ville.syrjala@linux.intel.com
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
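
For context, a minimal caller-side sketch of the tracked-wakeref pattern this commit converts GVT to. do_hw_access() and touch_hardware() are hypothetical stand-ins; intel_wakeref_t, intel_runtime_pm_get() and intel_runtime_pm_put() are the i915 APIs exercised in the diff below.

/*
 * Illustrative sketch only, not part of the patch: take a runtime PM
 * reference, keep the tracking cookie, and release with that cookie.
 */
static int do_hw_access(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;
	int ret;

	/* Grab a runtime PM reference and remember the tracked cookie. */
	wakeref = intel_runtime_pm_get(rpm);

	ret = touch_hardware();	/* hypothetical MMIO work */

	/*
	 * Release with the matching cookie; intel_runtime_pm_put()
	 * pairs gets with puts for leak tracking, which the
	 * _unchecked() variant bypasses.
	 */
	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}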
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/sched_policy.c

diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98c977540c0c2cf248ec6a3d2c7a24..62d14f82256f68637cea5aedfe96aa7a7c3ebb04 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        unsigned int flags;
        u64 start, end, size;
        struct drm_mm_node *node;
+       intel_wakeref_t wakeref;
        int ret;
 
        if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        }
 
        mutex_lock(&gt->ggtt->vm.mutex);
-       mmio_hw_access_pre(gt);
+       wakeref = mmio_hw_access_pre(gt);
        ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
                                  size, I915_GTT_PAGE_SIZE,
                                  I915_COLOR_UNEVICTABLE,
                                  start, end, flags);
-       mmio_hw_access_post(gt);
+       mmio_hw_access_post(gt, wakeref);
        mutex_unlock(&gt->ggtt->vm.mutex);
        if (ret)
                gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
                vgpu->fence.regs[i] = NULL;
        }
        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
-       intel_runtime_pm_put_unchecked(uncore->rpm);
+       intel_runtime_pm_put(uncore->rpm, wakeref);
        return -ENOSPC;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b77108ea2fdd327103b667f9e86289..673534f061ef19803de99181b16a6df1d58da0f5 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
                .diff = 0,
        };
        struct diff_mmio *node, *next;
+       intel_wakeref_t wakeref;
 
        INIT_LIST_HEAD(&param.diff_mmio_list);
 
        mutex_lock(&gvt->lock);
        spin_lock_bh(&gvt->scheduler.mmio_context_lock);
 
-       mmio_hw_access_pre(gvt->gt);
+       wakeref = mmio_hw_access_pre(gvt->gt);
        /* Recognize all the diff mmios to list. */
        intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
-       mmio_hw_access_post(gvt->gt);
+       mmio_hw_access_post(gvt->gt, wakeref);
 
        spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
        mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d25873f3a6d38aa826a8b8e30a81b..ae9b0ded365137f460e63e9488be3bf7fbe76387 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 
 static void ggtt_invalidate(struct intel_gt *gt)
 {
-       mmio_hw_access_pre(gt);
+       intel_wakeref_t wakeref;
+
+       wakeref = mmio_hw_access_pre(gt);
        intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       mmio_hw_access_post(gt);
+       mmio_hw_access_post(gt, wakeref);
 }
 
 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f256e31686b86809f87ceb03ba4d67a..1d10c16e6465d3af5779d2d5bfa72f644008f778 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
        GVT_FAILSAFE_GUEST_ERR,
 };
 
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
 {
-       intel_runtime_pm_get(gt->uncore->rpm);
+       return intel_runtime_pm_get(gt->uncore->rpm);
 }
 
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+                                      intel_wakeref_t wakeref)
 {
-       intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+       intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873aab1a8b84eae89e26c3132fed8a4..02f45929592e5128a2ff9b4816216773f778abcd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -264,6 +264,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        unsigned int fence_num = offset_to_fence_num(off);
+       intel_wakeref_t wakeref;
        int ret;
 
        ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +272,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                return ret;
        write_vreg(vgpu, off, p_data, bytes);
 
-       mmio_hw_access_pre(gvt->gt);
+       wakeref = mmio_hw_access_pre(gvt->gt);
        intel_vgpu_write_fence(vgpu, fence_num,
                        vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
-       mmio_hw_access_post(gvt->gt);
+       mmio_hw_access_post(gvt->gt, wakeref);
        return 0;
 }
 
@@ -1975,10 +1976,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
            vgpu == gvt->scheduler.engine_owner[engine->id] ||
            offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
            offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
-               mmio_hw_access_pre(gvt->gt);
+               intel_wakeref_t wakeref;
+
+               wakeref = mmio_hw_access_pre(gvt->gt);
                vgpu_vreg(vgpu, offset) =
                        intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
-               mmio_hw_access_post(gvt->gt);
+               mmio_hw_access_post(gvt->gt, wakeref);
        }
 
        return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -3209,10 +3212,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
        int i, id;
 
        idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
-               mmio_hw_access_pre(gvt->gt);
+               intel_wakeref_t wakeref;
+
+               wakeref = mmio_hw_access_pre(gvt->gt);
                for (i = 0; i < vgpu_fence_sz(vgpu); i++)
                        intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
-               mmio_hw_access_post(gvt->gt);
+               mmio_hw_access_post(gvt->gt, wakeref);
        }
 }
 
@@ -3233,8 +3238,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
        int id;
 
        idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
-               mmio_hw_access_pre(gvt->gt);
+               intel_wakeref_t wakeref;
+
+               wakeref = mmio_hw_access_pre(gvt->gt);
                intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
-               mmio_hw_access_post(gvt->gt);
+               mmio_hw_access_post(gvt->gt, wakeref);
        }
 }
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c077fb4674f0f428736b827f38b2a8e5d43f25d0..c75b393ab0b7482b26a7e6eb2184176dac8bdb89 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -448,6 +448,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
 
        if (!vgpu_data->active)
                return;
@@ -466,7 +467,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->current_vgpu = NULL;
        }
 
-       intel_runtime_pm_get(&dev_priv->runtime_pm);
+       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
        spin_lock_bh(&scheduler->mmio_context_lock);
        for_each_engine(engine, vgpu->gvt->gt, id) {
                if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -475,6 +476,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
-       intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }