git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/i915: Consolidate the timeline->barrier
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 8 Apr 2019 09:17:03 +0000 (10:17 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 8 Apr 2019 16:04:12 +0000 (17:04 +0100)
The timeline is strictly ordered, so by inserting the timeline->barrier
request into the timeline->last_request it naturally provides the same
barrier. Consolidate the pair of barriers into one as they serve the
same purpose.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190408091728.20207-4-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_timeline.c
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_timeline_types.h
drivers/gpu/drm/i915/selftests/mock_timeline.c

index 66b6852cb4d2917948a507503252325825d692fa..7fc34ab6df87ceca24b940695d540d37b7719b06 100644 (file)
@@ -1167,7 +1167,7 @@ static int
 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
        struct drm_i915_private *i915 = ce->engine->i915;
-       struct i915_request *rq, *prev;
+       struct i915_request *rq;
        intel_wakeref_t wakeref;
        int ret;
 
@@ -1192,16 +1192,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
        }
 
        /* Queue this switch after all other activity by this context. */
-       prev = i915_active_request_raw(&ce->ring->timeline->last_request,
-                                      &i915->drm.struct_mutex);
-       if (prev && !i915_request_completed(prev)) {
-               ret = i915_request_await_dma_fence(rq, &prev->fence);
-               if (ret < 0)
-                       goto out_add;
-       }
-
-       /* Order all following requests to be after. */
-       ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+       ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
        if (ret)
                goto out_add;
 
index 2da0d6436a1a966bed6ce561e6d9bcac23ef91df..96a9e8bcd80567150c9b1d7bd98a5952ce4f0e04 100644 (file)
@@ -584,11 +584,6 @@ out:
        return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
-static int add_timeline_barrier(struct i915_request *rq)
-{
-       return i915_request_await_active_request(rq, &rq->timeline->barrier);
-}
-
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -738,10 +733,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         */
        rq->head = rq->ring->emit;
 
-       ret = add_timeline_barrier(rq);
-       if (ret)
-               goto err_unwind;
-
        ret = engine->request_alloc(rq);
        if (ret)
                goto err_unwind;
index 2f49073649205875b5e2611b3e977e3d9c3dc0a3..5fbea0892f334da8fedc65a2652386d147399ef9 100644 (file)
@@ -253,7 +253,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
        spin_lock_init(&timeline->lock);
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->barrier);
        INIT_ACTIVE_REQUEST(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);
 
@@ -326,7 +325,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 {
        GEM_BUG_ON(timeline->pin_count);
        GEM_BUG_ON(!list_empty(&timeline->requests));
-       GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
        i915_syncmap_free(&timeline->sync);
 
index 4ca7f80bdf6d661b1b4b88dc59a82d9253b6c16a..27668a1a69a37e574bdfd1f9eec574240e769057 100644 (file)
@@ -110,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
 void i915_timelines_fini(struct drm_i915_private *i915);
 
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
-       return i915_active_request_set(&tl->barrier, rq);
-}
-
 #endif
index 1f5b55d9ffb5439cd3af23e2ea05a0e09c622638..5256a0b5c5f77b005199f9d5a2b3e09100ab1070 100644 (file)
@@ -61,16 +61,6 @@ struct i915_timeline {
         */
        struct i915_syncmap *sync;
 
-       /**
-        * Barrier provides the ability to serialize ordering between different
-        * timelines.
-        *
-        * Users can call i915_timeline_set_barrier which will make all
-        * subsequent submissions to this timeline be executed only after the
-        * barrier has been completed.
-        */
-       struct i915_active_request barrier;
-
        struct list_head link;
        struct drm_i915_private *i915;
 
index 416d85233263dbecf04229b3e6cf51558a9aed41..e084476469ef1807fbf04fc47c2fa83a411ef60c 100644 (file)
@@ -16,7 +16,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
        spin_lock_init(&timeline->lock);
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->barrier);
        INIT_ACTIVE_REQUEST(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);