git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/i915: move cpu_write_needs_clflush
authorMatthew Auld <matthew.auld@intel.com>
Wed, 27 Oct 2021 16:18:12 +0000 (17:18 +0100)
committerMatthew Auld <matthew.auld@intel.com>
Tue, 2 Nov 2021 09:44:10 +0000 (09:44 +0000)
Move it next to its partner in crime; gpu_write_needs_clflush. For
better readability let's keep gpu vs cpu at least in the same file.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211027161813.3094681-3-matthew.auld@intel.com
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem.c

index b684a62bf3b0773e7e06fb32ed4fd327337a8111..d30d5a699788c072b5108376548dc7559ba366ea 100644 (file)
@@ -22,6 +22,18 @@ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
                 obj->cache_level == I915_CACHE_WT);
 }
 
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+       if (obj->cache_dirty)
+               return false;
+
+       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+               return true;
+
+       /* Currently in use by HW (display engine)? Keep flushed. */
+       return i915_gem_object_is_framebuffer(obj);
+}
+
 static void
 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 {
index ba224598ed69c97e862cef1f604c1971a0df163f..133963b461350fede10f931597659078becb4497 100644 (file)
@@ -522,6 +522,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -542,23 +543,11 @@ void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
 void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
 void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
 
-static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
-       if (obj->cache_dirty)
-               return false;
-
-       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-               return true;
-
-       /* Currently in use by HW (display engine)? Keep flushed. */
-       return i915_gem_object_is_framebuffer(obj);
-}
-
 static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
 {
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
-       if (cpu_write_needs_clflush(obj))
+       if (i915_gem_cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
 }
 
index 981e383d1a5db7fd406d9243409b80f4aefff177..d0e642c820640cc618d60538191148de032429c5 100644 (file)
@@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
-           cpu_write_needs_clflush(obj))
+           i915_gem_cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.