git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/xe: Move DSB l2 flush to a more sensible place
authorMaarten Lankhorst <maarten.lankhorst@linux.intel.com>
Fri, 6 Jun 2025 10:45:47 +0000 (11:45 +0100)
committerThomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 26 Jun 2025 12:52:19 +0000 (14:52 +0200)
Flushing l2 is only needed after all data has been written.

Fixes: 01570b446939 ("drm/xe/bmg: implement Wa_16023588340")
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: stable@vger.kernel.org # v6.12+
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://lore.kernel.org/r/20250606104546.1996818-3-matthew.auld@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
(cherry picked from commit 0dd2dd0182bc444a62652e89d08c7f0e4fde15ba)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
drivers/gpu/drm/xe/display/xe_dsb_buffer.c

index f95375451e2fafce123104808a4cb65d61038712..9f941fc2e36bb2cd0656ad21b21c10bedb869a98 100644 (file)
@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
 
 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
 {
-       struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
        iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
-       xe_device_l2_flush(xe);
 }
 
 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
 
 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
 {
-       struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
        WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
 
        iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
-       xe_device_l2_flush(xe);
 }
 
 bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -74,9 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
 
 void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
 {
+       struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
        /*
         * The memory barrier here is to ensure coherency of DSB vs MMIO,
         * both for weak ordering archs and discrete cards.
         */
-       xe_device_wmb(dsb_buf->vma->bo->tile->xe);
+       xe_device_wmb(xe);
+       xe_device_l2_flush(xe);
 }