]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amdgpu: Reset the clear flag in buddy during resume
authorArunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Wed, 16 Jul 2025 07:51:24 +0000 (13:21 +0530)
committerChristian König <christian.koenig@amd.com>
Wed, 16 Jul 2025 10:50:32 +0000 (12:50 +0200)
- Added a handler in DRM buddy manager to reset the cleared
  flag for the blocks in the freelist.

- This is necessary because, upon resuming, the VRAM becomes
  cluttered with BIOS data, yet the VRAM backend manager
  believes that everything has been cleared.

v2:
  - Add lock before accessing drm_buddy_clear_reset_blocks(). (Matthew Auld)
  - Force merge the two dirty blocks. (Matthew Auld)
  - Add a new unit test case for this issue. (Matthew Auld)
  - Having this function being able to flip the state either way would be
    good. (Matthew Brost)

v3(Matthew Auld):
  - Do merge step first to avoid the use of extra reset flag.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Cc: stable@vger.kernel.org
Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3812
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20250716075125.240637-2-Arunpravin.PaneerSelvam@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/drm_buddy.c
include/drm/drm_buddy.h

index e1bab6a96cb67df049d52ca6601fea565783c27c..4ecacbcb66bbefee2777a034eeda6bfafb9704c2 100644 (file)
@@ -5193,6 +5193,8 @@ exit:
                dev->dev->power.disable_depth--;
 #endif
        }
+
+       amdgpu_vram_mgr_clear_reset_blocks(adev);
        adev->in_suspend = false;
 
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
index 208b7d1d8a277bd8463b836a7d3d0a49822325dc..450e4bf093b79b4bc7ffa8f125753c9f598bea94 100644 (file)
@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                  uint64_t start, uint64_t size);
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
 
 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
                            struct ttm_resource *res);
index abdc52b0895a60136cf6beb567bc9d20f8ca114b..07c936e90d8e40080d956afa73de00e3d8cbdb7d 100644 (file)
@@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
        return atomic64_read(&mgr->vis_usage);
 }
 
+/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+       struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+       struct drm_buddy *mm = &mgr->mm;
+
+       mutex_lock(&mgr->lock);
+       drm_buddy_reset_clear(mm, false);
+       mutex_unlock(&mgr->lock);
+}
+
 /**
  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
  *
index 241c855f891f8b4b1e5c5b6d82c6614bf3ccc15e..66aff35f864762d66010b656bae2d92e3aa23660 100644 (file)
@@ -404,6 +404,49 @@ drm_get_buddy(struct drm_buddy_block *block)
 }
 EXPORT_SYMBOL(drm_get_buddy);
 
+/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+       u64 root_size, size, start;
+       unsigned int order;
+       int i;
+
+       size = mm->size;
+       for (i = 0; i < mm->n_roots; ++i) {
+               order = ilog2(size) - ilog2(mm->chunk_size);
+               start = drm_buddy_block_offset(mm->roots[i]);
+               __force_merge(mm, start, start + size, order);
+
+               root_size = mm->chunk_size << order;
+               size -= root_size;
+       }
+
+       for (i = 0; i <= mm->max_order; ++i) {
+               struct drm_buddy_block *block;
+
+               list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+                       if (is_clear != drm_buddy_block_is_clear(block)) {
+                               if (is_clear) {
+                                       mark_cleared(block);
+                                       mm->clear_avail += drm_buddy_block_size(mm, block);
+                               } else {
+                                       clear_reset(block);
+                                       mm->clear_avail -= drm_buddy_block_size(mm, block);
+                               }
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
 /**
  * drm_buddy_free_block - free a block
  *
index 9689a7c5dd36b25d7ddefa0ec9fd4de210349829..513837632b7d371c4562823c4355b6bdc545dcfd 100644 (file)
@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks);
 
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
 void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
 
 void drm_buddy_free_list(struct drm_buddy *mm,