git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amdgpu: remove fence slab
author Alex Deucher <alexander.deucher@amd.com>
Mon, 16 Jun 2025 18:28:32 +0000 (14:28 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 24 Jun 2025 14:00:03 +0000 (10:00 -0400)
Just use kmalloc for the fences in the rare case we need
an independent fence.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

index 42278e9a2d75a844e7f0e3659039d9fec831a529..76ce77f43d4f4170c917c48c382f9c11c47048c5 100644 (file)
@@ -470,9 +470,6 @@ struct amdgpu_sa_manager {
        void                            *cpu_ptr;
 };
 
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
 /*
  * IRQS.
  */
index 7f8fa69300bf447cbfcfc148c81d26ab61e827d7..d645fa9bdff3b0402b01f618293812a45233f723 100644 (file)
@@ -3113,10 +3113,6 @@ static int __init amdgpu_init(void)
        if (r)
                goto error_sync;
 
-       r = amdgpu_fence_slab_init();
-       if (r)
-               goto error_fence;
-
        r = amdgpu_userq_fence_slab_init();
        if (r)
                goto error_fence;
@@ -3151,7 +3147,6 @@ static void __exit amdgpu_exit(void)
        amdgpu_unregister_atpx_handler();
        amdgpu_acpi_release();
        amdgpu_sync_fini();
-       amdgpu_fence_slab_fini();
        amdgpu_userq_fence_slab_fini();
        mmu_notifier_synchronize();
        amdgpu_xcp_drv_release();
index f5855c412321f83012211cdccf46c5ad73956694..343c2bfdefb279b518c2c25b7d7174b93809efda 100644 (file)
 #include "amdgpu_trace.h"
 #include "amdgpu_reset.h"
 
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
-       amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
-       if (!amdgpu_fence_slab)
-               return -ENOMEM;
-       return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
-       rcu_barrier();
-       kmem_cache_destroy(amdgpu_fence_slab);
-}
 /*
  * Cast helper
  */
@@ -131,9 +116,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
        int r;
 
        if (job == NULL) {
-               /* create a sperate hw fence */
-               am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
-               if (am_fence == NULL)
+               /* create a separate hw fence */
+               am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+               if (!am_fence)
                        return -ENOMEM;
        } else {
                /* take use of job-embedded fence */
@@ -814,7 +799,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
        /* free fence_slab if it's separated fence*/
-       kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+       kfree(to_amdgpu_fence(f));
 }
 
 /**