git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: use atomic functions with memory barriers for vm fault info
author: Gui-Dong Han <hanguidong02@gmail.com>
Wed, 8 Oct 2025 03:43:27 +0000 (03:43 +0000)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 13 Oct 2025 18:14:15 +0000 (14:14 -0400)
The atomic variable vm_fault_info_updated is used to synchronize access to
adev->gmc.vm_fault_info between the interrupt handler and
get_vm_fault_info().

The default atomic functions like atomic_set() and atomic_read() do not
provide memory barriers. This allows for CPU instruction reordering,
meaning the memory accesses to vm_fault_info and the vm_fault_info_updated
flag are not guaranteed to occur in the intended order. This creates a
race condition that can lead to inconsistent or stale data being used.

The previous implementation, which used an explicit mb(), was incomplete
and inefficient. It failed to account for all potential CPU reorderings,
such as the access of vm_fault_info being reordered before the atomic_read
of the flag. This approach is also more verbose and less performant than
using the proper atomic functions with acquire/release semantics.

Fix this by switching to atomic_set_release() and atomic_read_acquire().
These functions provide the necessary acquire and release semantics,
which act as memory barriers to ensure the correct order of operations.
It is also more efficient and idiomatic than using explicit full memory
barriers.

Fixes: b97dfa27ef3a ("drm/amdgpu: save vm fault information for amdkfd")
Cc: stable@vger.kernel.org
Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

index 83020963dfde46492fd3c3088db2edf98eccaaa7..a2ca9acf8c4ea8eefd50d91ec88e046bc5cbdf29 100644 (file)
@@ -2329,10 +2329,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
                                          struct kfd_vm_fault_info *mem)
 {
-       if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+       if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
                *mem = *adev->gmc.vm_fault_info;
-               mb(); /* make sure read happened */
-               atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+               atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
        }
        return 0;
 }
index 93d7ccb7d013ad9eb54abe2560182de5e1a0df0d..0e5e54d0a9a5b0796c14a26401b3641a69093471 100644 (file)
@@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
                                        GFP_KERNEL);
        if (!adev->gmc.vm_fault_info)
                return -ENOMEM;
-       atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+       atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
 
        return 0;
 }
@@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                             VMID);
        if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
-               && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+               && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
                struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
                u32 protections = REG_GET_FIELD(status,
                                        VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
                info->prot_read = protections & 0x8 ? true : false;
                info->prot_write = protections & 0x10 ? true : false;
                info->prot_exec = protections & 0x20 ? true : false;
-               mb();
-               atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+               atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
        }
 
        return 0;
index c5e2a2c41e06555d0f6ca6f81a5649c0adea9b37..e1509480dfc23335890614e9e9471e8e868fb796 100644 (file)
@@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
                                        GFP_KERNEL);
        if (!adev->gmc.vm_fault_info)
                return -ENOMEM;
-       atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+       atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
 
        return 0;
 }
@@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                             VMID);
        if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
-               && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+               && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
                struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
                u32 protections = REG_GET_FIELD(status,
                                        VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
                info->prot_read = protections & 0x8 ? true : false;
                info->prot_write = protections & 0x10 ? true : false;
                info->prot_exec = protections & 0x20 ? true : false;
-               mb();
-               atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+               atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
        }
 
        return 0;