drm/amdkfd: Use huge page size to check split svm range alignment
author Xiaogang Chen <xiaogang.chen@amd.com>
Mon, 1 Dec 2025 20:12:29 +0000 (14:12 -0600)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 8 Dec 2025 20:21:56 +0000 (15:21 -0500)
When splitting svm ranges that have been mapped using huge pages, use the
huge page size (2MB) to check the split range alignment, not
prange->granularity, which is the migration granularity.
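
The "512" in the new checks is the 2MB huge page size expressed in 4KB base
pages (512 * 4KB = 2MB). Below is a minimal userspace sketch of the alignment
test, assuming 4KB base pages; the helper names (align_up_2mb,
span_has_huge_page, split_needs_remap) and the example page numbers are
illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_2MB 512UL     /* one 2MB huge page in 4KB base pages */

/* Round a page number down/up to a 2MB boundary, mirroring the kernel's
 * ALIGN_DOWN()/ALIGN() macros. */
static unsigned long align_down_2mb(unsigned long pfn)
{
        return pfn & ~(PAGES_PER_2MB - 1);
}

static unsigned long align_up_2mb(unsigned long pfn)
{
        return align_down_2mb(pfn + PAGES_PER_2MB - 1);
}

/* A range [start, last] can carry a huge page mapping only if it spans at
 * least one whole 2MB-aligned chunk; this models the patch's
 * huge_page_mapping flag. */
static bool span_has_huge_page(unsigned long start, unsigned long last)
{
        return align_down_2mb(last) > align_up_2mb(start);
}

/* A split boundary that lands inside the huge-page-backed middle of the
 * range without being 2MB-aligned breaks a huge page mapping, so the split
 * piece must be remapped. */
static bool split_needs_remap(unsigned long start, unsigned long last,
                              unsigned long boundary)
{
        return span_has_huge_page(start, last) &&
               boundary > align_up_2mb(start) &&
               boundary < align_down_2mb(last) &&
               boundary % PAGES_PER_2MB != 0;
}

int main(void)
{
        /* Pages [100, 2000] fully cover the 2MB chunks starting at 512
         * and 1024. */
        printf("split at 700:  remap=%d\n", split_needs_remap(100, 2000, 700));  /* 1 */
        printf("split at 1024: remap=%d\n", split_needs_remap(100, 2000, 1024)); /* 0 */
        return 0;
}

A split at page 700 falls inside a fully covered 2MB chunk without being
2MB-aligned, so the split piece would need remapping; a split exactly at
page 1024 leaves both sides on huge page boundaries and needs none.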

Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")
Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
Reviewed-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 448ee45353ef9fb1a34f5f26eb3f48923c6f0898)

drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index 97c2270f278fd37c00a2560c2a50b83edf610400..79ea138897fcf0e94ee7de0d1a9f5e9e758bb985 100644
@@ -1144,30 +1144,48 @@ static int
 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
        struct svm_range *tail = NULL;
-       int r = svm_range_split(prange, prange->start, new_last, &tail);
+       int r;
 
-       if (!r) {
-               list_add(&tail->list, insert_list);
-               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
-                       list_add(&tail->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, prange->start, new_last, &tail);
+
+       if (r)
+               return r;
+
+       list_add(&tail->list, insert_list);
+
+       if (huge_page_mapping && tail->start > start_align &&
+           tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
+               list_add(&tail->update_list, remap_list);
+
+       return 0;
 }
 
 static int
 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
        struct svm_range *head = NULL;
-       int r = svm_range_split(prange, new_start, prange->last, &head);
+       int r;
 
-       if (!r) {
-               list_add(&head->list, insert_list);
-               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
-                       list_add(&head->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, new_start, prange->last, &head);
+
+       if (r)
+               return r;
+
+       list_add(&head->list, insert_list);
+
+       if (huge_page_mapping && head->last + 1 > start_align &&
+           head->last + 1 < last_align_down && (!IS_ALIGNED(head->last + 1, 512)))
+               list_add(&head->update_list, remap_list);
+
+       return 0;
 }
 
 static void