git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amdgpu: Convert pre|post_partition_switch into common helpers
author: Hawking Zhang <Hawking.Zhang@amd.com>
Thu, 5 Jun 2025 08:39:24 +0000 (16:39 +0800)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 24 Jun 2025 14:04:03 +0000 (10:04 -0400)
So they can be reused for future products

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c

index 9985eeab86768b74586ce7425fcb2e99ddd5cc50..c8fcafeb686450fa14e74f5df70e961d74ac9b94 100644 (file)
@@ -634,6 +634,33 @@ void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
        }
 }
 
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+       /* TODO:
+        * Stop user queues and threads, and make sure GPU is empty of work.
+        */
+
+       if (flags & AMDGPU_XCP_OPS_KFD)
+               amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+       return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+       int ret = 0;
+
+       if (flags & AMDGPU_XCP_OPS_KFD) {
+               amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+               amdgpu_amdkfd_device_init(xcp_mgr->adev);
+               /* If KFD init failed, return failure */
+               if (!xcp_mgr->adev->kfd.init_complete)
+                       ret = -EIO;
+       }
+
+       return ret;
+}
+
 /*====================== xcp sysfs - configuration ======================*/
 #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
        static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
index 80996ea1be625f9428b4d0e1d2791f649fe7ccd2..70a0f8400b5783ee551c05e8432f2440b0c43eec 100644 (file)
@@ -39,6 +39,8 @@
 
 #define AMDGPU_XCP_NO_PARTITION (~0)
 
+#define AMDGPU_XCP_OPS_KFD     (1 << 0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {
@@ -179,6 +181,8 @@ int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
                             struct drm_gpu_scheduler ***scheds);
 void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
 int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
 void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
 void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
 
index cc78af40512f175a65290684dbfd6b91fc6240e0..914cf4bfb03333ebf16461ec8c9d3553de25953d 100644 (file)
@@ -34,8 +34,6 @@
 #define XCP_INST_MASK(num_inst, xcp_id)                                        \
        (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
 
-#define AMDGPU_XCP_OPS_KFD     (1 << 0)
-
 void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
 {
        int i;
@@ -369,33 +367,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
        return false;
 }
 
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
-       /* TODO:
-        * Stop user queues and threads, and make sure GPU is empty of work.
-        */
-
-       if (flags & AMDGPU_XCP_OPS_KFD)
-               amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
-       return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
-       int ret = 0;
-
-       if (flags & AMDGPU_XCP_OPS_KFD) {
-               amdgpu_amdkfd_device_probe(xcp_mgr->adev);
-               amdgpu_amdkfd_device_init(xcp_mgr->adev);
-               /* If KFD init failed, return failure */
-               if (!xcp_mgr->adev->kfd.init_complete)
-                       ret = -EIO;
-       }
-
-       return ret;
-}
-
 static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 {
        int mode;
@@ -442,7 +413,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                        goto out;
        }
 
-       ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+       ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
        if (ret)
                goto unlock;
 
@@ -455,7 +426,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
        *num_xcps = num_xcc / num_xcc_per_xcp;
        amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
 
-       ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+       ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
        if (!ret)
                __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
 unlock: