drm/amdgpu: Convert update_partition_sched_list into a common helper v3
author Hawking Zhang <Hawking.Zhang@amd.com>
Mon, 16 Jun 2025 09:07:26 +0000 (17:07 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 24 Jun 2025 14:03:41 +0000 (10:03 -0400)
The update_partition_sched_list function does not
need to remain an SOC-specific callback. It can
be reused for future products.

v2: bypass the function if xcp_mgr is not available (Likun)

v3: Let caller check the availability of xcp_mgr (Lijo)
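
As a quick illustration of the v3 change (mirroring the amdgpu_device.c hunk
below), the NULL check that previously lived in the removed wrapper macro now
sits at the call site; a minimal sketch of the resulting caller-side pattern:

        /* Invoke the common helper only when a partition manager exists;
         * this replaces the old per-SOC update_partition_sched_list callback.
         */
        if (adev->xcp_mgr)
                amdgpu_xcp_update_partition_sched_list(adev);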

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c

index 94db61c75fff4af21f64a8e7ae96f9ef75d2402c..36b3893bfbd69eeed8404b2d0d48c22e0787bb3f 100644 (file)
@@ -3012,7 +3012,8 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
                }
        }
 
-       amdgpu_xcp_update_partition_sched_list(adev);
+       if (adev->xcp_mgr)
+               amdgpu_xcp_update_partition_sched_list(adev);
 
        return 0;
 }
index eef827fbdc74ab14aca0194a837acc7d620535ab..a073718cd2f633e3e722f8e13a0df6c2393450d6 100644 (file)
@@ -486,6 +486,117 @@ int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
        return 0;
 }
 
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+                             uint32_t inst_idx,
+                             struct amdgpu_ring *ring)
+{
+       int xcp_id;
+       enum AMDGPU_XCP_IP_BLOCK ip_blk;
+       uint32_t inst_mask;
+
+       ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+               adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+       if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+           (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+               return;
+
+       inst_mask = 1 << inst_idx;
+
+       switch (ring->funcs->type) {
+       case AMDGPU_HW_IP_GFX:
+       case AMDGPU_RING_TYPE_COMPUTE:
+       case AMDGPU_RING_TYPE_KIQ:
+               ip_blk = AMDGPU_XCP_GFX;
+               break;
+       case AMDGPU_RING_TYPE_SDMA:
+               ip_blk = AMDGPU_XCP_SDMA;
+               break;
+       case AMDGPU_RING_TYPE_VCN_ENC:
+       case AMDGPU_RING_TYPE_VCN_JPEG:
+               ip_blk = AMDGPU_XCP_VCN;
+               break;
+       default:
+               dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+               return;
+       }
+
+       for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+               if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+                       ring->xcp_id = xcp_id;
+                       dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+                               ring->xcp_id);
+                       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+                               adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+                       break;
+               }
+       }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+                                       struct amdgpu_ring *ring,
+                                       unsigned int sel_xcp_id)
+{
+       unsigned int *num_gpu_sched;
+
+       num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+                       .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+       adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+                       .sched[(*num_gpu_sched)++] = &ring->sched;
+       dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+               ring->name, sel_xcp_id, ring->funcs->type,
+               ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int i;
+
+       for (i = 0; i < MAX_XCP; i++) {
+               atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+               memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+       }
+
+       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+               return 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               ring = adev->rings[i];
+               if (!ring || !ring->sched.ready || ring->no_scheduler)
+                       continue;
+
+               amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+               /* VCN may be shared by two partitions under CPX MODE in certain
+                * configs.
+                */
+               if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+                   (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+                       amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+       }
+
+       return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+                       ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+               else
+                       amdgpu_set_xcp_id(adev, ring->me, ring);
+       }
+
+       return amdgpu_xcp_sched_list_update(adev);
+}
+
 /*====================== xcp sysfs - configuration ======================*/
 #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
        static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
index fd8821c6671ed7a7e4d42ae97c6f75c869b7c048..8b567ad7cce178d69a876ea5f12d263e6f9db36a 100644 (file)
@@ -144,7 +144,6 @@ struct amdgpu_xcp_mgr_funcs {
        int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
        int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
        int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
-       int (*update_partition_sched_list)(struct amdgpu_device *adev);
 };
 
 int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -178,14 +177,10 @@ int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
                             struct amdgpu_fpriv *fpriv,
                             unsigned int *num_scheds,
                             struct drm_gpu_scheduler ***scheds);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
 void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
 void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
 
-#define amdgpu_xcp_update_partition_sched_list(adev) \
-       ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
-       (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
-       (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
-
 static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
 {
        if (!xcp_mgr)
index d9b49883f9ec77a00b7d3a877f1ffe9bc9e49b7f..6018a05973933b87000434222f3f95305374dea9 100644 (file)
@@ -63,123 +63,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
 }
 
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
-       return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
-                            uint32_t inst_idx, struct amdgpu_ring *ring)
-{
-       int xcp_id;
-       enum AMDGPU_XCP_IP_BLOCK ip_blk;
-       uint32_t inst_mask;
-
-       ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-               adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
-       if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
-           (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
-               return;
-
-       inst_mask = 1 << inst_idx;
-
-       switch (ring->funcs->type) {
-       case AMDGPU_HW_IP_GFX:
-       case AMDGPU_RING_TYPE_COMPUTE:
-       case AMDGPU_RING_TYPE_KIQ:
-               ip_blk = AMDGPU_XCP_GFX;
-               break;
-       case AMDGPU_RING_TYPE_SDMA:
-               ip_blk = AMDGPU_XCP_SDMA;
-               break;
-       case AMDGPU_RING_TYPE_VCN_ENC:
-       case AMDGPU_RING_TYPE_VCN_JPEG:
-               ip_blk = AMDGPU_XCP_VCN;
-               break;
-       default:
-               DRM_ERROR("Not support ring type %d!", ring->funcs->type);
-               return;
-       }
-
-       for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
-               if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
-                       ring->xcp_id = xcp_id;
-                       dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
-                               ring->xcp_id);
-                       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-                               adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
-                       break;
-               }
-       }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
-               struct amdgpu_device *adev,
-               struct amdgpu_ring *ring,
-               unsigned int sel_xcp_id)
-{
-       unsigned int *num_gpu_sched;
-
-       num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
-                       .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
-       adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
-                       .sched[(*num_gpu_sched)++] = &ring->sched;
-       DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
-                       sel_xcp_id, ring->funcs->type,
-                       ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
-               struct amdgpu_device *adev)
-{
-       struct amdgpu_ring *ring;
-       int i;
-
-       for (i = 0; i < MAX_XCP; i++) {
-               atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
-               memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
-       }
-
-       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
-               return 0;
-
-       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-               ring = adev->rings[i];
-               if (!ring || !ring->sched.ready || ring->no_scheduler)
-                       continue;
-
-               aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
-               /* VCN may be shared by two partitions under CPX MODE in certain
-                * configs.
-                */
-               if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
-                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
-                   aqua_vanjaram_xcp_vcn_shared(adev))
-                       aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
-       }
-
-       return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
-       int i;
-
-       for (i = 0; i < adev->num_rings; i++) {
-               struct amdgpu_ring *ring = adev->rings[i];
-
-               if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
-                       aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
-               else
-                       aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
-       }
-
-       return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
 /* Fixed pattern for smn addressing on different AIDs:
  *   bit[34]: indicate cross AID access
  *   bit[33:32]: indicate target AID id
@@ -694,8 +577,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
        .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
        .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
        .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
-       .update_partition_sched_list =
-               &aqua_vanjaram_update_partition_sched_list
 };
 
 static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)