From dad13af3985c1d096d3141026d5a534f8aacb4e6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 18 Dec 2025 14:38:52 -0800 Subject: [PATCH] drm/xe/vf: Check if scheduler groups are enabled VF can check if PF has enabled scheduler groups with a dedicated KLV query. If scheduler groups are enabled, MLRC queue registrations are forbidden. Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Link: https://patch.msgid.link/20251218223846.1146344-20-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 7 +++ drivers/gpu/drm/xe/xe_exec_queue.c | 3 + drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 67 +++++++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 1 + drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 2 + drivers/gpu/drm/xe/xe_guc_klv_helpers.c | 3 + 6 files changed, 83 insertions(+) diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index ac10cf3adbc15..39dee685a92c7 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -48,11 +48,18 @@ * Refers to 32 bit architecture version as reported by the HW IP. * This key is supported on MTL+ platforms only. * Requires GuC ABI 1.2+. + * + * _`GUC_KLV_GLOBAL_CFG_GROUP_SCHEDULING_AVAILABLE` : 0x3001 + * Tells the driver whether scheduler groups are enabled or not. 
+ * Requires GuC ABI 1.26+ */ #define GUC_KLV_GLOBAL_CFG_GMD_ID_KEY 0x3000u #define GUC_KLV_GLOBAL_CFG_GMD_ID_LEN 1u +#define GUC_KLV_GLOBAL_CFG_GROUP_SCHEDULING_AVAILABLE_KEY 0x3001u +#define GUC_KLV_GLOBAL_CFG_GROUP_SCHEDULING_AVAILABLE_LEN 1u + /** * DOC: GuC Self Config KLVs * diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index c336dcd19020b..0b9e074b022ff 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -1114,6 +1114,9 @@ static bool has_sched_groups(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt)) return true; + if (IS_SRIOV_VF(gt_to_xe(gt)) && xe_gt_sriov_vf_sched_groups_enabled(gt)) + return true; + return false; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index b8b391cfc8eb8..d91c65dc34960 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -612,6 +612,52 @@ static void vf_cache_gmdid(struct xe_gt *gt) gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt); } +static int vf_query_sched_groups(struct xe_gt *gt) +{ + struct xe_guc *guc = >->uc.guc; + struct xe_uc_fw_version guc_version; + u32 value = 0; + int err; + + xe_gt_sriov_vf_guc_versions(gt, NULL, &guc_version); + + if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 26, 0)) + return 0; + + err = guc_action_query_single_klv32(guc, + GUC_KLV_GLOBAL_CFG_GROUP_SCHEDULING_AVAILABLE_KEY, + &value); + if (unlikely(err)) { + xe_gt_sriov_err(gt, "Failed to obtain sched groups status (%pe)\n", + ERR_PTR(err)); + return err; + } + + /* valid values are 0 (disabled) and 1 (enabled) */ + if (value > 1) { + xe_gt_sriov_err(gt, "Invalid sched groups status %u\n", value); + return -EPROTO; + } + + xe_gt_sriov_dbg(gt, "sched groups %s\n", str_enabled_disabled(value)); + return value; +} + +static int vf_cache_sched_groups_status(struct xe_gt *gt) +{ + int ret; + + xe_gt_assert(gt, 
IS_SRIOV_VF(gt_to_xe(gt))); + + ret = vf_query_sched_groups(gt); + if (ret < 0) + return ret; + + gt->sriov.vf.runtime.uses_sched_groups = ret; + + return 0; +} + /** * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO. * @gt: the &xe_gt @@ -641,12 +687,33 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt) if (unlikely(err)) return err; + err = vf_cache_sched_groups_status(gt); + if (unlikely(err)) + return err; + if (has_gmdid(xe)) vf_cache_gmdid(gt); return 0; } +/** + * xe_gt_sriov_vf_sched_groups_enabled() - Check if PF has enabled multiple + * scheduler groups + * @gt: the &xe_gt + * + * This function is for VF use only. + * + * Return: true if sched groups were enabled, false otherwise. + */ +bool xe_gt_sriov_vf_sched_groups_enabled(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, gt->sriov.vf.guc_version.major); + + return gt->sriov.vf.runtime.uses_sched_groups; +} + /** * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration. 
* @gt: the &xe_gt diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index af40276790fad..7d97189c2d3d9 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -30,6 +30,7 @@ bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt); u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt); u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt); u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt); +bool xe_gt_sriov_vf_sched_groups_enabled(struct xe_gt *gt); u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg); void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h index 510c33116fbdb..9a6b5672d569b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h @@ -27,6 +27,8 @@ struct xe_gt_sriov_vf_selfconfig { struct xe_gt_sriov_vf_runtime { /** @gmdid: cached value of the GDMID register. */ u32 gmdid; + /** @uses_sched_groups: whether PF enabled sched groups or not. */ + bool uses_sched_groups; /** @regs_size: size of runtime register array. */ u32 regs_size; /** @num_regs: number of runtime registers in the array. */ diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c index 1b08b443606e0..dd504b77cb176 100644 --- a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c +++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c @@ -21,6 +21,9 @@ const char *xe_guc_klv_key_to_string(u16 key) { switch (key) { + /* GuC Global Config KLVs */ + case GUC_KLV_GLOBAL_CFG_GROUP_SCHEDULING_AVAILABLE_KEY: + return "group_scheduling_available"; /* VGT POLICY keys */ case GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY: return "sched_if_idle"; -- 2.47.3