* it to take effect. Such cases might typically happen on a 1PF+1VF
* Virtualization config enabled for heavier workloads like AI/ML.
*
+ * If scheduling groups are supported, the provided value is applied to all
+ * groups (even if they've not yet been enabled). Support for this feature
+ * is available from GuC 70.53.0.
+ *
* The max value for this KLV is 100 seconds, anything exceeding that
* will be clamped to the max.
*
* on a 1PF+1VF Virtualization config enabled for heavier workloads like
* AI/ML.
*
+ * If scheduling groups are supported, the provided value is applied to all
+ * groups (even if they've not yet been enabled). Support for this feature
+ * is available from GuC 70.53.0.
+ *
* The max value for this KLV is 100 seconds, anything exceeding that
* will be clamped to the max.
*
}
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
- cfg[n++] = config->exec_quantum;
+ cfg[n++] = config->exec_quantum[0];
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
- cfg[n++] = config->preempt_timeout;
+ cfg[n++] = config->preempt_timeout[0];
#define encode_threshold_config(TAG, NAME, VER...) ({ \
if (IF_ARGS(GUC_FIRMWARE_VER_AT_LEAST(>->uc.guc, VER), true, VER)) { \
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
int err;
+ int i;
err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
if (unlikely(err))
return err;
- config->exec_quantum = exec_quantum;
+ for (i = 0; i < ARRAY_SIZE(config->exec_quantum); i++)
+ config->exec_quantum[i] = exec_quantum;
+
return 0;
}
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- return config->exec_quantum;
+ return config->exec_quantum[0];
}
/**
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
int err;
+ int i;
err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
if (unlikely(err))
return err;
- config->preempt_timeout = preempt_timeout;
+ for (i = 0; i < ARRAY_SIZE(config->preempt_timeout); i++)
+ config->preempt_timeout[i] = preempt_timeout;
return 0;
}
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- return config->preempt_timeout;
+ return config->preempt_timeout[0];
}
/**
/*
 * Reset this VF's scheduling parameters: zero the execution quantum and
 * preemption timeout for every scheduling group slot in the config
 * (ARRAY_SIZE(config->exec_quantum) entries). Caller must hold the PF
 * master mutex, as asserted below.
 */
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
+	int i;
+
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
-	config->exec_quantum = 0;
-	config->preempt_timeout = 0;
+	for (i = 0; i < ARRAY_SIZE(config->exec_quantum); i++) {
+		config->exec_quantum[i] = 0;
+		config->preempt_timeout[i] = 0;
+	}
}
static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
#ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
#define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
+#include "abi/guc_scheduler_abi.h"
#include "xe_ggtt_types.h"
#include "xe_guc_klv_thresholds_set_types.h"
/** @begin_db: start index of GuC doorbell ID range. */
u16 begin_db;
/** @exec_quantum: execution-quantum in milliseconds. */
- u32 exec_quantum;
+ u32 exec_quantum[GUC_MAX_SCHED_GROUPS];
/** @preempt_timeout: preemption timeout in microseconds. */
- u32 preempt_timeout;
+ u32 preempt_timeout[GUC_MAX_SCHED_GROUPS];
/** @sched_priority: scheduling priority. */
u32 sched_priority;
/** @thresholds: GuC thresholds for adverse events notifications. */