XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
- XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
- * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
-/**
- * DOC: GuC Scheduling Policies KLVs
- *
- * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
- *
- * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
- * Some platforms do not allow concurrent execution of RCS and CCS
- * workloads from different address spaces. By default, the GuC prioritizes
- * RCS submissions over CCS ones, which can lead to CCS workloads being
- * significantly (or completely) starved of execution time. This KLV allows
- * the driver to specify a quantum (in ms) and a ratio (percentage value
- * between 0 and 100), and the GuC will prioritize the CCS for that
- * percentage of each quantum. For example, specifying 100ms and 30% will
- * make the GuC prioritize the CCS for 30ms of every 100ms.
- * Note that this does not necessarily mean that RCS and CCS engines will
- * only be active for their percentage of the quantum, as the restriction
- * only kicks in if both classes are fully busy with non-compatible address
- * spaces; i.e., if one engine is idle or running the same address space,
- * a pending job on the other engine will still be submitted to the HW no
- * matter what the ratio is
- */
-#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
-#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
-
/**
* DOC: GuC VGT Policy KLVs
*
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
-#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- xe_guc_submit_disable(&gt->uc.guc);
+ gt->uc.guc.submission_state.enabled = false;
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
return ret;
}
- return xe_guc_submit_enable(guc);
+ guc->submission_state.enabled = true;
+
+ return 0;
}
int xe_guc_reset(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- xe_guc_submit_disable(guc);
+ guc->submission_state.enabled = false;
}
int xe_guc_reset_prepare(struct xe_guc *guc)
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
-#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
-/*
- * Given that we want to guarantee enough RCS throughput to avoid missing
- * frames, we set the yield policy to 20% of each 80ms interval.
- */
-#define RC_YIELD_DURATION 80 /* in ms */
-#define RC_YIELD_RATIO 20 /* in percent */
-static u32 *emit_render_compute_yield_klv(u32 *emit)
-{
- *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
- *emit++ = RC_YIELD_DURATION;
- *emit++ = RC_YIELD_RATIO;
-
- return emit;
-}
-
-#define SCHEDULING_POLICY_MAX_DWORDS 16
-static int guc_init_global_schedule_policy(struct xe_guc *guc)
-{
- u32 data[SCHEDULING_POLICY_MAX_DWORDS];
- u32 *emit = data;
- u32 count = 0;
- int ret;
-
- if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
- return 0;
-
- *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
-
- if (CCS_MASK(guc_to_gt(guc)))
- emit = emit_render_compute_yield_klv(emit);
-
- count = emit - data;
- if (count > 1) {
- xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
-
- ret = xe_guc_ct_send_block(&guc->ct, data, count);
- if (ret < 0) {
- xe_gt_err(guc_to_gt(guc),
- "failed to enable GuC sheduling policies: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
- }
-
- return 0;
-}
-
-int xe_guc_submit_enable(struct xe_guc *guc)
-{
- int ret;
-
- ret = guc_init_global_schedule_policy(guc);
- if (ret)
- return ret;
-
- guc->submission_state.enabled = true;
-
- return 0;
-}
-
-void xe_guc_submit_disable(struct xe_guc *guc)
-{
- guc->submission_state.enabled = false;
-}
-
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
-int xe_guc_submit_enable(struct xe_guc *guc);
-void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);