Since engines in the same class can be divided across multiple groups,
the GuC does not allow scheduler groups to be active if there are
multi-LRC contexts. This means that:
1) if an MLRC context is already registered when we enable scheduler
groups, the GuC will silently ignore the configuration
2) if an MLRC context is registered after scheduler groups are enabled,
the GuC will disable the groups and generate an adverse event.
The expectation is that the admin will ensure that all apps that use
MLRC on the PF have been terminated before scheduler groups are created.
A check is added anyway to make sure we don't still have contexts
waiting to be cleaned up lying around. A check is also added at queue
creation time to block MLRC queue creation if scheduler groups have been
enabled.
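
For illustration, a minimal userspace sketch of the new queue-creation
failure mode (hypothetical fd and vm_id; engine instances are
placeholders, standard xe uAPI otherwise):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	static int try_parallel_queue(int fd, uint32_t vm_id)
	{
		struct drm_xe_engine_class_instance eci[2] = {
			{ .engine_class = DRM_XE_ENGINE_CLASS_COMPUTE, .engine_instance = 0 },
			{ .engine_class = DRM_XE_ENGINE_CLASS_COMPUTE, .engine_instance = 1 },
		};
		struct drm_xe_exec_queue_create create = {
			.width = 2,	/* multi-LRC: two engines per submission */
			.num_placements = 1,
			.vm_id = vm_id,
			.instances = (uintptr_t)eci,
		};

		/* Expected to fail with errno == EINVAL if sched groups are on */
		return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
	}

Single-LRC (width == 1) queue creation is unaffected either way.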
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patch.msgid.link/20251218223846.1146344-19-daniele.ceraolospurio@intel.com
#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
return return_mask;
}
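+/* True if we're on the PF and this GT has multiple scheduler groups enabled */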
+static bool has_sched_groups(struct xe_gt *gt)
+{
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt))
+ return true;
+
+ return false;
+}
+
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
return -ENOENT;
}
+ /* SR-IOV sched groups are not compatible with multi-LRC queues */
+ if (XE_IOCTL_DBG(xe, args->width > 1 && has_sched_groups(hwe->gt))) {
+ up_read(&vm->lock);
+ xe_vm_put(vm);
+ return -EINVAL;
+ }
+
q = xe_exec_queue_create(xe, vm, logical_mask,
args->width, hwe, flags,
args->extensions);
pf_flush_restart(gt);
return 0;
}
+
+/**
+ * xe_gt_sriov_pf_sched_groups_enabled - Check if multiple scheduler groups are
+ * enabled
+ * @gt: the &xe_gt
+ *
+ * This function is for PF use only.
+ *
+ * Return: true if sched groups are enabled, false otherwise.
+ */
+bool xe_gt_sriov_pf_sched_groups_enabled(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ return xe_gt_sriov_pf_policy_sched_groups_enabled(gt);
+}
+
#ifndef _XE_GT_SRIOV_PF_H_
#define _XE_GT_SRIOV_PF_H_
+#include <linux/types.h>
+
struct xe_gt;
#ifdef CONFIG_PCI_IOV
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
+bool xe_gt_sriov_pf_sched_groups_enabled(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
}
+
+static inline bool xe_gt_sriov_pf_sched_groups_enabled(struct xe_gt *gt)
+{
+ return false;
+}
#endif
#endif
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
+#include "xe_guc_submit.h"
#include "xe_pm.h"
/*
if (xe_sriov_pf_num_vfs(gt_to_xe(gt)))
return -EBUSY;
+ /*
+ * The GuC silently ignores the setting if any MLRC contexts are
+ * registered. We expect the admin to make sure that all apps that use
+ * MLRC are terminated before scheduler groups are enabled, so this
+ * check is just to make sure that the exec_queue destruction has been
+ * completed.
+ */
+ if (mode != XE_SRIOV_SCHED_GROUPS_DISABLED &&
+ xe_guc_has_registered_mlrc_queues(&gt->uc.guc)) {
+ xe_gt_sriov_notice(gt, "can't enable sched groups with active MLRC queues\n");
+ return -EPERM;
+ }
+
err = __pf_provision_sched_groups(gt, mode);
if (err)
return err;
return pf_provision_sched_groups(gt, mode);
}
+/**
+ * xe_gt_sriov_pf_policy_sched_groups_enabled() - check whether the GT has
+ * multiple scheduler groups enabled
+ * @gt: the &xe_gt to check
+ *
+ * This function can only be called on PF.
+ *
+ * Return: true if the GT has multiple groups enabled, false otherwise.
+ */
+bool xe_gt_sriov_pf_policy_sched_groups_enabled(struct xe_gt *gt)
+{
+ return gt->sriov.pf.policy.guc.sched_groups.current_mode != XE_SRIOV_SCHED_GROUPS_DISABLED;
+}
+
static void pf_sanitize_guc_policies(struct xe_gt *gt)
{
pf_sanitize_sched_if_idle(gt);
enum xe_sriov_sched_group_modes mode);
int xe_gt_sriov_pf_policy_set_sched_groups_mode(struct xe_gt *gt,
enum xe_sriov_sched_group_modes mode);
+bool xe_gt_sriov_pf_policy_sched_groups_enabled(struct xe_gt *gt);
void xe_gt_sriov_pf_policy_init(struct xe_gt *gt);
void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt);
mutex_unlock(&guc->submission_state.lock);
}
+/**
+ * xe_guc_has_registered_mlrc_queues - check whether there are any MLRC queues
+ * registered with the GuC
+ * @guc: GuC.
+ *
+ * Return: true if any MLRC queue is registered with the GuC, false otherwise.
+ */
+bool xe_guc_has_registered_mlrc_queues(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
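+ /* Scoped lock: dropped automatically when the guard goes out of scope */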
+ guard(mutex)(&guc->submission_state.lock);
+
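+ /* Queues wider than a single LRC are multi-LRC (parallel submission) */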
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ if (q->width > 1)
+ return true;
+
+ return false;
+}
+
/**
* xe_guc_contexts_hwsp_rebase - Re-compute GGTT references within all
* exec queues registered to given GuC.
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type);
+bool xe_guc_has_registered_mlrc_queues(struct xe_guc *guc);
+
int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch);
#endif