goto kill_vm_tile1;
}
update.ijob = ijob;
+ /*
+ * Only add page reclaim for the primary GT. The media GT does not
+ * have any PPC to flush, so enabling the PPC flush bit for media is
+ * effectively a NOP; it provides no performance benefit and does not
+ * interfere with the primary GT.
+ */
+ if (xe_page_reclaim_list_valid(&pt_update_ops->prl)) {
+ xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl);
+ /* Release the ref taken at alloc; the job now holds its own */
+ xe_page_reclaim_list_invalidate(&pt_update_ops->prl);
+ }
if (tile->media_gt) {
dep_scheduler = to_dep_scheduler(q, tile->media_gt);
#include "xe_dep_job_types.h"
#include "xe_dep_scheduler.h"
#include "xe_exec_queue.h"
+#include "xe_gt_printk.h"
#include "xe_gt_types.h"
+#include "xe_page_reclaim.h"
#include "xe_tlb_inval.h"
#include "xe_tlb_inval_job.h"
#include "xe_migrate.h"
job->start = start;
job->end = end;
job->fence_armed = false;
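+ /* Start empty; a PRL is attached later via xe_tlb_inval_job_add_page_reclaim() */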
+ xe_page_reclaim_list_init(&job->prl);
job->dep.ops = &dep_job_ops;
job->type = type;
kref_init(&job->refcount);
return ERR_PTR(err);
}
+/**
+ * xe_tlb_inval_job_add_page_reclaim() - Attach a PRL to a TLB invalidation job
+ * @job: TLB invalidation job that may trigger reclamation
+ * @prl: Page reclaim list populated during unbind
+ *
+ * Copies @prl into @job and takes an extra reference on the PRL entries so
+ * that ownership can transfer to the TLB fence when the job is pushed.
+ */
+void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job,
+ struct xe_page_reclaim_list *prl)
+{
+ struct xe_device *xe = gt_to_xe(job->q->gt);
+
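+ /* Attaching a PRL is only expected on platforms with page reclaim HW assist */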
+ xe_gt_WARN_ON(job->q->gt, !xe->info.has_page_reclaim_hw_assist);
+ job->prl = *prl;
+ /* Pairs with the put in xe_tlb_inval_job_destroy() */
+ xe_page_reclaim_entries_get(job->prl.entries);
+}
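+
+/*
+ * Usage sketch, mirroring the unbind caller: validate the PRL built during
+ * unbind, attach it to the invalidation job, then invalidate the local copy
+ * so the job's reference is the one that survives:
+ *
+ *	if (xe_page_reclaim_list_valid(&prl)) {
+ *		xe_tlb_inval_job_add_page_reclaim(job, &prl);
+ *		xe_page_reclaim_list_invalidate(&prl);
+ *	}
+ */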
+
static void xe_tlb_inval_job_destroy(struct kref *ref)
{
struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_vm *vm = job->vm;
+ /* Pairs with the get in xe_tlb_inval_job_add_page_reclaim() */
+ xe_page_reclaim_entries_put(job->prl.entries);
+
if (!job->fence_armed)
kfree(ifence);
else
struct xe_dep_scheduler;
struct xe_exec_queue;
struct xe_migrate;
+struct xe_page_reclaim_list;
struct xe_tlb_inval;
struct xe_tlb_inval_job;
struct xe_vm;
struct xe_dep_scheduler *dep_scheduler,
struct xe_vm *vm, u64 start, u64 end, int type);
+void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job,
+ struct xe_page_reclaim_list *prl);
+
int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job);
struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,