From: Brian Nguyen Date: Fri, 12 Dec 2025 21:32:33 +0000 (+0800) Subject: drm/xe: Prep page reclaim in tlb inval job X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9945e6a52f3c66b40ae30c2f4b79312a56fc9ba7;p=thirdparty%2Flinux.git drm/xe: Prep page reclaim in tlb inval job Use the page reclaim list as an indicator that a page reclaim action is desired and pass it to the tlb inval fence to handle. The job will need to maintain its own embedded copy to ensure the lifetime of the PRL extends until the job has run. v2: - Use xe variant of WARN_ON (Michal) v3: - Add comments for PRL tile handling and flush behavior with media. (Matthew Brost) Signed-off-by: Brian Nguyen Reviewed-by: Matthew Brost Cc: Michal Wajdeczko Signed-off-by: Matthew Brost Link: https://patch.msgid.link/20251212213225.3564537-19-brian3.nguyen@intel.com --- diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ac17d5702030..6e01675213c7 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2512,6 +2512,17 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) goto kill_vm_tile1; } update.ijob = ijob; + /* + * Only add page reclaim for the primary GT. The media GT does not have + * any PPC to flush, so enabling the PPC flush bit for media is + * effectively a NOP: it provides no performance benefit, nor does it + * interfere with the primary GT. 
+ */ + if (xe_page_reclaim_list_valid(&pt_update_ops->prl)) { + xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl); + /* Release ref from alloc, job will now handle it */ + xe_page_reclaim_list_invalidate(&pt_update_ops->prl); + } if (tile->media_gt) { dep_scheduler = to_dep_scheduler(q, tile->media_gt); diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c index 78e39a4fb264..fc5b4a32a32d 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_job.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c @@ -7,7 +7,9 @@ #include "xe_dep_job_types.h" #include "xe_dep_scheduler.h" #include "xe_exec_queue.h" +#include "xe_gt_printk.h" #include "xe_gt_types.h" +#include "xe_page_reclaim.h" #include "xe_tlb_inval.h" #include "xe_tlb_inval_job.h" #include "xe_migrate.h" @@ -116,6 +118,7 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, job->start = start; job->end = end; job->fence_armed = false; + xe_page_reclaim_list_init(&job->prl); job->dep.ops = &dep_job_ops; job->type = type; kref_init(&job->refcount); @@ -149,6 +152,25 @@ err_job: return ERR_PTR(err); } +/** + * xe_tlb_inval_job_add_page_reclaim() - Embed PRL into a TLB job + * @job: TLB invalidation job that may trigger reclamation + * @prl: Page reclaim list populated during unbind + * + * Copies @prl into the job and takes an extra reference to the entry page so + * ownership can transfer to the TLB fence when the job is pushed. 
+ */ +void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job, + struct xe_page_reclaim_list *prl) +{ + struct xe_device *xe = gt_to_xe(job->q->gt); + + xe_gt_WARN_ON(job->q->gt, !xe->info.has_page_reclaim_hw_assist); + job->prl = *prl; + /* Pair with put in job_destroy */ + xe_page_reclaim_entries_get(job->prl.entries); +} + static void xe_tlb_inval_job_destroy(struct kref *ref) { struct xe_tlb_inval_job *job = container_of(ref, typeof(*job), @@ -159,6 +181,9 @@ static void xe_tlb_inval_job_destroy(struct kref *ref) struct xe_device *xe = gt_to_xe(q->gt); struct xe_vm *vm = job->vm; + /* BO creation retains a copy (if used), so no longer needed */ + xe_page_reclaim_entries_put(job->prl.entries); + if (!job->fence_armed) kfree(ifence); else diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h index 4d6df1a6c6ca..03d6e21cd611 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_job.h +++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h @@ -12,6 +12,7 @@ struct dma_fence; struct xe_dep_scheduler; struct xe_exec_queue; struct xe_migrate; +struct xe_page_reclaim_list; struct xe_tlb_inval; struct xe_tlb_inval_job; struct xe_vm; @@ -21,6 +22,9 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval, struct xe_dep_scheduler *dep_scheduler, struct xe_vm *vm, u64 start, u64 end, int type); +void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job, + struct xe_page_reclaim_list *prl); + int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job); struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,