drm/xe/pf: Clear all LMTT pages on alloc
Author:     Michal Wajdeczko <michal.wajdeczko@intel.com>
AuthorDate: Tue, 1 Jul 2025 22:00:52 +0000 (00:00 +0200)
Commit:     Lucas De Marchi <lucas.demarchi@intel.com>
CommitDate: Tue, 8 Jul 2025 03:57:07 +0000 (20:57 -0700)
Our LMEM buffer objects are not cleared by default on alloc,
and during VF provisioning we only set up LMTT PTEs for the
actually provisioned LMEM range. Beyond that valid range we
might leave stale data that could point to other VFs'
allocations or even to PF pages.

Explicitly clear all new LMTT pages to avoid the risk that a
malicious VF would try to exploit that gap.
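
A minimal sketch of that clearing, using the same lmtt_to_xe() and
xe_map_memset() helpers that appear in the diff below (the actual
call site is lmtt_pt_alloc()):

	/* Zero the freshly allocated page-table BO so that PTEs beyond
	 * the provisioned range read back as empty instead of stale data.
	 */
	xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);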

While around, add asserts to catch any undesired PTE overwrites
and low-level debug traces to track the LMTT PT life-cycle.
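
The overwrite guard added in lmtt_write_pte() makes the 64-bit PTE
write read roughly as sketched below; the 32-bit case additionally
asserts that the value fits in a u32 via overflows_type(). This is
only an illustration of how the guarded write reads after the patch:

	case sizeof(u64):
		/* A non-zero PTE may only land in a slot that currently
		 * reads back as zero, i.e. one that was never populated
		 * or was explicitly cleared.
		 */
		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));

		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
		break;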

Fixes: b1d204058218 ("drm/xe/pf: Introduce Local Memory Translation Table")
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Lukasz Laguna <lukasz.laguna@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Link: https://lore.kernel.org/r/20250701220052.1612-1-michal.wajdeczko@intel.com
(cherry picked from commit 3fae6918a3e27cce20ded2551f863fb05d4bef8d)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_lmtt.c

index 63db66df064b5098e697d409d6b27987b7acf733..023ed6a6b49da8040e5b437b133cc303ff04e0e0 100644
@@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
        }
 
        lmtt_assert(lmtt, xe_bo_is_vram(bo));
+       lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
+
+       xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
 
        pt->level = level;
        pt->bo = bo;
@@ -91,6 +94,9 @@ out:
 
 static void lmtt_pt_free(struct xe_lmtt_pt *pt)
 {
+       lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
+                  pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));
+
        xe_bo_unpin_map_no_vm(pt->bo);
        kfree(pt);
 }
@@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
 
        switch (lmtt->ops->lmtt_pte_size(level)) {
        case sizeof(u32):
+               lmtt_assert(lmtt, !overflows_type(pte, u32));
+               lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));
+
                xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
                break;
        case sizeof(u64):
+               lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));
+
                xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
                break;
        default: