drm/xe: Invalidate media_gt TLBs in PT code
author    Matthew Brost <matthew.brost@intel.com>
          Tue, 20 Aug 2024 16:16:32 +0000 (09:16 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Thu, 22 Aug 2024 02:34:07 +0000 (19:34 -0700)
Testing on LNL has shown that the media GT's TLBs must be invalidated via
the GuC; update the PT code accordingly.

v2:
 - Do dma_fence_get before first call of invalidation_fence_init (Himal)
 - No need to check for valid chain fence (Himal)

Fixes: 3330361543fc ("drm/xe/lnl: Add LNL platform definition")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240820161632.987369-1-matthew.brost@intel.com
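
The interesting part of the change is how the two per-GT TLB invalidation
fences are composed. A minimal sketch of that pattern (the xe types and the
invalidation_fence_init() helper are those used in the diff below; the
standalone function and its signature are illustrative only, with error
handling elided):

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-chain.h>

    /*
     * Illustrative sketch: combine the primary-GT and media-GT TLB
     * invalidation fences into a single fence the caller can track.
     * 'fence' is the page-table update completion fence.
     */
    static struct dma_fence *
    compose_invalidation_fences(struct xe_tile *tile, struct dma_fence *fence,
                                struct invalidation_fence *ifence,
                                struct invalidation_fence *mfence,
                                struct dma_fence_chain *chain_fence,
                                u64 start, u64 last, u32 asid)
    {
            /*
             * Each invalidation_fence_init() consumes a reference to
             * 'fence'; with two consumers, take an extra reference
             * before the first call (the v2 note above).
             */
            dma_fence_get(fence);

            invalidation_fence_init(tile->primary_gt, ifence, fence,
                                    start, last, asid);
            invalidation_fence_init(tile->media_gt, mfence, fence,
                                    start, last, asid);

            /*
             * A dma_fence_chain node signals only once both its own
             * fence and every earlier link have signaled, so the
             * composite completes after both GTs' invalidations.
             */
            dma_fence_chain_init(chain_fence, &ifence->base.base,
                                 &mfence->base.base, 0);
            return &chain_fence->base;
    }

For dma_resv bookkeeping, however, the patch adds the two component fences
individually rather than the chain node, and passes both down through
op_commit(), so each reservation object tracks the per-GT invalidations
directly.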
drivers/gpu/drm/xe/xe_pt.c

index 579ed31b46dbaf9088d08bb234d96faca2698ce3..6c6714af3d5d26ffc1852397efd658be0c3de7c0 100644
@@ -3,6 +3,8 @@
  * Copyright © 2022 Intel Corporation
  */
 
+#include <linux/dma-fence-chain.h>
+
 #include "xe_pt.h"
 
 #include "regs/xe_gtt_defs.h"
@@ -1849,13 +1851,20 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
 
 static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
                           struct xe_vm_pgtable_update_ops *pt_update_ops,
-                          struct xe_vma *vma, struct dma_fence *fence)
+                          struct xe_vma *vma, struct dma_fence *fence,
+                          struct dma_fence *fence2)
 {
-       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
                dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                   pt_update_ops->wait_vm_bookkeep ?
                                   DMA_RESV_USAGE_KERNEL :
                                   DMA_RESV_USAGE_BOOKKEEP);
+               if (fence2)
+                       dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+                                          pt_update_ops->wait_vm_bookkeep ?
+                                          DMA_RESV_USAGE_KERNEL :
+                                          DMA_RESV_USAGE_BOOKKEEP);
+       }
        vma->tile_present |= BIT(tile->id);
        vma->tile_staged &= ~BIT(tile->id);
        if (xe_vma_is_userptr(vma)) {
@@ -1875,13 +1884,20 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 
 static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
                             struct xe_vm_pgtable_update_ops *pt_update_ops,
-                            struct xe_vma *vma, struct dma_fence *fence)
+                            struct xe_vma *vma, struct dma_fence *fence,
+                            struct dma_fence *fence2)
 {
-       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
                dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                   pt_update_ops->wait_vm_bookkeep ?
                                   DMA_RESV_USAGE_KERNEL :
                                   DMA_RESV_USAGE_BOOKKEEP);
+               if (fence2)
+                       dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+                                          pt_update_ops->wait_vm_bookkeep ?
+                                          DMA_RESV_USAGE_KERNEL :
+                                          DMA_RESV_USAGE_BOOKKEEP);
+       }
        vma->tile_present &= ~BIT(tile->id);
        if (!vma->tile_present) {
                list_del_init(&vma->combined_links.rebind);
@@ -1898,7 +1914,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 static void op_commit(struct xe_vm *vm,
                      struct xe_tile *tile,
                      struct xe_vm_pgtable_update_ops *pt_update_ops,
-                     struct xe_vma_op *op, struct dma_fence *fence)
+                     struct xe_vma_op *op, struct dma_fence *fence,
+                     struct dma_fence *fence2)
 {
        xe_vm_assert_held(vm);
 
@@ -1907,26 +1924,28 @@ static void op_commit(struct xe_vm *vm,
                if (!op->map.immediate && xe_vm_in_fault_mode(vm))
                        break;
 
-               bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+               bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+                              fence2);
                break;
        case DRM_GPUVA_OP_REMAP:
                unbind_op_commit(vm, tile, pt_update_ops,
-                                gpuva_to_vma(op->base.remap.unmap->va), fence);
+                                gpuva_to_vma(op->base.remap.unmap->va), fence,
+                                fence2);
 
                if (op->remap.prev)
                        bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
-                                      fence);
+                                      fence, fence2);
                if (op->remap.next)
                        bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
-                                      fence);
+                                      fence, fence2);
                break;
        case DRM_GPUVA_OP_UNMAP:
                unbind_op_commit(vm, tile, pt_update_ops,
-                                gpuva_to_vma(op->base.unmap.va), fence);
+                                gpuva_to_vma(op->base.unmap.va), fence, fence2);
                break;
        case DRM_GPUVA_OP_PREFETCH:
                bind_op_commit(vm, tile, pt_update_ops,
-                              gpuva_to_vma(op->base.prefetch.va), fence);
+                              gpuva_to_vma(op->base.prefetch.va), fence, fence2);
                break;
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -1963,7 +1982,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
        struct xe_vm_pgtable_update_ops *pt_update_ops =
                &vops->pt_update_ops[tile->id];
        struct dma_fence *fence;
-       struct invalidation_fence *ifence = NULL;
+       struct invalidation_fence *ifence = NULL, *mfence = NULL;
+       struct dma_fence_chain *chain_fence = NULL;
        struct xe_range_fence *rfence;
        struct xe_vma_op *op;
        int err = 0, i;
@@ -1996,6 +2016,18 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
                        err = -ENOMEM;
                        goto kill_vm_tile1;
                }
+               if (tile->media_gt) {
+                       mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+                       if (!mfence) {
+                               err = -ENOMEM;
+                               goto free_ifence;
+                       }
+                       chain_fence = dma_fence_chain_alloc();
+                       if (!chain_fence) {
+                               err = -ENOMEM;
+                               goto free_ifence;
+                       }
+               }
        }
 
        rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -2027,19 +2059,46 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
        /* tlb invalidation must be done before signaling rebind */
        if (ifence) {
+               if (mfence)
+                       dma_fence_get(fence);
                invalidation_fence_init(tile->primary_gt, ifence, fence,
                                        pt_update_ops->start,
                                        pt_update_ops->last, vm->usm.asid);
-               fence = &ifence->base.base;
+               if (mfence) {
+                       invalidation_fence_init(tile->media_gt, mfence, fence,
+                                               pt_update_ops->start,
+                                               pt_update_ops->last, vm->usm.asid);
+                       dma_fence_chain_init(chain_fence, &ifence->base.base,
+                                            &mfence->base.base, 0);
+                       fence = &chain_fence->base;
+               } else {
+                       fence = &ifence->base.base;
+               }
        }
 
-       dma_resv_add_fence(xe_vm_resv(vm), fence,
-                          pt_update_ops->wait_vm_bookkeep ?
-                          DMA_RESV_USAGE_KERNEL :
-                          DMA_RESV_USAGE_BOOKKEEP);
+       if (!mfence) {
+               dma_resv_add_fence(xe_vm_resv(vm), fence,
+                                  pt_update_ops->wait_vm_bookkeep ?
+                                  DMA_RESV_USAGE_KERNEL :
+                                  DMA_RESV_USAGE_BOOKKEEP);
+
+               list_for_each_entry(op, &vops->list, link)
+                       op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
+       } else {
+               dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+                                  pt_update_ops->wait_vm_bookkeep ?
+                                  DMA_RESV_USAGE_KERNEL :
+                                  DMA_RESV_USAGE_BOOKKEEP);
+
+               dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+                                  pt_update_ops->wait_vm_bookkeep ?
+                                  DMA_RESV_USAGE_KERNEL :
+                                  DMA_RESV_USAGE_BOOKKEEP);
 
-       list_for_each_entry(op, &vops->list, link)
-               op_commit(vops->vm, tile, pt_update_ops, op, fence);
+               list_for_each_entry(op, &vops->list, link)
+                       op_commit(vops->vm, tile, pt_update_ops, op,
+                                 &ifence->base.base, &mfence->base.base);
+       }
 
        if (pt_update_ops->needs_userptr_lock)
                up_read(&vm->userptr.notifier_lock);
@@ -2049,6 +2108,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 free_rfence:
        kfree(rfence);
 free_ifence:
+       dma_fence_chain_free(chain_fence);
+       kfree(mfence);
        kfree(ifence);
 kill_vm_tile1:
        if (err != -EAGAIN && tile->id)
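
A closing note on the unwind path: the goto free_ifence sites can be reached
with chain_fence and/or mfence still NULL, and both kfree() and
dma_fence_chain_free() (which, for a chain node that was never initialized
or published, reduces to a kfree()) accept NULL, so a single label covers
every partial-allocation failure. Annotated, under that assumption:

    free_ifence:
            /* chain_fence/mfence may still be NULL here; both helpers
             * below are NULL-safe, so no extra unwind labels are
             * needed for the tile->media_gt allocation failures. */
            dma_fence_chain_free(chain_fence);
            kfree(mfence);
            kfree(ifence);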