From: Matt Roper <matthew.d.roper@intel.com>
Date: Tue, 10 Sep 2024 23:47:37 +0000 (-0700)
Subject: drm/xe/device: Convert register access to use xe_mmio
X-Git-Tag: v6.13-rc1~122^2~20^2~115
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=10a0575c2ff625eecdb8e7533ce212d6a76c1eab;p=thirdparty%2Fkernel%2Flinux.git

drm/xe/device: Convert register access to use xe_mmio

Stop using GT pointers for register access.  Since a GT was passed as a
parameter to verify_lmem_ready() solely as a way to do MMIO accesses,
change the parameter to xe_device, which more accurately reflects that
this is a device-wide operation.

v2:
 - Expand commit message to explain why verify_lmem_ready()'s parameter
   changes.  (Rodrigo)

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240910234719.3335472-62-matthew.d.roper@intel.com
---
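The conversion pattern is easiest to see side by side. The sketch below
is illustrative only, not code from this patch: the helper names
lmem_ready_old()/lmem_ready_new() are hypothetical, and it assumes the
usual xe driver headers for the types and register definitions
(GU_CNTL, LMEM_INIT), with xe_root_tile_mmio(xe) returning the root
tile's struct xe_mmio.

        /* Before: device-wide MMIO reads were routed through a GT pointer,
         * even though no GT state was involved.
         */
        static bool lmem_ready_old(struct xe_device *xe)
        {
                struct xe_gt *gt = xe_root_mmio_gt(xe);

                return xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
        }

        /* After: the same read goes through the root tile's xe_mmio,
         * making the device-wide nature of the access explicit.
         */
        static bool lmem_ready_new(struct xe_device *xe)
        {
                struct xe_mmio *mmio = xe_root_tile_mmio(xe);

                return xe_mmio_read32(mmio, GU_CNTL) & LMEM_INIT;
        }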
%d\n", ret); @@ -444,7 +444,7 @@ static void __xe_driver_flr(struct xe_device *xe) } /* Clear sticky completion status */ - xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); + xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); } static void xe_driver_flr(struct xe_device *xe) @@ -497,16 +497,15 @@ mask_err: return err; } -static bool verify_lmem_ready(struct xe_gt *gt) +static bool verify_lmem_ready(struct xe_device *xe) { - u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT; + u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; return !!val; } static int wait_for_lmem_ready(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); unsigned long timeout, start; if (!IS_DGFX(xe)) @@ -515,7 +514,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return 0; - if (verify_lmem_ready(gt)) + if (verify_lmem_ready(xe)) return 0; drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); @@ -544,7 +543,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) msleep(20); - } while (!verify_lmem_ready(gt)); + } while (!verify_lmem_ready(xe)); drm_dbg(&xe->drm, "lmem ready after %ums", jiffies_to_msecs(jiffies - start)); @@ -841,11 +840,9 @@ void xe_device_shutdown(struct xe_device *xe) */ void xe_device_wmb(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); - wmb(); if (IS_DGFX(xe)) - xe_mmio_write32(gt, VF_CAP_REG, 0); + xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); } /** @@ -886,7 +883,7 @@ void xe_device_td_flush(struct xe_device *xe) if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) return; - xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); + xe_mmio_write32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); /* * FIXME: We can likely do better here with our choice of * timeout. Currently we just assume the worst case, i.e. 150us, @@ -894,7 +891,7 @@ void xe_device_td_flush(struct xe_device *xe) * scenario on current platforms if all cache entries are * transient and need to be flushed.. */ - if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, + if (xe_mmio_wait32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, 150, NULL, false)) xe_gt_err_once(gt, "TD flush timeout\n"); @@ -917,9 +914,9 @@ void xe_device_l2_flush(struct xe_device *xe) return; spin_lock(>->global_invl_lock); - xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); + xe_mmio_write32(>->mmio, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) + if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(>->global_invl_lock);