From: Gustavo Sousa
Date: Tue, 23 Jul 2024 12:01:21 +0000 (-0300)
Subject: drm/xe/mmio: Use single logic for waiting functions
X-Git-Tag: v6.12-rc1~126^2~25^2~13
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=be8f9f4c866f41b62a3df9a5cfd54e0e88fc3e3e;p=thirdparty%2Flinux.git

drm/xe/mmio: Use single logic for waiting functions

The implementations of xe_mmio_wait32() and xe_mmio_wait32_not() are
almost identical. Let us avoid duplication of logic by having them call
a common __xe_mmio_wait32() function.

Signed-off-by: Gustavo Sousa
Reviewed-by: Himal Prasad Ghimiray
Signed-off-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/20240723120120.5443-2-gustavo.sousa@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 435c01d003a85..bdcc7282385c4 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -333,37 +333,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
 	return (u64)udw << 32 | ldw;
 }
 
-/**
- * xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
- * @reg: register to read value from
- * @mask: mask to be applied to the value read from the register
- * @val: desired value after applying the mask
- * @timeout_us: time out after this period of time. Wait logic tries to be
- * smart, applying an exponential backoff until @timeout_us is reached.
- * @out_val: if not NULL, points where to store the last unmasked value
- * @atomic: needs to be true if calling from an atomic context
- *
- * This function polls for the desired masked value and returns zero on success
- * or -ETIMEDOUT if timed out.
- *
- * Note that @timeout_us represents the minimum amount of time to wait before
- * giving up. The actual time taken by this function can be a little more than
- * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
- * it is possible that this function succeeds even after @timeout_us has passed.
- */
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
-		   u32 *out_val, bool atomic)
+static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+			    u32 *out_val, bool atomic, bool expect_match)
 {
 	ktime_t cur = ktime_get_raw();
 	const ktime_t end = ktime_add_us(cur, timeout_us);
 	int ret = -ETIMEDOUT;
 	s64 wait = 10;
 	u32 read;
+	bool check;
 
 	for (;;) {
 		read = xe_mmio_read32(gt, reg);
-		if ((read & mask) == val) {
+
+		check = (read & mask) == val;
+		if (!expect_match)
+			check = !check;
+
+		if (check) {
 			ret = 0;
 			break;
 		}
@@ -384,7 +371,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
 
 	if (ret != 0) {
 		read = xe_mmio_read32(gt, reg);
-		if ((read & mask) == val)
+
+		check = (read & mask) == val;
+		if (!expect_match)
+			check = !check;
+
+		if (check)
 			ret = 0;
 	}
 
@@ -395,62 +387,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
 }
 
 /**
- * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
  * @gt: MMIO target GT
  * @reg: register to read value from
  * @mask: mask to be applied to the value read from the register
- * @val: value to match after applying the mask
+ * @val: desired value after applying the mask
  * @timeout_us: time out after this period of time. Wait logic tries to be
  * smart, applying an exponential backoff until @timeout_us is reached.
  * @out_val: if not NULL, points where to store the last unmasked value
  * @atomic: needs to be true if calling from an atomic context
  *
- * This function polls for a masked value to change from a given value and
- * returns zero on success or -ETIMEDOUT if timed out.
+ * This function polls for the desired masked value and returns zero on success
+ * or -ETIMEDOUT if timed out.
  *
  * Note that @timeout_us represents the minimum amount of time to wait before
  * giving up. The actual time taken by this function can be a little more than
  * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
  * it is possible that this function succeeds even after @timeout_us has passed.
  */
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+		   u32 *out_val, bool atomic)
+{
+	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+}
+
+/**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+ * @timeout_us: time out after this period of time
+ * @out_val: if not NULL, points where to store the last unmasked value
+ * @atomic: needs to be true if calling from an atomic context
+ *
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
 int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
 		       u32 *out_val, bool atomic)
 {
-	ktime_t cur = ktime_get_raw();
-	const ktime_t end = ktime_add_us(cur, timeout_us);
-	int ret = -ETIMEDOUT;
-	s64 wait = 10;
-	u32 read;
-
-	for (;;) {
-		read = xe_mmio_read32(gt, reg);
-		if ((read & mask) != val) {
-			ret = 0;
-			break;
-		}
-
-		cur = ktime_get_raw();
-		if (!ktime_before(cur, end))
-			break;
-
-		if (ktime_after(ktime_add_us(cur, wait), end))
-			wait = ktime_us_delta(end, cur);
-
-		if (atomic)
-			udelay(wait);
-		else
-			usleep_range(wait, wait << 1);
-		wait <<= 1;
-	}
-
-	if (ret != 0) {
-		read = xe_mmio_read32(gt, reg);
-		if ((read & mask) != val)
-			ret = 0;
-	}
-
-	if (out_val)
-		*out_val = read;
-
-	return ret;
+	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
 }
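
For readers who want to see the pattern outside the kernel tree, the sketch below is a minimal userspace illustration of the same idea: one common poll loop parameterized by an expect_match flag, wrapped by two thin helpers. It only mirrors the control flow of __xe_mmio_wait32(); the names fake_read32(), now_us(), wait32_common(), wait32() and wait32_not() are made up for the example, clock_gettime()/nanosleep() stand in for ktime and udelay()/usleep_range(), and there is no atomic path. This is not the code added by the patch.

/* Userspace sketch of the pattern, not the kernel code itself. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for xe_mmio_read32(): a fake register whose low bits change ~1 ms. */
static uint32_t fake_read32(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_nsec >> 20);
}

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Common loop: succeed when ((read & mask) == val) agrees with expect_match. */
static int wait32_common(uint32_t mask, uint32_t val, uint32_t timeout_us,
			 uint32_t *out_val, bool expect_match)
{
	const int64_t end = now_us() + timeout_us;
	struct timespec req;
	int ret = -ETIMEDOUT;
	int64_t wait = 10, now;
	uint32_t read;
	bool check;

	for (;;) {
		read = fake_read32();

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;
		if (check) {
			ret = 0;
			break;
		}

		now = now_us();
		if (now >= end)
			break;

		/* Exponential backoff, clamped to the remaining time budget. */
		if (now + wait > end)
			wait = end - now;
		req.tv_sec = wait / 1000000;
		req.tv_nsec = (wait % 1000000) * 1000;
		nanosleep(&req, NULL);
		wait <<= 1;
	}

	/* Final re-read so a condition met just after the deadline still succeeds. */
	if (ret != 0) {
		read = fake_read32();
		check = (read & mask) == val;
		if (!expect_match)
			check = !check;
		if (check)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}

/* Thin wrappers, mirroring xe_mmio_wait32() and xe_mmio_wait32_not(). */
static int wait32(uint32_t mask, uint32_t val, uint32_t timeout_us, uint32_t *out_val)
{
	return wait32_common(mask, val, timeout_us, out_val, true);
}

static int wait32_not(uint32_t mask, uint32_t val, uint32_t timeout_us, uint32_t *out_val)
{
	return wait32_common(mask, val, timeout_us, out_val, false);
}

int main(void)
{
	uint32_t last;
	int a = wait32(0x0, 0x0, 1000, &last);		/* mask 0 matches immediately */
	int b = wait32_not(0xff, 0x0, 2000, &last);	/* wait for a nonzero low byte */

	printf("wait32=%d wait32_not=%d last=0x%08x\n", a, b, last);
	return (a || b) ? 1 : 0;
}

As in the patched helper, the loop re-checks the register once more after the deadline has passed, so a condition that becomes true "late" is still reported as success (0) rather than -ETIMEDOUT, which is why only the polarity of the check needs to be parameterized.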