From b0ac4ef074ace20714a3bf96d23ae2ba61cc4439 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Mon, 22 Sep 2025 12:58:30 -0700
Subject: [PATCH] drm/xe/guc_pc: Use poll_timeout_us() for waiting

Convert wait_for_pc_state() and wait_for_act_freq_limit() to
poll_timeout_us(). This brings 2 changes in behavior: it drops the
exponential backoff and fixes a potentially much longer sleep than
intended. usleep_range() may sleep anywhere between `wait` and
`wait << 1`, so it's not correct to assume `slept += wait`: the
accounting was inaccurate. Paired with the exponential increase of the
wait period, the loop could end up sleeping for much longer than the
requested timeout.

Reviewed-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Link: https://lore.kernel.org/r/20250922-xe-iopoll-v4-2-06438311a63f@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
 drivers/gpu/drm/xe/xe_guc_pc.c | 44 ++++++++++------------------------
 1 file changed, 13 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 53fdf59524c41..3c0feb50a1e2f 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -7,6 +7,7 @@
 
 #include <linux/cleanup.h>
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/ktime.h>
 #include <linux/wait_bit.h>
@@ -130,26 +131,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
 	  FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
 
 static int wait_for_pc_state(struct xe_guc_pc *pc,
-			     enum slpc_global_state state,
+			     enum slpc_global_state target_state,
 			     int timeout_ms)
 {
-	int timeout_us = 1000 * timeout_ms;
-	int slept, wait = 10;
+	enum slpc_global_state state;
 
 	xe_device_assert_mem_access(pc_to_xe(pc));
 
-	for (slept = 0; slept < timeout_us;) {
-		if (slpc_shared_data_read(pc, header.global_state) == state)
-			return 0;
-
-		usleep_range(wait, wait << 1);
-		slept += wait;
-		wait <<= 1;
-		if (slept + wait > timeout_us)
-			wait = timeout_us - slept;
-	}
-
-	return -ETIMEDOUT;
+	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
+			       state == target_state,
+			       20, timeout_ms * USEC_PER_MSEC, false);
 }
 
 static int wait_for_flush_complete(struct xe_guc_pc *pc)
@@ -164,24 +155,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc)
 	return 0;
 }
 
-static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
 {
-	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
-	int slept, wait = 10;
-
-	for (slept = 0; slept < timeout_us;) {
-		if (xe_guc_pc_get_act_freq(pc) <= freq)
-			return 0;
-
-		usleep_range(wait, wait << 1);
-		slept += wait;
-		wait <<= 1;
-		if (slept + wait > timeout_us)
-			wait = timeout_us - slept;
-	}
+	u32 freq;
 
-	return -ETIMEDOUT;
+	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
+			       freq <= max_limit,
+			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
 }
+
 static int pc_action_reset(struct xe_guc_pc *pc)
 {
 	struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -983,7 +965,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
 	 * Wait for actual freq to go below the flush cap: even if the previous
 	 * max was below cap, the current one might still be above it
 	 */
-	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
 	if (ret)
 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
-- 
2.47.3
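
To make the "much longer sleep" concrete, below is a minimal user-space
sketch of the removed loop's worst case, not kernel code. The 5000 us
budget is illustrative (the real callers pass their own timeouts), and
usleep_range() is modeled as always hitting its upper bound, which the
API permits:

	#include <stdio.h>

	int main(void)
	{
		int timeout_us = 5000;		/* illustrative budget */
		int slept = 0, wait = 10;	/* same initial values as the removed loop */
		long actual = 0;		/* elapsed time if usleep_range() always
						 * sleeps its upper bound, wait << 1 */

		while (slept < timeout_us) {
			actual += wait << 1;	/* usleep_range(wait, wait << 1) worst case */
			slept += wait;		/* ...but only `wait` was accounted */
			wait <<= 1;
			if (slept + wait > timeout_us)
				wait = timeout_us - slept;
		}

		/* prints: accounted 5000 us, worst-case actual 10000 us */
		printf("accounted %d us, worst-case actual %ld us\n", slept, actual);
		return 0;
	}

So the old loop could sleep up to twice the requested timeout. With the
converted code, poll_timeout_us() polls at a fixed 20 us interval and
bounds the wait by the timeout argument itself, so the overshoot is
limited to roughly one sleep interval.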