drm/xe/guc_pc: Use poll_timeout_us() for waiting
author Lucas De Marchi <lucas.demarchi@intel.com>
Mon, 22 Sep 2025 19:58:30 +0000 (12:58 -0700)
committer Lucas De Marchi <lucas.demarchi@intel.com>
Thu, 25 Sep 2025 04:23:19 +0000 (21:23 -0700)
Convert wait_for_pc_state() and wait_for_act_freq_limit() to
poll_timeout_us(). This brings 2 changes in behavior: it drops the
exponential wait and fixes a potentially much longer sleep.

usleep_range() may sleep anywhere between `wait` and `wait << 1`, so
crediting the elapsed time as `slept += wait` under-counts what was
actually slept. Paired with the exponential wait increase, the loop
could end up waiting much longer than intended: up to roughly twice
the requested timeout in the worst case.
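
To make the worst case concrete, here is a minimal userspace C sketch
(not part of the commit) that replays the removed loop's accounting;
the 50 ms budget is an arbitrary example value:

	#include <stdio.h>

	/*
	 * Worst-case model of the removed loop: usleep_range(wait, wait << 1)
	 * may sleep up to twice `wait`, but the loop only credits `wait`.
	 */
	int main(void)
	{
		int timeout_us = 50 * 1000;	/* arbitrary 50 ms budget */
		int slept, wait = 10;
		long actual_max = 0;		/* upper bound on time really slept */

		for (slept = 0; slept < timeout_us;) {
			actual_max += wait << 1;	/* usleep_range() upper bound */
			slept += wait;			/* what the loop credits */
			wait <<= 1;
			if (slept + wait > timeout_us)
				wait = timeout_us - slept;
		}

		printf("credited %d us, worst case ~%ld us\n", slept, actual_max);
		return 0;
	}

With the 50 ms budget this credits exactly 50000 us while the
usleep_range() upper bound allows ~100000 us, i.e. about twice the
requested timeout.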

Reviewed-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Link: https://lore.kernel.org/r/20250922-xe-iopoll-v4-2-06438311a63f@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_guc_pc.c

index 53fdf59524c41af0963d896c731dba1f3393c0c7..3c0feb50a1e2fa62cc6e3ec90acc086a5be839cc 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/cleanup.h>
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/ktime.h>
 #include <linux/wait_bit.h>
@@ -130,26 +131,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
         FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
 
 static int wait_for_pc_state(struct xe_guc_pc *pc,
-                            enum slpc_global_state state,
+                            enum slpc_global_state target_state,
                             int timeout_ms)
 {
-       int timeout_us = 1000 * timeout_ms;
-       int slept, wait = 10;
+       enum slpc_global_state state;
 
        xe_device_assert_mem_access(pc_to_xe(pc));
 
-       for (slept = 0; slept < timeout_us;) {
-               if (slpc_shared_data_read(pc, header.global_state) == state)
-                       return 0;
-
-               usleep_range(wait, wait << 1);
-               slept += wait;
-               wait <<= 1;
-               if (slept + wait > timeout_us)
-                       wait = timeout_us - slept;
-       }
-
-       return -ETIMEDOUT;
+       return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
+                              state == target_state,
+                              20, timeout_ms * USEC_PER_MSEC, false);
 }
 
 static int wait_for_flush_complete(struct xe_guc_pc *pc)
@@ -164,24 +155,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc)
        return 0;
 }
 
-static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
 {
-       int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
-       int slept, wait = 10;
-
-       for (slept = 0; slept < timeout_us;) {
-               if (xe_guc_pc_get_act_freq(pc) <= freq)
-                       return 0;
-
-               usleep_range(wait, wait << 1);
-               slept += wait;
-               wait <<= 1;
-               if (slept + wait > timeout_us)
-                       wait = timeout_us - slept;
-       }
+       u32 freq;
 
-       return -ETIMEDOUT;
+       return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
+                              freq <= max_limit,
+                              20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
 }
+
 static int pc_action_reset(struct xe_guc_pc *pc)
 {
        struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -983,7 +965,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
         * Wait for actual freq to go below the flush cap: even if the previous
         * max was below cap, the current one might still be above it
         */
-       ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+       ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
        if (ret)
                xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
                               BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
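
For reference, below is a rough userspace model of the poll_timeout_us()
contract as the new code uses it: evaluate `op`, test `cond`, sleep
`sleep_us` between polls, and return -ETIMEDOUT once `timeout_us` is
exhausted. The real macro lives in <linux/iopoll.h>; its in-kernel
implementation may differ, so treat the details here as assumptions.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Userspace approximation of poll_timeout_us(); the in-kernel macro
	 * in <linux/iopoll.h> is the authoritative version.
	 */
	#define poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before)	\
	({									\
		long __left = (timeout_us);					\
		int __ret = -ETIMEDOUT;					\
		if (sleep_before)						\
			usleep(sleep_us);					\
		for (;;) {							\
			op;							\
			if (cond) {						\
				__ret = 0;					\
				break;						\
			}							\
			if (__left <= 0)					\
				break;						\
			usleep(sleep_us);					\
			__left -= (sleep_us);					\
		}								\
		__ret;								\
	})

	static int counter;
	static int read_counter(void) { return ++counter; }

	int main(void)
	{
		int v;
		/* succeeds once read_counter() reaches 5, well before 1000 us */
		int ret = poll_timeout_us(v = read_counter(), v >= 5, 20, 1000, false);

		printf("ret=%d v=%d\n", ret, v);
		return 0;
	}

This mirrors the calls in the diff above: `op` is the read
(slpc_shared_data_read() or xe_guc_pc_get_act_freq()), `cond` is the
target comparison, and the final `false` skips a sleep before the
first read.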