bpf: Cleanup unused func args in rqspinlock implementation
author    Siddharth Chintamaneni <sidchintamaneni@gmail.com>
          Wed, 1 Oct 2025 17:27:02 +0000 (17:27 +0000)
committer Alexei Starovoitov <ast@kernel.org>
          Tue, 7 Oct 2025 22:30:43 +0000 (15:30 -0700)
Clean up unused function arguments in the check_deadlock*() helpers: drop the struct rqspinlock_timeout * parameter that none of them (nor is_lock_released()) use, and the mask argument that check_deadlock_AA() never reads, adjusting the callers accordingly.

Fixes: 31158ad02ddb ("rqspinlock: Add deadlock detection and recovery")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20251001172702.122838-1-sidchintamaneni@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index a00561b1d3e515c2d3390eb5ee840000f806b668..21be48108e96209cb90c87c24961cafc20e7d161 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
 DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
 EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
 
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
 {
        if (!(atomic_read_acquire(&lock->val) & (mask)))
                return true;
        return false;
 }
 
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
-                                     struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
 {
        struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
        int cnt = min(RES_NR_HELD, rqh->cnt);
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
  * more locks, which reduce to ABBA). This is not exhaustive, and we rely on
  * timeouts as the final line of defense.
  */
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
-                                       struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
 {
        struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
        int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
                 * Let's ensure to break out of this loop if the lock is available for
                 * us to potentially acquire.
                 */
-               if (is_lock_released(lock, mask, ts))
+               if (is_lock_released(lock, mask))
                        return 0;
 
                /*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
        return 0;
 }
 
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
-                                  struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
 {
        int ret;
 
-       ret = check_deadlock_AA(lock, mask, ts);
+       ret = check_deadlock_AA(lock);
        if (ret)
                return ret;
-       ret = check_deadlock_ABBA(lock, mask, ts);
+       ret = check_deadlock_ABBA(lock, mask);
        if (ret)
                return ret;
 
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
         */
        if (prev + NSEC_PER_MSEC < time) {
                ts->cur = time;
-               return check_deadlock(lock, mask, ts);
+               return check_deadlock(lock, mask);
        }
 
        return 0;
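
For context, here is a minimal userspace sketch of the call chain as it looks after this cleanup. It is an illustration, not the kernel code: the per-CPU rqspinlock_held_locks table is replaced by a thread-local stand-in, the body of the cross-CPU ABBA scan is elided, and only the cleaned-up signatures mirror the patch.

	/*
	 * Minimal userspace sketch of the post-patch call chain, for
	 * illustration only.  The kernel's per-CPU rqspinlock_held table
	 * becomes a thread-local stand-in here, and the cross-CPU ABBA
	 * scan body is elided.
	 */
	#include <errno.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define RES_NR_HELD 31
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	typedef struct { atomic_int val; } rqspinlock_t;

	/* Stand-in for the kernel's per-CPU rqspinlock_held_locks. */
	static _Thread_local struct {
		int cnt;
		rqspinlock_t *locks[RES_NR_HELD];
	} held;

	/* Post-patch shape: no struct rqspinlock_timeout * threaded through. */
	static bool is_lock_released(rqspinlock_t *lock, unsigned int mask)
	{
		return !(atomic_load_explicit(&lock->val,
					      memory_order_acquire) & mask);
	}

	/* AA detection: re-acquiring a lock this context already holds? */
	static int check_deadlock_AA(rqspinlock_t *lock)
	{
		int cnt = MIN(RES_NR_HELD, held.cnt);

		for (int i = 0; i < cnt; i++)
			if (held.locks[i] == lock)
				return -EDEADLK;
		return 0;
	}

	/*
	 * The kernel version walks every other CPU's held-locks table
	 * looking for an AB/BA cycle; only the early exit shown in this
	 * diff is kept here.
	 */
	static int check_deadlock_ABBA(rqspinlock_t *lock, unsigned int mask)
	{
		if (is_lock_released(lock, mask))
			return 0;
		/* ... cross-CPU AB/BA scan elided in this sketch ... */
		return 0;
	}

	static int check_deadlock(rqspinlock_t *lock, unsigned int mask)
	{
		int ret = check_deadlock_AA(lock);

		if (ret)
			return ret;
		return check_deadlock_ABBA(lock, mask);
	}

	int main(void)
	{
		rqspinlock_t lock = { .val = 1 };

		held.locks[held.cnt++] = &lock;	/* pretend we hold it */
		return check_deadlock(&lock, 0xff) == -EDEADLK ? 0 : 1;
	}

The sketch exits 0 because the AA check catches the re-acquisition before the ABBA scan runs, matching the ordering in check_deadlock() above.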