* (C) Copyright 2013-2014,2018 Red Hat, Inc.
* (C) Copyright 2015 Intel Corp.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
+ * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
*
* Authors: Waiman Long <longman@redhat.com>
* Peter Zijlstra <peterz@infradead.org>
+ * Kumar Kartikeya Dwivedi <memxor@gmail.com>
*/
#include <linux/smp.h>
#include <asm/qspinlock.h>
#include <trace/events/lock.h>
#include <asm/rqspinlock.h>
+#include <linux/timekeeping.h>
/*
 * Include queued spinlock definitions and statistics code
 */
#include "../locking/mcs_spinlock.h"
+struct rqspinlock_timeout {
+	u64 timeout_end;	/* absolute deadline in ns; 0 means not yet armed */
+	u64 duration;		/* timeout duration in ns for the current wait */
+	u16 spin;		/* wrapping counter; clock is only read when it hits 0 */
+};
+
+static noinline int check_timeout(struct rqspinlock_timeout *ts)
+{
+	u64 time = ktime_get_mono_fast_ns();
+
+	/* First check since RES_RESET_TIMEOUT(): arm the deadline lazily. */
+	if (!ts->timeout_end) {
+		ts->timeout_end = time + ts->duration;
+		return 0;
+	}
+
+	/* Deadline already armed; report expiry once we sail past it. */
+	if (time > ts->timeout_end)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * Amortize the cost of reading the clock: check_timeout() is only invoked
+ * when the 16-bit 'spin' counter wraps to zero, i.e. roughly once every
+ * 64K invocations of this macro. The macro evaluates to the latest verdict
+ * stored in 'ret', which the caller is expected to initialize to 0.
+ */
+#define RES_CHECK_TIMEOUT(ts, ret)                    \
+	({                                            \
+		if (!(ts).spin++)                     \
+			(ret) = check_timeout(&(ts)); \
+		(ret);                                \
+	})
+
+/*
+ * Initialize the 'spin' member. Starting at 1 defers the first clock read
+ * (and the lazy arming of the deadline) until the counter wraps around.
+ */
+#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 1; })
+
+/*
+ * We only need to reset 'timeout_end'; 'spin' will just wrap around as
+ * necessary. The duration is defined per spin attempt, so set it here as
+ * well.
+ */
+#define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
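+
+/*
+ * Usage sketch for the timeout helpers above: initialize the state once,
+ * arm a fresh deadline before each waiting phase, and poll
+ * RES_CHECK_TIMEOUT() from the spin loop, which only reads the clock when
+ * the 16-bit counter wraps. Illustrative only; the function name, the
+ * words being spun on and the durations are invented for the example and
+ * are not used anywhere in this file.
+ */
+static inline int example_wait_two_phases(u32 *first, u32 *second)
+{
+	struct rqspinlock_timeout ts;
+	int ret = 0;
+
+	RES_INIT_TIMEOUT(ts);
+
+	/* Phase 1: wait up to a hypothetical 100ms for *first to clear. */
+	RES_RESET_TIMEOUT(ts, 100 * NSEC_PER_MSEC);
+	while (READ_ONCE(*first)) {
+		if (RES_CHECK_TIMEOUT(ts, ret))
+			return ret;	/* -ETIMEDOUT */
+		cpu_relax();
+	}
+
+	/* Phase 2: re-arm with a different duration for the next wait. */
+	RES_RESET_TIMEOUT(ts, 250 * NSEC_PER_MSEC);
+	while (READ_ONCE(*second)) {
+		if (RES_CHECK_TIMEOUT(ts, ret))
+			return ret;	/* -ETIMEDOUT */
+		cpu_relax();
+	}
+
+	return 0;
+}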
+
/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 */
void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
{
struct mcs_spinlock *prev, *next, *node;
+ struct rqspinlock_timeout ts;
u32 old, tail;
int idx;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+ RES_INIT_TIMEOUT(ts);
+
/*
* Wait for in-progress pending->locked hand-overs with a bounded
* number of spins so that we guarantee forward progress.