#define UNWIND_MAX_ENTRIES \
((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
-/* Guards adding to and reading the list of callbacks */
+/* Guards adding to or removing from the list of callbacks */
static DEFINE_MUTEX(callback_mutex);
static LIST_HEAD(callbacks);
/* Zero'd bits are available for assigning callback users */
static unsigned long unwind_mask = RESERVED_BITS;
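+/* Protects readers of the callbacks list that cannot take callback_mutex */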
+DEFINE_STATIC_SRCU(unwind_srcu);
static inline bool unwind_pending(struct unwind_task_info *info)
{
cookie = info->id.id;
- guard(mutex)(&callback_mutex);
- list_for_each_entry(work, &callbacks, list) {
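+ /* Walk under SRCU: entries may be removed concurrently by a cancel */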
+ guard(srcu)(&unwind_srcu);
+ list_for_each_entry_srcu(work, &callbacks, list,
+ srcu_read_lock_held(&unwind_srcu)) {
if (test_bit(work->bit, &bits)) {
work->func(work, &trace, cookie);
if (info->cache)
{
struct unwind_task_info *info = &current->unwind_info;
unsigned long old, bits;
- unsigned long bit = BIT(work->bit);
+ unsigned long bit;
int ret;
*cookie = 0;
if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
return -EINVAL;
+ /* Do not allow cancelled works to request again */
+ bit = READ_ONCE(work->bit);
+ if (WARN_ON_ONCE((long)bit < 0))
+ return -EINVAL;
+
+ /* Only need the mask now */
+ bit = BIT(bit);
+
guard(irqsave)();
*cookie = get_cookie(info);
return;
guard(mutex)(&callback_mutex);
- list_del(&work->list);
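+ /* Unlink with RCU; SRCU readers may still be iterating the list */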
+ list_del_rcu(&work->list);
+
+ /* Do not allow any more requests and prevent callbacks */
+ work->bit = -1;
__clear_bit(bit, &unwind_mask);
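+ /* Wait for all SRCU readers so no callback for this work is still running */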
+ synchronize_srcu(&unwind_srcu);
+
guard(rcu)();
/* Clear this bit from all threads */
for_each_process_thread(g, t) {
work->bit = ffz(unwind_mask);
__set_bit(work->bit, &unwind_mask);
- list_add(&work->list, &callbacks);
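+ /* Add with RCU so the insertion is safe against concurrent SRCU readers */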
+ list_add_rcu(&work->list, &callbacks);
work->func = func;
return 0;
}