git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
rxrpc: Fix call removal to use RCU safe deletion
Author: David Howells <dhowells@redhat.com>
Wed, 8 Apr 2026 12:12:32 +0000 (13:12 +0100)
Committer: Jakub Kicinski <kuba@kernel.org>
Thu, 9 Apr 2026 01:44:32 +0000 (18:44 -0700)
Fix rxrpc call removal from the rxnet->calls list to use list_del_rcu()
rather than list_del_init() so that a concurrent reader of
/proc/net/rxrpc/calls cannot be corrupted and potentially sent into an
infinite loop.

This, however, means that list_empty() no longer works on an entry that's
been deleted from the list, making it harder to detect prior deletion.  Fix
this by:

Firstly, make rxrpc_destroy_all_calls() only dump the first ten calls that
are unexpectedly still on the list.  Limiting the number of steps means
there's no need to call cond_resched() or to remove calls from the list
here, thereby eliminating the need for rxrpc_put_call() to check for that.

rxrpc_put_call() can then be fixed to unconditionally delete the call from
the list as it is the only place that the deletion occurs.

Fixes: 2baec2c3f854 ("rxrpc: Support network namespacing")
Closes: https://sashiko.dev/#/patchset/20260319150150.4189381-1-dhowells%40redhat.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Jeffrey Altman <jaltman@auristor.com>
cc: Linus Torvalds <torvalds@linux-foundation.org>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
cc: stable@kernel.org
Link: https://patch.msgid.link/20260408121252.2249051-5-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/trace/events/rxrpc.h
net/rxrpc/call_object.c

index 869f97c9bf733e48ed69b038e3f592a5a3a0ff7f..a826cd80007b34dba36fd6007d5eb47a3e115d96 100644 (file)
        EM(rxrpc_call_see_release,              "SEE release ") \
        EM(rxrpc_call_see_userid_exists,        "SEE u-exists") \
        EM(rxrpc_call_see_waiting_call,         "SEE q-conn  ") \
-       E_(rxrpc_call_see_zap,                  "SEE zap     ")
+       E_(rxrpc_call_see_still_live,           "SEE !still-l")
 
 #define rxrpc_txqueue_traces \
        EM(rxrpc_txqueue_await_reply,           "AWR") \
index 918f41d97a2f93e42fed57262bcb7f3804e1e039..59329cfe15322c81fbc0e9fab2d54ea63798fae1 100644 (file)
@@ -654,11 +654,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
        if (dead) {
                ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
 
-               if (!list_empty(&call->link)) {
-                       spin_lock(&rxnet->call_lock);
-                       list_del_init(&call->link);
-                       spin_unlock(&rxnet->call_lock);
-               }
+               spin_lock(&rxnet->call_lock);
+               list_del_rcu(&call->link);
+               spin_unlock(&rxnet->call_lock);
 
                rxrpc_cleanup_call(call);
        }
@@ -730,24 +728,20 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
        _enter("");
 
        if (!list_empty(&rxnet->calls)) {
-               spin_lock(&rxnet->call_lock);
+               int shown = 0;
 
-               while (!list_empty(&rxnet->calls)) {
-                       call = list_entry(rxnet->calls.next,
-                                         struct rxrpc_call, link);
-                       _debug("Zapping call %p", call);
+               spin_lock(&rxnet->call_lock);
 
-                       rxrpc_see_call(call, rxrpc_call_see_zap);
-                       list_del_init(&call->link);
+               list_for_each_entry(call, &rxnet->calls, link) {
+                       rxrpc_see_call(call, rxrpc_call_see_still_live);
 
                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, refcount_read(&call->ref),
                               rxrpc_call_states[__rxrpc_call_state(call)],
                               call->flags, call->events);
 
-                       spin_unlock(&rxnet->call_lock);
-                       cond_resched();
-                       spin_lock(&rxnet->call_lock);
+                       if (++shown >= 10)
+                               break;
                }
 
                spin_unlock(&rxnet->call_lock);