# Provenance (gitweb blame-view residue, condensed):
# Repo: git.ipfire.org — thirdparty/kernel/stable-queue.git
# Path: releases/4.14.117/rxrpc-fix-net-namespace-cleanup.patch
# Queued-by: GKH; blame commit 15e6eb8b; repo head description: "Fixes for 4.19"
From foo@baz Sat 04 May 2019 11:25:56 AM CEST
From: David Howells <dhowells@redhat.com>
Date: Tue, 30 Apr 2019 08:34:08 +0100
Subject: rxrpc: Fix net namespace cleanup

From: David Howells <dhowells@redhat.com>

[ Upstream commit b13023421b5179413421333f602850914f6a7ad8 ]

In rxrpc_destroy_all_calls(), there are two phases: (1) make sure the
->calls list is empty, emitting error messages if not, and (2) wait for the
RCU cleanup to happen on outstanding calls (ie. ->nr_calls becomes 0).

To avoid taking the call_lock, the function prechecks ->calls and if empty,
it returns to avoid taking the lock - this is wrong, however: it still
needs to go and do the second phase and wait for ->nr_calls to become 0.

Without this, the rxrpc_net struct may get deallocated before we get to the
RCU cleanup for the last calls. This can lead to:

 Slab corruption (Not tainted): kmalloc-16k start=ffff88802b178000, len=16384
 050: 6b 6b 6b 6b 6b 6b 6b 6b 61 6b 6b 6b 6b 6b 6b 6b kkkkkkkkakkkkkkk

Note the "61" at offset 0x58. This corresponds to the ->nr_calls member of
struct rxrpc_net (which is >9k in size, and thus allocated out of the 16k
slab).

Fix this by flipping the condition on the if-statement, putting the locked
section inside the if-body and dropping the return from there. The
function will then always go on to wait for the RCU cleanup on outstanding
calls.

Fixes: 2baec2c3f854 ("rxrpc: Support network namespacing")
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/rxrpc/call_object.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -684,27 +684,27 @@ void rxrpc_destroy_all_calls(struct rxrp
 
 	_enter("");
 
-	if (list_empty(&rxnet->calls))
-		return;
-
-	write_lock(&rxnet->call_lock);
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);
 
-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
-
-		rxrpc_see_call(call);
-		list_del_init(&call->link);
-
-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);
+
+			rxrpc_see_call(call);
+			list_del_init(&call->link);
+
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);
+
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}
 
 		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
 	}
-
-	write_unlock(&rxnet->call_lock);
 }