git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 Aug 2019 07:25:05 +0000 (09:25 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 Aug 2019 07:25:05 +0000 (09:25 +0200)
added patches:
rxrpc-fix-local-endpoint-refcounting.patch
rxrpc-fix-read-after-free-in-rxrpc_queue_local.patch

queue-4.19/rxrpc-fix-local-endpoint-refcounting.patch [new file with mode: 0644]
queue-4.19/rxrpc-fix-read-after-free-in-rxrpc_queue_local.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/rxrpc-fix-local-endpoint-refcounting.patch b/queue-4.19/rxrpc-fix-local-endpoint-refcounting.patch
new file mode 100644
index 0000000..c72e18d
--- /dev/null
@@ -0,0 +1,281 @@
+From 730c5fd42c1e3652a065448fd235cb9fafb2bd10 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Fri, 9 Aug 2019 15:20:41 +0100
+Subject: rxrpc: Fix local endpoint refcounting
+
+From: David Howells <dhowells@redhat.com>
+
+commit 730c5fd42c1e3652a065448fd235cb9fafb2bd10 upstream.
+
+The object lifetime management on the rxrpc_local struct is broken in that
+the rxrpc_local_processor() function is expected to clean up and remove an
+object - but it may get requeued by packets coming in on the backing UDP
+socket once it starts running.
+
+This may result in the assertion in rxrpc_local_rcu() firing because the
+memory has been scheduled for RCU destruction whilst still queued:
+
+       rxrpc: Assertion failed
+       ------------[ cut here ]------------
+       kernel BUG at net/rxrpc/local_object.c:468!
+
+Note that if the processor comes around before the RCU free function, it
+will just do nothing because ->dead is true.
+
+Fix this by adding a separate refcount to count active users of the
+endpoint that causes the endpoint to be destroyed when it reaches 0.
+
+The original refcount can then be used to refcount objects through the work
+processor and cause the memory to be rcu freed when that reaches 0.
+
+Fixes: 4f95dd78a77e ("rxrpc: Rework local endpoint management")
+Reported-by: syzbot+1e0edc4b8b7494c28450@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
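To make the two counters easier to follow, here is a rough, self-contained userspace sketch of the split-refcount pattern this patch introduces, written with C11 atomics. The endpoint_* names, the inline free() standing in for call_rcu(), and the printf() standing in for the work-item teardown are illustrative assumptions, not the kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct endpoint {
        atomic_uint usage;        /* references to the structure itself */
        atomic_uint active_users; /* users that need the endpoint functional */
    };

    static struct endpoint *endpoint_alloc(void)
    {
        struct endpoint *ep = calloc(1, sizeof(*ep));

        if (ep) {
            atomic_init(&ep->usage, 1);
            atomic_init(&ep->active_users, 1);
        }
        return ep;
    }

    /* Take a struct reference only if the object is not already being freed. */
    static bool endpoint_get_maybe(struct endpoint *ep)
    {
        unsigned int n = atomic_load(&ep->usage);

        while (n != 0)
            if (atomic_compare_exchange_weak(&ep->usage, &n, n + 1))
                return true;
        return false;
    }

    /* Drop a struct reference; the last one frees the memory. */
    static void endpoint_put(struct endpoint *ep)
    {
        if (atomic_fetch_sub(&ep->usage, 1) == 1)
            free(ep); /* stands in for the kernel's call_rcu() */
    }

    /* Begin actively using the endpoint (analogue of rxrpc_use_local()). */
    static struct endpoint *endpoint_use(struct endpoint *ep)
    {
        unsigned int n;

        if (!endpoint_get_maybe(ep))
            return NULL;

        n = atomic_load(&ep->active_users);
        while (n != 0)
            if (atomic_compare_exchange_weak(&ep->active_users, &n, n + 1))
                return ep;

        endpoint_put(ep); /* lost a race against the final unuse */
        return NULL;
    }

    /* Stop using the endpoint (analogue of rxrpc_unuse_local()). */
    static void endpoint_unuse(struct endpoint *ep)
    {
        if (atomic_fetch_sub(&ep->active_users, 1) == 1) {
            /* Last active user: the patch hands the caller's struct ref
             * to the work item, which closes the socket and then does
             * the final put; modelled here as an inline message. */
            printf("tearing down transport %p\n", (void *)ep);
        }
        endpoint_put(ep);
    }

    int main(void)
    {
        struct endpoint *ep = endpoint_alloc();
        struct endpoint *user;

        if (!ep)
            return 1;
        user = endpoint_use(ep);  /* a second active user appears */
        if (user)
            endpoint_unuse(user); /* ...and goes away again */
        endpoint_unuse(ep);       /* last user: teardown, then free */
        return 0;
    }

The point of the split is that "usage" only protects the memory, so the work processor can keep taking and dropping it safely however often it is requeued, while "active_users" alone decides when the transport is actually shut down.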
+---
+ net/rxrpc/af_rxrpc.c     |    4 +-
+ net/rxrpc/ar-internal.h  |    5 ++
+ net/rxrpc/input.c        |   16 ++++++--
+ net/rxrpc/local_object.c |   86 +++++++++++++++++++++++++++++------------------
+ 4 files changed, 72 insertions(+), 39 deletions(-)
+
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *soc
+ service_in_use:
+       write_unlock(&local->services_lock);
+-      rxrpc_put_local(local);
++      rxrpc_unuse_local(local);
+       ret = -EADDRINUSE;
+ error_unlock:
+       release_sock(&rx->sk);
+@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct soc
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rxnet->client_conn_reaper);
+-      rxrpc_put_local(rx->local);
++      rxrpc_unuse_local(rx->local);
+       rx->local = NULL;
+       key_put(rx->key);
+       rx->key = NULL;
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -258,7 +258,8 @@ struct rxrpc_security {
+  */
+ struct rxrpc_local {
+       struct rcu_head         rcu;
+-      atomic_t                usage;
++      atomic_t                active_users;   /* Number of users of the local endpoint */
++      atomic_t                usage;          /* Number of references to the structure */
+       struct rxrpc_net        *rxnet;         /* The network ns in which this resides */
+       struct list_head        link;
+       struct socket           *socket;        /* my UDP socket */
+@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(s
+ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
+ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
+ void rxrpc_put_local(struct rxrpc_local *);
++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
++void rxrpc_unuse_local(struct rxrpc_local *);
+ void rxrpc_queue_local(struct rxrpc_local *);
+ void rxrpc_destroy_all_locals(struct rxrpc_net *);
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(s
+ {
+       _enter("%p,%p", local, skb);
+-      skb_queue_tail(&local->event_queue, skb);
+-      rxrpc_queue_local(local);
++      if (rxrpc_get_local_maybe(local)) {
++              skb_queue_tail(&local->event_queue, skb);
++              rxrpc_queue_local(local);
++      } else {
++              rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
++      }
+ }
+ /*
+@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct r
+ {
+       CHECK_SLAB_OKAY(&local->usage);
+-      skb_queue_tail(&local->reject_queue, skb);
+-      rxrpc_queue_local(local);
++      if (rxrpc_get_local_maybe(local)) {
++              skb_queue_tail(&local->reject_queue, skb);
++              rxrpc_queue_local(local);
++      } else {
++              rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
++      }
+ }
+ /*
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_l
+       local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
+       if (local) {
+               atomic_set(&local->usage, 1);
++              atomic_set(&local->active_users, 1);
+               local->rxnet = rxnet;
+               INIT_LIST_HEAD(&local->link);
+               INIT_WORK(&local->processor, rxrpc_local_processor);
+@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(s
+                * bind the transport socket may still fail if we're attempting
+                * to use a local address that the dying object is still using.
+                */
+-              if (!rxrpc_get_local_maybe(local)) {
+-                      cursor = cursor->next;
+-                      list_del_init(&local->link);
++              if (!rxrpc_use_local(local))
+                       break;
+-              }
+               age = "old";
+               goto found;
+@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(s
+       if (ret < 0)
+               goto sock_error;
+-      list_add_tail(&local->link, cursor);
++      if (cursor != &rxnet->local_endpoints)
++              list_replace(cursor, &local->link);
++      else
++              list_add_tail(&local->link, cursor);
+       age = "new";
+ found:
+@@ -346,7 +347,8 @@ struct rxrpc_local *rxrpc_get_local_mayb
+ }
+ /*
+- * Queue a local endpoint.
++ * Queue a local endpoint unless it has become unreferenced and pass the
++ * caller's reference to the work item.
+  */
+ void rxrpc_queue_local(struct rxrpc_local *local)
+ {
+@@ -355,15 +357,8 @@ void rxrpc_queue_local(struct rxrpc_loca
+       if (rxrpc_queue_work(&local->processor))
+               trace_rxrpc_local(local, rxrpc_local_queued,
+                                 atomic_read(&local->usage), here);
+-}
+-
+-/*
+- * A local endpoint reached its end of life.
+- */
+-static void __rxrpc_put_local(struct rxrpc_local *local)
+-{
+-      _enter("%d", local->debug_id);
+-      rxrpc_queue_work(&local->processor);
++      else
++              rxrpc_put_local(local);
+ }
+ /*
+@@ -379,11 +374,46 @@ void rxrpc_put_local(struct rxrpc_local
+               trace_rxrpc_local(local, rxrpc_local_put, n, here);
+               if (n == 0)
+-                      __rxrpc_put_local(local);
++                      call_rcu(&local->rcu, rxrpc_local_rcu);
+       }
+ }
+ /*
++ * Start using a local endpoint.
++ */
++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
++{
++      unsigned int au;
++
++      local = rxrpc_get_local_maybe(local);
++      if (!local)
++              return NULL;
++
++      au = atomic_fetch_add_unless(&local->active_users, 1, 0);
++      if (au == 0) {
++              rxrpc_put_local(local);
++              return NULL;
++      }
++
++      return local;
++}
++
++/*
++ * Cease using a local endpoint.  Once the number of active users reaches 0, we
++ * start the closure of the transport in the work processor.
++ */
++void rxrpc_unuse_local(struct rxrpc_local *local)
++{
++      unsigned int au;
++
++      au = atomic_dec_return(&local->active_users);
++      if (au == 0)
++              rxrpc_queue_local(local);
++      else
++              rxrpc_put_local(local);
++}
++
++/*
+  * Destroy a local endpoint's socket and then hand the record to RCU to dispose
+  * of.
+  *
+@@ -397,16 +427,6 @@ static void rxrpc_local_destroyer(struct
+       _enter("%d", local->debug_id);
+-      /* We can get a race between an incoming call packet queueing the
+-       * processor again and the work processor starting the destruction
+-       * process which will shut down the UDP socket.
+-       */
+-      if (local->dead) {
+-              _leave(" [already dead]");
+-              return;
+-      }
+-      local->dead = true;
+-
+       mutex_lock(&rxnet->local_mutex);
+       list_del_init(&local->link);
+       mutex_unlock(&rxnet->local_mutex);
+@@ -426,13 +446,11 @@ static void rxrpc_local_destroyer(struct
+        */
+       rxrpc_purge_queue(&local->reject_queue);
+       rxrpc_purge_queue(&local->event_queue);
+-
+-      _debug("rcu local %d", local->debug_id);
+-      call_rcu(&local->rcu, rxrpc_local_rcu);
+ }
+ /*
+- * Process events on an endpoint
++ * Process events on an endpoint.  The work item carries a ref which
++ * we must release.
+  */
+ static void rxrpc_local_processor(struct work_struct *work)
+ {
+@@ -445,8 +463,10 @@ static void rxrpc_local_processor(struct
+       do {
+               again = false;
+-              if (atomic_read(&local->usage) == 0)
+-                      return rxrpc_local_destroyer(local);
++              if (atomic_read(&local->active_users) == 0) {
++                      rxrpc_local_destroyer(local);
++                      break;
++              }
+               if (!skb_queue_empty(&local->reject_queue)) {
+                       rxrpc_reject_packets(local);
+@@ -458,6 +478,8 @@ static void rxrpc_local_processor(struct
+                       again = true;
+               }
+       } while (again);
++
++      rxrpc_put_local(local);
+ }
+ /*
diff --git a/queue-4.19/rxrpc-fix-read-after-free-in-rxrpc_queue_local.patch b/queue-4.19/rxrpc-fix-read-after-free-in-rxrpc_queue_local.patch
new file mode 100644 (file)
index 0000000..b48a9b7
--- /dev/null
@@ -0,0 +1,122 @@
+From 06d9532fa6b34f12a6d75711162d47c17c1add72 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 13 Aug 2019 22:26:36 +0100
+Subject: rxrpc: Fix read-after-free in rxrpc_queue_local()
+
+From: David Howells <dhowells@redhat.com>
+
+commit 06d9532fa6b34f12a6d75711162d47c17c1add72 upstream.
+
+rxrpc_queue_local() attempts to queue the local endpoint it is given and
+then, if successful, prints a trace line.  The trace line includes the
+current usage count - but we're not allowed to look at the local endpoint
+at this point as we passed our ref on it to the workqueue.
+
+Fix this by reading the usage count before queuing the work item.
+
+Also fix the reading of local->debug_id for trace lines, which must be done
+with the same consideration as reading the usage count.
+
+Fixes: 09d2bf595db4 ("rxrpc: Add a tracepoint to track rxrpc_local refcounting")
+Reported-by: syzbot+78e71c5bab4f76a6a719@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
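The pattern behind the bug and its fix boils down to a few lines of plain C: once your reference has been handed to another execution context, the object may already be freed, so anything destined for the trace line has to be copied out beforehand. A rough userspace sketch, assuming C11 atomics; the endpoint/worker names and the immediate-release stand-in for queue_work() are illustrative, not the kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct endpoint {
        unsigned int debug_id;
        atomic_uint usage;
    };

    /* Drop a reference; the last one frees the object. */
    static void endpoint_put(struct endpoint *ep)
    {
        if (atomic_fetch_sub(&ep->usage, 1) == 1)
            free(ep);
    }

    /* Stand-in for queue_work(): assume the worker runs immediately and
     * releases the reference we passed it before this even returns. */
    static bool hand_off_to_worker(struct endpoint *ep)
    {
        endpoint_put(ep);
        return true;
    }

    /* BROKEN: dereferences ep after our reference has been handed away. */
    static void queue_endpoint_buggy(struct endpoint *ep)
    {
        if (hand_off_to_worker(ep))
            printf("queued local %u, usage %u\n",
                   ep->debug_id, atomic_load(&ep->usage)); /* use after free */
        else
            endpoint_put(ep);
    }

    /* FIXED: snapshot everything we want to log while we still own the ref. */
    static void queue_endpoint_fixed(struct endpoint *ep)
    {
        unsigned int debug_id = ep->debug_id;
        unsigned int usage = atomic_load(&ep->usage);

        if (hand_off_to_worker(ep))
            printf("queued local %u, usage %u\n", debug_id, usage);
        else
            endpoint_put(ep);
    }

    int main(void)
    {
        struct endpoint *ep = calloc(1, sizeof(*ep));

        if (!ep)
            return 1;
        ep->debug_id = 1;
        atomic_init(&ep->usage, 1);
        (void)queue_endpoint_buggy; /* kept only to show the broken shape */
        queue_endpoint_fixed(ep);   /* our reference is consumed here */
        return 0;
    }

The same reasoning explains the TRACE_EVENT change below: the tracepoint now takes the plain debug_id value rather than the rxrpc_local pointer, so nothing tempts a caller into dereferencing the object after the hand-off.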
+---
+ include/trace/events/rxrpc.h |    6 +++---
+ net/rxrpc/local_object.c     |   19 ++++++++++---------
+ 2 files changed, 13 insertions(+), 12 deletions(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -500,10 +500,10 @@ rxrpc_tx_points;
+ #define E_(a, b)      { a, b }
+ TRACE_EVENT(rxrpc_local,
+-          TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
++          TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
+                    int usage, const void *where),
+-          TP_ARGS(local, op, usage, where),
++          TP_ARGS(local_debug_id, op, usage, where),
+           TP_STRUCT__entry(
+                   __field(unsigned int,       local           )
+@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
+                            ),
+           TP_fast_assign(
+-                  __entry->local = local->debug_id;
++                  __entry->local = local_debug_id;
+                   __entry->op = op;
+                   __entry->usage = usage;
+                   __entry->where = where;
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -97,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_l
+               local->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               memcpy(&local->srx, srx, sizeof(*srx));
+               local->srx.srx_service = 0;
+-              trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
++              trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
+       }
+       _leave(" = %p", local);
+@@ -325,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(stru
+       int n;
+       n = atomic_inc_return(&local->usage);
+-      trace_rxrpc_local(local, rxrpc_local_got, n, here);
++      trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
+       return local;
+ }
+@@ -339,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_mayb
+       if (local) {
+               int n = atomic_fetch_add_unless(&local->usage, 1, 0);
+               if (n > 0)
+-                      trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
++                      trace_rxrpc_local(local->debug_id, rxrpc_local_got,
++                                        n + 1, here);
+               else
+                       local = NULL;
+       }
+@@ -347,16 +348,16 @@ struct rxrpc_local *rxrpc_get_local_mayb
+ }
+ /*
+- * Queue a local endpoint unless it has become unreferenced and pass the
+- * caller's reference to the work item.
++ * Queue a local endpoint and pass the caller's reference to the work item.
+  */
+ void rxrpc_queue_local(struct rxrpc_local *local)
+ {
+       const void *here = __builtin_return_address(0);
++      unsigned int debug_id = local->debug_id;
++      int n = atomic_read(&local->usage);
+       if (rxrpc_queue_work(&local->processor))
+-              trace_rxrpc_local(local, rxrpc_local_queued,
+-                                atomic_read(&local->usage), here);
++              trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+       else
+               rxrpc_put_local(local);
+ }
+@@ -371,7 +372,7 @@ void rxrpc_put_local(struct rxrpc_local
+       if (local) {
+               n = atomic_dec_return(&local->usage);
+-              trace_rxrpc_local(local, rxrpc_local_put, n, here);
++              trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
+               if (n == 0)
+                       call_rcu(&local->rcu, rxrpc_local_rcu);
+@@ -458,7 +459,7 @@ static void rxrpc_local_processor(struct
+               container_of(work, struct rxrpc_local, processor);
+       bool again;
+-      trace_rxrpc_local(local, rxrpc_local_processing,
++      trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
+                         atomic_read(&local->usage), NULL);
+       do {
diff --git a/queue-4.19/series b/queue-4.19/series
index 9c1b811fbeabe026c4b54382746e99bbe08e658f..418aac9e570449bef44e7ce898ec23bffaf81710 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -94,3 +94,5 @@ xfs-add-attibute-remove-and-helper-functions.patch
 xfs-always-rejoin-held-resources-during-defer-roll.patch
 dm-zoned-fix-potential-null-dereference-in-dmz_do_re.patch
 powerpc-allow-flush_-inval_-dcache_range-to-work-across-ranges-4gb.patch
+rxrpc-fix-local-endpoint-refcounting.patch
+rxrpc-fix-read-after-free-in-rxrpc_queue_local.patch