--- /dev/null
+From 02f7e4101092b88e57c73171174976c8a72a3eba Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 2 Oct 2017 12:20:51 -0700
+Subject: socket, bpf: fix possible use after free
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit eefca20eb20c66b06cf5ed09b49b1a7caaa27b7b upstream
+
+Starting from linux-4.4, the TCP three-way handshake (3WHS) no longer
+takes the listener lock.
+
+Since then, we might hit a use-after-free in sk_filter_charge() if
+the filter pointer we got from the memcpy() of the listener content
+happens to be concurrently replaced by a thread changing the
+listener's BPF filter.
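+
+Roughly, the racy interleaving looks like this (an illustrative
+sketch of the pre-patch code paths, not an exact trace):
+
+	CPU 0 (3WHS, lockless)             CPU 1 (setsockopt)
+	----------------------             ------------------
+	sk_clone_lock()
+	  memcpy() copies sk_filter        attach a new BPF filter;
+	                                   the old filter's last
+	                                   reference is dropped and
+	                                   it is freed
+	  sk_filter_charge()
+	    atomic_inc(&fp->refcnt)   <--- increments freed memory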
+
+To fix this, we need to make sure the filter refcount is not already
+zero before incrementing it again.
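+
+For illustration only, here is a minimal userspace sketch of the
+same check-before-increment idiom, using C11 atomics rather than
+the kernel's atomic_inc_not_zero() (struct obj and the helper name
+are made up for this example):
+
+	#include <stdatomic.h>
+	#include <stdbool.h>
+
+	struct obj {
+		atomic_int refcnt;	/* 0 means: being freed, hands off */
+	};
+
+	/* Take a reference only if the object is still live.  A plain
+	 * atomic increment would "resurrect" an object whose last
+	 * reference was already dropped, reopening the use-after-free
+	 * window described above.
+	 */
+	static bool obj_get_unless_zero(struct obj *o)
+	{
+		int old = atomic_load(&o->refcnt);
+
+		while (old != 0) {
+			if (atomic_compare_exchange_weak(&o->refcnt,
+							 &old, old + 1))
+				return true;	/* got a reference */
+		}
+		return false;		/* too late, caller must bail out */
+	}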
+
+Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Zubin Mithra <zsm@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 15 +++++++++++++--
+ net/core/sock.c | 5 ++++-
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -920,20 +920,31 @@ void sk_filter_uncharge(struct sock *sk,
+ /* try to charge the socket memory if there is space available
+ * return true on success
+ */
+-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
++static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+ {
+ u32 filter_size = bpf_prog_size(fp->prog->len);
+
+ /* same check as in sock_kmalloc() */
+ if (filter_size <= sysctl_optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+- atomic_inc(&fp->refcnt);
+ atomic_add(filter_size, &sk->sk_omem_alloc);
+ return true;
+ }
+ return false;
+ }
+
++bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
++{
++ if (!atomic_inc_not_zero(&fp->refcnt))
++ return false;
++
++ if (!__sk_filter_charge(sk, fp)) {
++ sk_filter_release(fp);
++ return false;
++ }
++ return true;
++}
++
+ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
+ {
+ struct sock_filter *old_prog;
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1550,13 +1550,16 @@ struct sock *sk_clone_lock(const struct
+ sock_reset_flag(newsk, SOCK_DONE);
+ skb_queue_head_init(&newsk->sk_error_queue);
+
+- filter = rcu_dereference_protected(newsk->sk_filter, 1);
++ rcu_read_lock();
++ filter = rcu_dereference(sk->sk_filter);
+ if (filter != NULL)
+ /* though it's an empty new sock, the charging may fail
+ * if sysctl_optmem_max was changed between creation of
+ * original socket and cloning
+ */
+ is_charged = sk_filter_charge(newsk, filter);
++ RCU_INIT_POINTER(newsk->sk_filter, filter);
++ rcu_read_unlock();
+
+ if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+ /* We need to make sure that we don't uncharge the new