--- /dev/null
+From 3701cb59d892b88d569427586f01491552f377b1 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 24 Sep 2020 19:41:58 -0400
+Subject: ep_create_wakeup_source(): dentry name can change under you...
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 3701cb59d892b88d569427586f01491552f377b1 upstream.
+
+or get freed, for that matter, if it's a long (separately stored)
+name.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1389,7 +1389,7 @@ static int reverse_path_check(void)
+
+ static int ep_create_wakeup_source(struct epitem *epi)
+ {
+- const char *name;
++ struct name_snapshot n;
+ struct wakeup_source *ws;
+
+ if (!epi->ep->ws) {
+@@ -1398,8 +1398,9 @@ static int ep_create_wakeup_source(struc
+ return -ENOMEM;
+ }
+
+- name = epi->ffd.file->f_path.dentry->d_name.name;
+- ws = wakeup_source_register(name);
++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
++ ws = wakeup_source_register(n.name);
++ release_dentry_name_snapshot(&n);
+
+ if (!ws)
+ return -ENOMEM;
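
For reference, a minimal sketch of the dcache API the patch switches to
(take_dentry_name_snapshot()/release_dentry_name_snapshot() are the real
VFS helpers used above; the surrounding function is hypothetical):

    #include <linux/dcache.h>

    static void use_dentry_name(struct dentry *dentry)
    {
            struct name_snapshot snap;

            /* BAD: dentry->d_name.name may be changed by a concurrent
             * rename, or, for a long (separately allocated) name, be
             * freed outright under us. */

            take_dentry_name_snapshot(&snap, dentry);
            /* snap.name is a stable copy of the name here */
            release_dentry_name_snapshot(&snap);
    }
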
--- /dev/null
+From f8d4f44df056c5b504b0d49683fb7279218fd207 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 9 Sep 2020 22:25:06 -0400
+Subject: epoll: do not insert into poll queues until all sanity checks are done
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit f8d4f44df056c5b504b0d49683fb7279218fd207 upstream.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1461,6 +1461,22 @@ static int ep_insert(struct eventpoll *e
+ RCU_INIT_POINTER(epi->ws, NULL);
+ }
+
++ /* Add the current item to the list of active epoll hook for this file */
++ spin_lock(&tfile->f_lock);
++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
++ spin_unlock(&tfile->f_lock);
++
++ /*
++ * Add the current item to the RB tree. All RB tree operations are
++ * protected by "mtx", and ep_insert() is called with "mtx" held.
++ */
++ ep_rbtree_insert(ep, epi);
++
++ /* now check if we've created too many backpaths */
++ error = -EINVAL;
++ if (full_check && reverse_path_check())
++ goto error_remove_epi;
++
+ /* Initialize the poll table using the queue callback */
+ epq.epi = epi;
+ init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+@@ -1483,22 +1499,6 @@ static int ep_insert(struct eventpoll *e
+ if (epi->nwait < 0)
+ goto error_unregister;
+
+- /* Add the current item to the list of active epoll hook for this file */
+- spin_lock(&tfile->f_lock);
+- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+- spin_unlock(&tfile->f_lock);
+-
+- /*
+- * Add the current item to the RB tree. All RB tree operations are
+- * protected by "mtx", and ep_insert() is called with "mtx" held.
+- */
+- ep_rbtree_insert(ep, epi);
+-
+- /* now check if we've created too many backpaths */
+- error = -EINVAL;
+- if (full_check && reverse_path_check())
+- goto error_remove_epi;
+-
+ /* We have to drop the new item inside our item list to keep track of it */
+ spin_lock_irqsave(&ep->lock, flags);
+
+@@ -1527,6 +1527,8 @@ static int ep_insert(struct eventpoll *e
+
+ return 0;
+
++error_unregister:
++ ep_unregister_pollwait(ep, epi);
+ error_remove_epi:
+ spin_lock(&tfile->f_lock);
+ list_del_rcu(&epi->fllink);
+@@ -1534,9 +1536,6 @@ error_remove_epi:
+
+ rb_erase_cached(&epi->rbn, &ep->rbr);
+
+-error_unregister:
+- ep_unregister_pollwait(ep, epi);
+-
+ /*
+ * We need to do this because an event could have been arrived on some
+ * allocated wait queue. Note that we don't care about the ep->ovflist
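
The effect of the hunks above is easier to see as control flow. Below is
a condensed userspace sketch, not kernel code - the names and stubs are
stand-ins - but the ordering mirrors the fixed ep_insert(): publish the
item and run the reverse-path check before touching the poll queues, and
unwind in reverse order on failure:

    #include <errno.h>
    #include <stdio.h>

    struct item { int linked, inserted, registered; };

    /* stand-in for reverse_path_check(); nonzero = too many backpaths */
    static int check_backpaths(void) { return 0; }

    /* stand-in for the poll-queue hookup; negative = failure, cf. the
     * epi->nwait < 0 test in ep_insert() */
    static int register_poll(void) { return 0; }

    static int insert_sketch(struct item *it, int full_check)
    {
            int error;

            it->linked = 1;                  /* 1: tfile->f_ep_links */
            it->inserted = 1;                /* 2: rbtree insert     */

            error = -EINVAL;
            if (full_check && check_backpaths())
                    goto error_remove_epi;

            error = -ENOMEM;
            if (register_poll() < 0)
                    goto error_unregister;
            it->registered = 1;              /* 3: poll-queue hook   */

            return 0;

    error_unregister:
            it->registered = 0;              /* undo 3 (if partial)  */
    error_remove_epi:
            it->inserted = 0;                /* then 2               */
            it->linked = 0;                  /* then 1               */
            return error;
    }

    int main(void)
    {
            struct item it = { 0, 0, 0 };

            printf("insert_sketch: %d\n", insert_sketch(&it, 1));
            return 0;
    }
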
--- /dev/null
+From fe0a916c1eae8e17e86c3753d13919177d63ed7e Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 10 Sep 2020 08:33:27 -0400
+Subject: epoll: EPOLL_CTL_ADD: close the race in decision to take fast path
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit fe0a916c1eae8e17e86c3753d13919177d63ed7e upstream.
+
+Checking for the lack of epitems referring to the epoll we want to
+insert into is not enough; we might have an insertion of that epoll
+into another one that has already collected the set of files to
+recheck for excessive reverse paths, but hasn't gotten to
+creating/inserting the epitem for it.
+
+However, any such insertion in progress can be detected - it will update the
+generation count in our epoll when it's done looking through it for files
+to check. That gets done under ->mtx of our epoll and that allows us to
+detect that safely.
+
+We are *not* holding epmutex here, so the generation count is not stable.
+However, since both the update of ep->gen by the loop check and the (later)
+insertion into ->f_ep_links are done with ep->mtx held, we are fine -
+the sequence is
+	grab epmutex
+	bump loop_check_gen
+	...
+	grab tep->mtx		// 1
+	tep->gen = loop_check_gen
+	...
+	drop tep->mtx		// 2
+	...
+	grab tep->mtx		// 3
+	...
+	insert into ->f_ep_links
+	...
+	drop tep->mtx		// 4
+	bump loop_check_gen
+	drop epmutex
+and if the fastpath check in another thread happens for that
+eventpoll, it can come
+ * before (1) - in that case the fastpath is just fine
+ * after (4) - we'll see a non-empty ->f_ep_links and take the
+   slow path
+ * between (2) and (3) - loop_check_gen is stable, with ->mtx
+   providing the barriers, and we end up taking the slow path.
+
+Note that the ->f_ep_links emptiness check is slightly racy - we are
+protected against insertions into that list, but removals can happen
+right under us. Not a problem - in the worst case we'll end up taking
+the slow path for no good reason.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -2079,6 +2079,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ mutex_lock_nested(&ep->mtx, 0);
+ if (op == EPOLL_CTL_ADD) {
+ if (!list_empty(&f.file->f_ep_links) ||
++ ep->gen == loop_check_gen ||
+ is_file_epoll(tf.file)) {
+ full_check = 1;
+ mutex_unlock(&ep->mtx);
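
Putting the condition together, the fastpath decision now reads as below.
This is only a restatement of the hunk above wrapped in a hypothetical
helper, to make the three slow-path triggers explicit:

    /* EPOLL_CTL_ADD must take the full (slow) path when any of these
     * hold; the ep->gen test is the one this patch adds. */
    static bool must_take_slow_path(struct eventpoll *ep,
                                    struct file *epfile, struct file *tfile)
    {
            return !list_empty(&epfile->f_ep_links) || /* we are watched */
                   ep->gen == loop_check_gen ||  /* loop check in flight */
                   is_file_epoll(tfile);         /* nested epoll target  */
    }
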
--- /dev/null
+From 18306c404abe18a0972587a6266830583c60c928 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 10 Sep 2020 08:30:05 -0400
+Subject: epoll: replace ->visited/visited_list with generation count
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 18306c404abe18a0972587a6266830583c60c928 upstream.
+
+Using a generation count removes the need to clear the visited marks
+after each check, along with the races that clearing brought with it.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 26 +++++++-------------------
+ 1 file changed, 7 insertions(+), 19 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -223,8 +223,7 @@ struct eventpoll {
+ struct file *file;
+
+ /* used to optimize loop detection check */
+- int visited;
+- struct list_head visited_list_link;
++ u64 gen;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ /* used to track busy poll napi_id */
+@@ -273,6 +272,8 @@ static long max_user_watches __read_most
+ */
+ static DEFINE_MUTEX(epmutex);
+
++static u64 loop_check_gen = 0;
++
+ /* Used to check for epoll file descriptor inclusion loops */
+ static struct nested_calls poll_loop_ncalls;
+
+@@ -288,9 +289,6 @@ static struct kmem_cache *epi_cache __re
+ /* Slab cache used to allocate "struct eppoll_entry" */
+ static struct kmem_cache *pwq_cache __read_mostly;
+
+-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
+-static LIST_HEAD(visited_list);
+-
+ /*
+ * List of files with newly added links, where we may need to limit the number
+ * of emanating paths. Protected by the epmutex.
+@@ -1877,13 +1875,12 @@ static int ep_loop_check_proc(void *priv
+ struct epitem *epi;
+
+ mutex_lock_nested(&ep->mtx, call_nests + 1);
+- ep->visited = 1;
+- list_add(&ep->visited_list_link, &visited_list);
++ ep->gen = loop_check_gen;
+ for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ ep_tovisit = epi->ffd.file->private_data;
+- if (ep_tovisit->visited)
++ if (ep_tovisit->gen == loop_check_gen)
+ continue;
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, epi->ffd.file,
+@@ -1924,18 +1921,8 @@ static int ep_loop_check_proc(void *priv
+ */
+ static int ep_loop_check(struct eventpoll *ep, struct file *file)
+ {
+- int ret;
+- struct eventpoll *ep_cur, *ep_next;
+-
+- ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
+- /* clear visited list */
+- list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
+- visited_list_link) {
+- ep_cur->visited = 0;
+- list_del(&ep_cur->visited_list_link);
+- }
+- return ret;
+ }
+
+ static void clear_tfile_check_list(void)
+@@ -2152,6 +2139,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ error_tgt_fput:
+ if (full_check) {
+ clear_tfile_check_list();
++ loop_check_gen++;
+ mutex_unlock(&epmutex);
+ }
+
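
The idiom generalizes beyond epoll. A standalone userspace sketch with
hypothetical names: stamp each node with the generation of the walk that
visited it, so "clearing" every mark is a single increment rather than a
second pass over a visited list:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t walk_gen;    /* bumped once per walk, under a lock */

    struct node {
            uint64_t gen;        /* generation of the walk that saw us */
    };

    static bool seen_this_walk(const struct node *n)
    {
            return n->gen == walk_gen;
    }

    static void mark_seen(struct node *n)
    {
            n->gen = walk_gen;
    }

    static void end_walk(void)
    {
            walk_gen++;          /* invalidates every mark at once */
    }
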
--- /dev/null
+From 1cc5ef91d2ff94d2bf2de3b3585423e8a1051cb6 Mon Sep 17 00:00:00 2001
+From: Will McVicker <willmcvicker@google.com>
+Date: Mon, 24 Aug 2020 19:38:32 +0000
+Subject: netfilter: ctnetlink: add a range check for l3/l4 protonum
+
+From: Will McVicker <willmcvicker@google.com>
+
+commit 1cc5ef91d2ff94d2bf2de3b3585423e8a1051cb6 upstream.
+
+The indexes to the nf_nat_l[34]protos arrays come from userspace, so
+check the tuple's family, i.e. l3num, when creating the conntrack in
+order to prevent an OOB memory access during setup. Here is an example
+kernel panic on 4.14.180 when userspace passes in an index greater than
+NFPROTO_NUMPROTO.
+
+Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+Modules linked in:...
+Process poc (pid: 5614, stack limit = 0x00000000a3933121)
+CPU: 4 PID: 5614 Comm: poc Tainted: G S W O 4.14.180-g051355490483
+Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150 Google Inc. MSM
+task: 000000002a3dfffe task.stack: 00000000a3933121
+pc : __cfi_check_fail+0x1c/0x24
+lr : __cfi_check_fail+0x1c/0x24
+...
+Call trace:
+__cfi_check_fail+0x1c/0x24
+name_to_dev_t+0x0/0x468
+nfnetlink_parse_nat_setup+0x234/0x258
+ctnetlink_parse_nat_setup+0x4c/0x228
+ctnetlink_new_conntrack+0x590/0xc40
+nfnetlink_rcv_msg+0x31c/0x4d4
+netlink_rcv_skb+0x100/0x184
+nfnetlink_rcv+0xf4/0x180
+netlink_unicast+0x360/0x770
+netlink_sendmsg+0x5a0/0x6a4
+___sys_sendmsg+0x314/0x46c
+SyS_sendmsg+0xb4/0x108
+el0_svc_naked+0x34/0x38
+
+This crash no longer happens as of 5.4; however, ctnetlink still
+allows creating entries with an unsupported layer 3 protocol number.
+
+Fixes: c1d10adb4a521 ("[NETFILTER]: Add ctnetlink port for nf_conntrack")
+Signed-off-by: Will McVicker <willmcvicker@google.com>
+[pablo@netfilter.org: rebased original patch on top of nf.git]
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1043,6 +1043,8 @@ ctnetlink_parse_tuple(const struct nlatt
+ if (!tb[CTA_TUPLE_IP])
+ return -EINVAL;
+
++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
++ return -EOPNOTSUPP;
+ tuple->src.l3num = l3num;
+
+ err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
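
The pattern, reduced to its essentials (a sketch; the NFPROTO_* values
are the real uapi constants from include/uapi/linux/netfilter.h, the
helper is hypothetical): validate a userspace-supplied value before it
is ever used to index a per-family handler table:

    #include <errno.h>

    #define NFPROTO_IPV4  2
    #define NFPROTO_IPV6 10

    static int validate_l3num(unsigned int l3num)
    {
            /* reject anything but the two supported families up front */
            if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
                    return -EOPNOTSUPP;
            return 0;
    }
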
input-trackpoint-enable-synaptics-trackpoints.patch
random32-restore-__latent_entropy-attribute-on-net_r.patch
net-packet-fix-overflow-in-tpacket_rcv.patch
+epoll-do-not-insert-into-poll-queues-until-all-sanity-checks-are-done.patch
+epoll-replace-visited-visited_list-with-generation-count.patch
+epoll-epoll_ctl_add-close-the-race-in-decision-to-take-fast-path.patch
+ep_create_wakeup_source-dentry-name-can-change-under-you.patch
+netfilter-ctnetlink-add-a-range-check-for-l3-l4-protonum.patch