--- /dev/null
+From 3701cb59d892b88d569427586f01491552f377b1 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 24 Sep 2020 19:41:58 -0400
+Subject: ep_create_wakeup_source(): dentry name can change under you...
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 3701cb59d892b88d569427586f01491552f377b1 upstream.
+
+or get freed, for that matter, if it's a long (separately stored)
+name.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1448,7 +1448,7 @@ static int reverse_path_check(void)
+
+ static int ep_create_wakeup_source(struct epitem *epi)
+ {
+- const char *name;
++ struct name_snapshot n;
+ struct wakeup_source *ws;
+
+ if (!epi->ep->ws) {
+@@ -1457,8 +1457,9 @@ static int ep_create_wakeup_source(struc
+ return -ENOMEM;
+ }
+
+- name = epi->ffd.file->f_path.dentry->d_name.name;
+- ws = wakeup_source_register(NULL, name);
++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
++ ws = wakeup_source_register(NULL, n.name.name);
++ release_dentry_name_snapshot(&n);
+
+ if (!ws)
+ return -ENOMEM;
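
The fix above is an instance of a general pattern: never hold on to a pointer into a dentry's name, since a concurrent rename can change it or, for a long (separately stored) name, free the storage out from under you; take a stable private copy under proper protection and use that. A minimal userspace sketch of the same idea, assuming illustrative names (shared_name, name_lock, snapshot_name) rather than the kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t name_lock = PTHREAD_MUTEX_INITIALIZER;
static char *shared_name;	/* a renamer thread may swap and free this */

/* Analogous to take_dentry_name_snapshot(): copy out under the lock. */
static char *snapshot_name(void)
{
	char *copy;

	pthread_mutex_lock(&name_lock);
	copy = strdup(shared_name);	/* stable private copy */
	pthread_mutex_unlock(&name_lock);
	return copy;
}

int main(void)
{
	char *n;

	shared_name = strdup("old-name");

	n = snapshot_name();
	/* ... register something under the name; a rename can't bite us ... */
	printf("registered as %s\n", n);

	free(n);			/* analogous to release_dentry_name_snapshot() */
	free(shared_name);
	return 0;
}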
--- /dev/null
+From f8d4f44df056c5b504b0d49683fb7279218fd207 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 9 Sep 2020 22:25:06 -0400
+Subject: epoll: do not insert into poll queues until all sanity checks are done
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit f8d4f44df056c5b504b0d49683fb7279218fd207 upstream.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1522,6 +1522,22 @@ static int ep_insert(struct eventpoll *e
+ RCU_INIT_POINTER(epi->ws, NULL);
+ }
+
++ /* Add the current item to the list of active epoll hook for this file */
++ spin_lock(&tfile->f_lock);
++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
++ spin_unlock(&tfile->f_lock);
++
++ /*
++ * Add the current item to the RB tree. All RB tree operations are
++ * protected by "mtx", and ep_insert() is called with "mtx" held.
++ */
++ ep_rbtree_insert(ep, epi);
++
++ /* now check if we've created too many backpaths */
++ error = -EINVAL;
++ if (full_check && reverse_path_check())
++ goto error_remove_epi;
++
+ /* Initialize the poll table using the queue callback */
+ epq.epi = epi;
+ init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+@@ -1544,22 +1560,6 @@ static int ep_insert(struct eventpoll *e
+ if (epi->nwait < 0)
+ goto error_unregister;
+
+- /* Add the current item to the list of active epoll hook for this file */
+- spin_lock(&tfile->f_lock);
+- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+- spin_unlock(&tfile->f_lock);
+-
+- /*
+- * Add the current item to the RB tree. All RB tree operations are
+- * protected by "mtx", and ep_insert() is called with "mtx" held.
+- */
+- ep_rbtree_insert(ep, epi);
+-
+- /* now check if we've created too many backpaths */
+- error = -EINVAL;
+- if (full_check && reverse_path_check())
+- goto error_remove_epi;
+-
+ /* We have to drop the new item inside our item list to keep track of it */
+ write_lock_irq(&ep->lock);
+
+@@ -1588,6 +1588,8 @@ static int ep_insert(struct eventpoll *e
+
+ return 0;
+
++error_unregister:
++ ep_unregister_pollwait(ep, epi);
+ error_remove_epi:
+ spin_lock(&tfile->f_lock);
+ list_del_rcu(&epi->fllink);
+@@ -1595,9 +1597,6 @@ error_remove_epi:
+
+ rb_erase_cached(&epi->rbn, &ep->rbr);
+
+-error_unregister:
+- ep_unregister_pollwait(ep, epi);
+-
+ /*
+ * We need to do this because an event could have been arrived on some
+ * allocated wait queue. Note that we don't care about the ep->ovflist
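
The reordering follows a general rule: finish everything that can fail before the object becomes visible to callback paths, because once the item is hooked into a wakeup queue an event can fire and race with the error unwinding. A hedged, compilable sketch of that "validate before publish" shape, with hypothetical stand-ins (bookkeep, sanity_check, publish) for the kernel steps:

#include <stdio.h>

struct item { int id; };

/* Reversible bookkeeping: file list, rbtree (f_ep_links, ep_rbtree_insert). */
static void bookkeep(struct item *it)    { (void)it; }
static void unbookkeep(struct item *it)  { (void)it; }

/* The check that used to run too late (reverse_path_check() in the patch). */
static int sanity_check(struct item *it) { return it->id < 0 ? -1 : 0; }

/*
 * Publication: after this, callbacks may see the item (the poll-queue
 * hookup in the real code). Even a failed publication may be partial,
 * so the error path must still unpublish.
 */
static int publish(struct item *it)      { (void)it; return 0; }
static void unpublish(struct item *it)   { (void)it; }

static int insert(struct item *it)
{
	int err;

	bookkeep(it);			/* easy to undo */

	err = sanity_check(it);		/* fail before anyone can see us */
	if (err)
		goto error_unwind;

	err = publish(it);		/* the risky step now comes last */
	if (err)
		goto error_unpublish;
	return 0;

error_unpublish:
	unpublish(it);			/* mirrors ep_unregister_pollwait() */
error_unwind:
	unbookkeep(it);
	return err;
}

int main(void)
{
	struct item good = { 1 }, bad = { -1 };

	printf("good=%d bad=%d\n", insert(&good), insert(&bad));
	return 0;
}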
--- /dev/null
+From fe0a916c1eae8e17e86c3753d13919177d63ed7e Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 10 Sep 2020 08:33:27 -0400
+Subject: epoll: EPOLL_CTL_ADD: close the race in decision to take fast path
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit fe0a916c1eae8e17e86c3753d13919177d63ed7e upstream.
+
+Checking for the lack of epitems referring to the epoll we want to insert into
+is not enough; we might have an insertion of that epoll into another one that
+has already collected the set of files to recheck for excessive reverse paths,
+but hasn't gotten to creating/inserting the epitem for it.
+
+However, any such insertion in progress can be detected - it will update the
+generation count in our epoll when it's done looking through it for files
+to check. That gets done under ->mtx of our epoll and that allows us to
+detect that safely.
+
+We are *not* holding epmutex here, so the generation count is not stable.
+However, since both the update of ep->gen by loop check and (later)
+insertion into ->f_ep_links are done with ep->mtx held, we are fine -
+the sequence is
+ grab epmutex
+ bump loop_check_gen
+ ...
+ grab tep->mtx // 1
+ tep->gen = loop_check_gen
+ ...
+ drop tep->mtx // 2
+ ...
+ grab tep->mtx // 3
+ ...
+	insert into ->f_ep_links
+ ...
+ drop tep->mtx // 4
+ bump loop_check_gen
+ drop epmutex
+and if the fastpath check in another thread happens for that
+eventpoll, it can come
+	* before (1) - in that case fastpath is just fine
+	* after (4) - we'll see non-empty ->f_ep_links and take the
+	  slow path
+	* between (2) and (3) - loop_check_gen is stable; with ->mtx
+	  providing the barriers, we end up taking the slow path.
+
+Note that the ->f_ep_links emptiness check is slightly racy - we are protected
+against insertions into that list, but removals can happen right under us.
+Not a problem - in the worst case we'll end up taking a slow path for
+no good reason.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -2181,6 +2181,7 @@ int do_epoll_ctl(int epfd, int op, int f
+ goto error_tgt_fput;
+ if (op == EPOLL_CTL_ADD) {
+ if (!list_empty(&f.file->f_ep_links) ||
++ ep->gen == loop_check_gen ||
+ is_file_epoll(tf.file)) {
+ mutex_unlock(&ep->mtx);
+ error = epoll_mutex_lock(&epmutex, 0, nonblock);
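
Taken together with the generation-count patch below, the one-line hunk makes the EPOLL_CTL_ADD fastpath decision three-way. A self-contained sketch of that decision, assuming illustrative types and names (eventpoll_like, need_full_check) that are not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct eventpoll_like {
	uint64_t gen;		/* stamped by loop checks (ep->gen) */
	bool watched;		/* stand-in for !list_empty(&f.file->f_ep_links) */
};

static uint64_t loop_check_gen;	/* bumped around every full check */

/* True when EPOLL_CTL_ADD must take epmutex and do the full loop check. */
static bool need_full_check(const struct eventpoll_like *ep, bool target_is_epoll)
{
	return ep->watched ||			/* someone already watches us */
	       ep->gen == loop_check_gen ||	/* a loop check may be mid-flight */
	       target_is_epoll;			/* nesting epolls: always check */
}

int main(void)
{
	struct eventpoll_like ep = { .gen = 0, .watched = false };

	loop_check_gen = 1;			/* some earlier check completed */
	printf("fast path ok: %d\n", !need_full_check(&ep, false));

	ep.gen = loop_check_gen;		/* a check stamped us: go slow */
	printf("fast path ok: %d\n", !need_full_check(&ep, false));
	return 0;
}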
--- /dev/null
+From 18306c404abe18a0972587a6266830583c60c928 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 10 Sep 2020 08:30:05 -0400
+Subject: epoll: replace ->visited/visited_list with generation count
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 18306c404abe18a0972587a6266830583c60c928 upstream.
+
+removes the need to clear it, along with the races.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 27 ++++++++-------------------
+ 1 file changed, 8 insertions(+), 19 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -218,8 +218,7 @@ struct eventpoll {
+ struct file *file;
+
+ /* used to optimize loop detection check */
+- struct list_head visited_list_link;
+- int visited;
++ u64 gen;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ /* used to track busy poll napi_id */
+@@ -274,6 +273,8 @@ static long max_user_watches __read_most
+ */
+ static DEFINE_MUTEX(epmutex);
+
++static u64 loop_check_gen = 0;
++
+ /* Used to check for epoll file descriptor inclusion loops */
+ static struct nested_calls poll_loop_ncalls;
+
+@@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __re
+ /* Slab cache used to allocate "struct eppoll_entry" */
+ static struct kmem_cache *pwq_cache __read_mostly;
+
+-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
+-static LIST_HEAD(visited_list);
+-
+ /*
+ * List of files with newly added links, where we may need to limit the number
+ * of emanating paths. Protected by the epmutex.
+@@ -1971,13 +1969,12 @@ static int ep_loop_check_proc(void *priv
+ struct epitem *epi;
+
+ mutex_lock_nested(&ep->mtx, call_nests + 1);
+- ep->visited = 1;
+- list_add(&ep->visited_list_link, &visited_list);
++ ep->gen = loop_check_gen;
+ for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ ep_tovisit = epi->ffd.file->private_data;
+- if (ep_tovisit->visited)
++ if (ep_tovisit->gen == loop_check_gen)
+ continue;
+ error = ep_call_nested(&poll_loop_ncalls,
+ ep_loop_check_proc, epi->ffd.file,
+@@ -2018,18 +2015,8 @@ static int ep_loop_check_proc(void *priv
+ */
+ static int ep_loop_check(struct eventpoll *ep, struct file *file)
+ {
+- int ret;
+- struct eventpoll *ep_cur, *ep_next;
+-
+- ret = ep_call_nested(&poll_loop_ncalls,
++ return ep_call_nested(&poll_loop_ncalls,
+ ep_loop_check_proc, file, ep, current);
+- /* clear visited list */
+- list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
+- visited_list_link) {
+- ep_cur->visited = 0;
+- list_del(&ep_cur->visited_list_link);
+- }
+- return ret;
+ }
+
+ static void clear_tfile_check_list(void)
+@@ -2199,6 +2186,7 @@ int do_epoll_ctl(int epfd, int op, int f
+ error = epoll_mutex_lock(&epmutex, 0, nonblock);
+ if (error)
+ goto error_tgt_fput;
++ loop_check_gen++;
+ full_check = 1;
+ if (is_file_epoll(tf.file)) {
+ error = -ELOOP;
+@@ -2262,6 +2250,7 @@ int do_epoll_ctl(int epfd, int op, int f
+ error_tgt_fput:
+ if (full_check) {
+ clear_tfile_check_list();
++ loop_check_gen++;
+ mutex_unlock(&epmutex);
+ }
+
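
The replacement is the classic generation-count idiom: rather than a visited flag that must be cleared after every traversal (the old visited_list walk at the end of ep_loop_check()), stamp each node with the current traversal's generation; a node counts as visited iff its stamp matches, so there is nothing to clean up and nothing to race on while clearing. A minimal userspace sketch of the idea, with illustrative types rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

struct node {
	uint64_t gen;		/* analogous to eventpoll::gen */
	int nchildren;
	struct node **children;
};

static uint64_t traversal_gen;	/* analogous to loop_check_gen */

static void visit(struct node *n)
{
	if (n->gen == traversal_gen)	/* already seen during this pass */
		return;
	n->gen = traversal_gen;		/* stamp instead of setting a flag */
	for (int i = 0; i < n->nchildren; i++)
		visit(n->children[i]);
}

int main(void)
{
	struct node a = { 0, 0, NULL };
	struct node *kids[] = { &a, &a };	/* a diamond: b reaches a twice */
	struct node b = { 0, 2, kids };

	traversal_gen++;		/* bump once per whole check; no cleanup */
	visit(&b);			/* a is visited exactly once */

	printf("a.gen = %llu\n", (unsigned long long)a.gen);
	return 0;
}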
drm-i915-gvt-fix-port-number-for-bdw-on-edid-region-setup.patch
scsi-sd-sd_zbc-fix-handling-of-host-aware-zbc-disks.patch
scsi-sd-sd_zbc-fix-zbc-disk-initialization.patch
+epoll-do-not-insert-into-poll-queues-until-all-sanity-checks-are-done.patch
+epoll-replace-visited-visited_list-with-generation-count.patch
+epoll-epoll_ctl_add-close-the-race-in-decision-to-take-fast-path.patch
+ep_create_wakeup_source-dentry-name-can-change-under-you.patch