--- /dev/null
+From dccc1613d34c85c432bb33580cf5f97242eb6ac2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Sep 2020 00:45:14 +0300
+Subject: io_uring: fix cancel of deferred reqs with ->files
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit b7ddce3cbf010edbfac6c6d8cc708560a7bcd7a4 ]
+
+While trying to cancel requests with ->files, io_uring_cancel_files()
+should also look for requests in ->defer_list; otherwise it might end up
+hanging a thread.
+
+Cancel all requests in ->defer_list up to the last request there with a
+matching ->files; that is needed to preserve drain ordering semantics.
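+
+Purely as an illustration (a standalone userspace sketch, not kernel code
+and not part of this patch), the cut-up-to-the-last-match rule looks
+roughly like this, with an array standing in for ->defer_list and an int
+standing in for ->files:
+
+  #include <stdio.h>
+
+  /* stand-in for struct io_kiocb: only the ->files identity matters */
+  struct req { int files; };
+
+  int main(void)
+  {
+      /* deferred requests, oldest first; files == 2 is being cancelled */
+      struct req defer[] = { {1}, {2}, {3}, {2}, {4} };
+      int n = sizeof(defer) / sizeof(defer[0]);
+      int last = -1;
+
+      /* walk newest -> oldest; the first match found is the last
+       * matching request in list order */
+      for (int i = n - 1; i >= 0; i--) {
+          if (defer[i].files == 2) {
+              last = i;
+              break;
+          }
+      }
+
+      /* cancel everything up to and including the last match; the
+       * requests queued behind it stay deferred */
+      for (int i = 0; i < n; i++)
+          printf("req %d (files %d): %s\n", i, defer[i].files,
+                 i <= last ? "cancel" : "keep");
+      return 0;
+  }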
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 38f3ec15ba3b1..5f627194d0920 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7675,12 +7675,38 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+ io_timeout_remove_link(ctx, req);
+ }
+
++static void io_cancel_defer_files(struct io_ring_ctx *ctx,
++ struct files_struct *files)
++{
++ struct io_kiocb *req = NULL;
++ LIST_HEAD(list);
++
++ spin_lock_irq(&ctx->completion_lock);
++ list_for_each_entry_reverse(req, &ctx->defer_list, list) {
++ if ((req->flags & REQ_F_WORK_INITIALIZED)
++ && req->work.files == files) {
++ list_cut_position(&list, &ctx->defer_list, &req->list);
++ break;
++ }
++ }
++ spin_unlock_irq(&ctx->completion_lock);
++
++ while (!list_empty(&list)) {
++ req = list_first_entry(&list, struct io_kiocb, list);
++ list_del_init(&req->list);
++ req_set_fail_links(req);
++ io_cqring_add_event(req, -ECANCELED);
++ io_double_put_req(req);
++ }
++}
++
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+ {
+ if (list_empty_careful(&ctx->inflight_list))
+ return;
+
++ io_cancel_defer_files(ctx, files);
+ /* cancel all at once, should be faster than doing it one by one*/
+ io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
+
+--
+2.25.1
+
--- /dev/null
+From add036bcf863627747ac09444cd5727791348fb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Sep 2020 00:45:15 +0300
+Subject: io_uring: fix linked deferred ->files cancellation
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit c127a2a1b7baa5eb40a7e2de4b7f0c51ccbbb2ef ]
+
+While looking for ->files in ->defer_list, take into account that
+requests there may actually be links, so the requests linked to them
+have to be checked as well.
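+
+Purely as an illustration (a standalone userspace sketch, not kernel code
+and not part of this patch), the matching now also has to walk a
+request's link chain, roughly:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* stand-in for struct io_kiocb: ->work.files plus the link chain */
+  struct req {
+      int files;
+      struct req *link;
+  };
+
+  /* true if the request or any request linked behind it uses 'files' */
+  static bool match_link_files(const struct req *req, int files)
+  {
+      for (; req; req = req->link)
+          if (req->files == files)
+              return true;
+      return false;
+  }
+
+  int main(void)
+  {
+      struct req tail = { .files = 2, .link = NULL };
+      struct req head = { .files = 1, .link = &tail };
+
+      /* the link head itself does not match, but a linked request does */
+      printf("%d\n", match_link_files(&head, 2)); /* prints 1 */
+      return 0;
+  }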
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 5f627194d0920..d05023ca74bdc 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7601,6 +7601,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
+ return false;
+ }
+
++static inline bool io_match_files(struct io_kiocb *req,
++ struct files_struct *files)
++{
++ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
++}
++
++static bool io_match_link_files(struct io_kiocb *req,
++ struct files_struct *files)
++{
++ struct io_kiocb *link;
++
++ if (io_match_files(req, files))
++ return true;
++ if (req->flags & REQ_F_LINK_HEAD) {
++ list_for_each_entry(link, &req->link_list, link_list) {
++ if (io_match_files(link, files))
++ return true;
++ }
++ }
++ return false;
++}
++
+ /*
+ * We're looking to cancel 'req' because it's holding on to our files, but
+ * 'req' could be a link to another request. See if it is, and cancel that
+@@ -7683,8 +7705,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(req, &ctx->defer_list, list) {
+- if ((req->flags & REQ_F_WORK_INITIALIZED)
+- && req->work.files == files) {
++ if (io_match_link_files(req, files)) {
+ list_cut_position(&list, &ctx->defer_list, &req->list);
+ break;
+ }
+--
+2.25.1
+
--- /dev/null
+From eafeacb647026e8a7482921d9982af3790906e55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 10:07:07 +0300
+Subject: RDMA/cma: Execute rdma_cm destruction from a handler properly
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit f6a9d47ae6854980fc4b1676f1fe9f9fa45ea4e2 ]
+
+When an rdma_cm_id needs to be destroyed after a handler callback fails,
+part of the destruction pattern is open-coded into each call site.
+
+Unfortunately, the blind assignment to state discards important
+information needed to do cma_cancel_operation(). This results in active
+operations being left running after rdma_destroy_id() completes, and in
+the use-after-free bugs reported by KASAN.
+
+Consolidate this entire pattern into destroy_id_handler_unlock() and
+manage the locking correctly. The state should be set to
+RDMA_CM_DESTROYING under the handler lock to atomically ensure that no
+further handlers are called.
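+
+Purely as an illustration (a userspace pthread sketch, not the cma code
+itself), the essential shape of the consolidated destroy path is to read
+and replace the state in one locked step and then cancel based on the
+recorded old state:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  enum state { CM_IDLE, CM_CONNECT, CM_DESTROYING };
+
+  static enum state id_state = CM_CONNECT;
+  static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  /* needs the pre-destroy state to know which operation to cancel */
+  static void cancel_operation(enum state old)
+  {
+      printf("cancelling work started in state %d\n", old);
+  }
+
+  static void destroy_handler_unlock(void)
+  {
+      enum state old;
+
+      /* record the old state and switch to DESTROYING atomically; a
+       * blind "id_state = CM_DESTROYING" would discard 'old' and leave
+       * the in-flight operation running */
+      pthread_mutex_lock(&id_lock);
+      old = id_state;
+      id_state = CM_DESTROYING;
+      pthread_mutex_unlock(&id_lock);
+
+      cancel_operation(old);
+  }
+
+  int main(void)
+  {
+      destroy_handler_unlock();
+      return 0;
+  }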
+
+Link: https://lore.kernel.org/r/20200723070707.1771101-5-leon@kernel.org
+Reported-by: syzbot+08092148130652a6faae@syzkaller.appspotmail.com
+Reported-by: syzbot+a929647172775e335941@syzkaller.appspotmail.com
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 174 ++++++++++++++++------------------
+ 1 file changed, 84 insertions(+), 90 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 11f43204fee77..26de0dab60bbb 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -428,19 +428,6 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
+ return ret;
+ }
+
+-static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+- enum rdma_cm_state exch)
+-{
+- unsigned long flags;
+- enum rdma_cm_state old;
+-
+- spin_lock_irqsave(&id_priv->lock, flags);
+- old = id_priv->state;
+- id_priv->state = exch;
+- spin_unlock_irqrestore(&id_priv->lock, flags);
+- return old;
+-}
+-
+ static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
+ {
+ return hdr->ip_version >> 4;
+@@ -1829,21 +1816,9 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+ }
+ }
+
+-void rdma_destroy_id(struct rdma_cm_id *id)
++static void _destroy_id(struct rdma_id_private *id_priv,
++ enum rdma_cm_state state)
+ {
+- struct rdma_id_private *id_priv =
+- container_of(id, struct rdma_id_private, id);
+- enum rdma_cm_state state;
+-
+- /*
+- * Wait for any active callback to finish. New callbacks will find
+- * the id_priv state set to destroying and abort.
+- */
+- mutex_lock(&id_priv->handler_mutex);
+- trace_cm_id_destroy(id_priv);
+- state = cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+-
+ cma_cancel_operation(id_priv, state);
+
+ rdma_restrack_del(&id_priv->res);
+@@ -1874,6 +1849,42 @@ void rdma_destroy_id(struct rdma_cm_id *id)
+ put_net(id_priv->id.route.addr.dev_addr.net);
+ kfree(id_priv);
+ }
++
++/*
++ * destroy an ID from within the handler_mutex. This ensures that no other
++ * handlers can start running concurrently.
++ */
++static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
++ __releases(&idprv->handler_mutex)
++{
++ enum rdma_cm_state state;
++ unsigned long flags;
++
++ trace_cm_id_destroy(id_priv);
++
++ /*
++ * Setting the state to destroyed under the handler mutex provides a
++ * fence against calling handler callbacks. If this is invoked due to
++ * the failure of a handler callback then it guarentees that no future
++ * handlers will be called.
++ */
++ lockdep_assert_held(&id_priv->handler_mutex);
++ spin_lock_irqsave(&id_priv->lock, flags);
++ state = id_priv->state;
++ id_priv->state = RDMA_CM_DESTROYING;
++ spin_unlock_irqrestore(&id_priv->lock, flags);
++ mutex_unlock(&id_priv->handler_mutex);
++ _destroy_id(id_priv, state);
++}
++
++void rdma_destroy_id(struct rdma_cm_id *id)
++{
++ struct rdma_id_private *id_priv =
++ container_of(id, struct rdma_id_private, id);
++
++ mutex_lock(&id_priv->handler_mutex);
++ destroy_id_handler_unlock(id_priv);
++}
+ EXPORT_SYMBOL(rdma_destroy_id);
+
+ static int cma_rep_recv(struct rdma_id_private *id_priv)
+@@ -1938,7 +1949,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
+ {
+ struct rdma_id_private *id_priv = cm_id->context;
+ struct rdma_cm_event event = {};
+- int ret = 0;
++ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
+@@ -2007,14 +2018,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.ib = NULL;
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ destroy_id_handler_unlock(id_priv);
+ return ret;
+ }
+ out:
+ mutex_unlock(&id_priv->handler_mutex);
+- return ret;
++ return 0;
+ }
+
+ static struct rdma_id_private *
+@@ -2176,7 +2185,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN) {
+ ret = -ECONNABORTED;
+- goto err1;
++ goto err_unlock;
+ }
+
+ offset = cma_user_data_offset(listen_id);
+@@ -2193,43 +2202,38 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ }
+ if (!conn_id) {
+ ret = -ENOMEM;
+- goto err1;
++ goto err_unlock;
+ }
+
+ mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
+ ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
+- if (ret)
+- goto err2;
++ if (ret) {
++ destroy_id_handler_unlock(conn_id);
++ goto err_unlock;
++ }
+
+ conn_id->cm_id.ib = cm_id;
+ cm_id->context = conn_id;
+ cm_id->cm_handler = cma_ib_handler;
+
+ ret = cma_cm_event_handler(conn_id, &event);
+- if (ret)
+- goto err3;
++ if (ret) {
++ /* Destroy the CM ID by returning a non-zero value. */
++ conn_id->cm_id.ib = NULL;
++ mutex_unlock(&listen_id->handler_mutex);
++ destroy_id_handler_unlock(conn_id);
++ goto net_dev_put;
++ }
++
+ if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+ (conn_id->id.qp_type != IB_QPT_UD)) {
+ trace_cm_send_mra(cm_id->context);
+ ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ }
+- mutex_unlock(&lock);
+ mutex_unlock(&conn_id->handler_mutex);
+- mutex_unlock(&listen_id->handler_mutex);
+- if (net_dev)
+- dev_put(net_dev);
+- return 0;
+
+-err3:
+- /* Destroy the CM ID by returning a non-zero value. */
+- conn_id->cm_id.ib = NULL;
+-err2:
+- cma_exch(conn_id, RDMA_CM_DESTROYING);
+- mutex_unlock(&conn_id->handler_mutex);
+-err1:
++err_unlock:
+ mutex_unlock(&listen_id->handler_mutex);
+- if (conn_id)
+- rdma_destroy_id(&conn_id->id);
+
+ net_dev_put:
+ if (net_dev)
+@@ -2329,9 +2333,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.iw = NULL;
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ destroy_id_handler_unlock(id_priv);
+ return ret;
+ }
+
+@@ -2378,16 +2380,16 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+ if (ret) {
+- mutex_unlock(&conn_id->handler_mutex);
+- rdma_destroy_id(new_cm_id);
+- goto out;
++ mutex_unlock(&listen_id->handler_mutex);
++ destroy_id_handler_unlock(conn_id);
++ return ret;
+ }
+
+ ret = cma_iw_acquire_dev(conn_id, listen_id);
+ if (ret) {
+- mutex_unlock(&conn_id->handler_mutex);
+- rdma_destroy_id(new_cm_id);
+- goto out;
++ mutex_unlock(&listen_id->handler_mutex);
++ destroy_id_handler_unlock(conn_id);
++ return ret;
+ }
+
+ conn_id->cm_id.iw = cm_id;
+@@ -2401,10 +2403,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ if (ret) {
+ /* User wants to destroy the CM ID */
+ conn_id->cm_id.iw = NULL;
+- cma_exch(conn_id, RDMA_CM_DESTROYING);
+- mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
+- rdma_destroy_id(&conn_id->id);
++ destroy_id_handler_unlock(conn_id);
+ return ret;
+ }
+
+@@ -2644,21 +2644,21 @@ static void cma_work_handler(struct work_struct *_work)
+ {
+ struct cma_work *work = container_of(_work, struct cma_work, work);
+ struct rdma_id_private *id_priv = work->id;
+- int destroy = 0;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+- goto out;
++ goto out_unlock;
+
+ if (cma_cm_event_handler(id_priv, &work->event)) {
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- destroy = 1;
++ cma_id_put(id_priv);
++ destroy_id_handler_unlock(id_priv);
++ goto out_free;
+ }
+-out:
++
++out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+- if (destroy)
+- rdma_destroy_id(&id_priv->id);
++out_free:
+ kfree(work);
+ }
+
+@@ -2666,23 +2666,22 @@ static void cma_ndev_work_handler(struct work_struct *_work)
+ {
+ struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
+ struct rdma_id_private *id_priv = work->id;
+- int destroy = 0;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state == RDMA_CM_DESTROYING ||
+ id_priv->state == RDMA_CM_DEVICE_REMOVAL)
+- goto out;
++ goto out_unlock;
+
+ if (cma_cm_event_handler(id_priv, &work->event)) {
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- destroy = 1;
++ cma_id_put(id_priv);
++ destroy_id_handler_unlock(id_priv);
++ goto out_free;
+ }
+
+-out:
++out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+- if (destroy)
+- rdma_destroy_id(&id_priv->id);
++out_free:
+ kfree(work);
+ }
+
+@@ -3158,9 +3157,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
+ event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+
+ if (cma_cm_event_handler(id_priv, &event)) {
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ destroy_id_handler_unlock(id_priv);
+ return;
+ }
+ out:
+@@ -3777,7 +3774,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ struct rdma_cm_event event = {};
+ const struct ib_cm_sidr_rep_event_param *rep =
+ &ib_event->param.sidr_rep_rcvd;
+- int ret = 0;
++ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+@@ -3827,14 +3824,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.ib = NULL;
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ destroy_id_handler_unlock(id_priv);
+ return ret;
+ }
+ out:
+ mutex_unlock(&id_priv->handler_mutex);
+- return ret;
++ return 0;
+ }
+
+ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+@@ -4359,9 +4354,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+ if (ret) {
+- cma_exch(id_priv, RDMA_CM_DESTROYING);
+- mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ destroy_id_handler_unlock(id_priv);
+ return 0;
+ }
+
+@@ -4802,7 +4795,8 @@ static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
+ */
+ cma_id_put(id_priv);
+ mutex_unlock(&id_priv->handler_mutex);
+- rdma_destroy_id(&id_priv->id);
++ trace_cm_id_destroy(id_priv);
++ _destroy_id(id_priv, state);
+ return;
+ }
+ mutex_unlock(&id_priv->handler_mutex);
+--
+2.25.1
+
--- /dev/null
+From 531152780e4b0d2f59f7ccd09b05e51c59fe05c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 10:07:06 +0300
+Subject: RDMA/cma: Remove unneeded locking for req paths
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit cc9c037343898eb7a775e6b81d092ee21eeff218 ]
+
+The REQ flows are concerned that once the handler is called on the new
+cm_id, the ULP can choose to trigger rdma_destroy_id() concurrently at
+any time.
+
+However, this is not true: while the ULP can call rdma_destroy_id(), it
+immediately blocks on the handler_mutex, which prevents anything harmful
+from running concurrently.
+
+Remove the confusing extra locking and refcounts, and make it clearer
+that the handler_mutex protects the state during destroy.
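+
+As a standalone userspace pthread sketch (not the cma code itself) of why
+the extra reference is unneeded: a concurrent destroy simply blocks on
+the handler mutex that the request handler is still holding:
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  static pthread_mutex_t handler_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+  /* the ULP's destroy call: it cannot proceed while a handler runs */
+  static void *ulp_destroy(void *arg)
+  {
+      (void)arg;
+      pthread_mutex_lock(&handler_mutex);
+      printf("destroy runs only after the handler dropped the mutex\n");
+      pthread_mutex_unlock(&handler_mutex);
+      return NULL;
+  }
+
+  int main(void)
+  {
+      pthread_t t;
+
+      pthread_mutex_lock(&handler_mutex);  /* REQ handler is running */
+      pthread_create(&t, NULL, ulp_destroy, NULL);
+      sleep(1);                            /* handler still uses conn_id */
+      printf("handler done with conn_id\n");
+      pthread_mutex_unlock(&handler_mutex);
+      pthread_join(t, NULL);
+      return 0;
+  }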
+
+Link: https://lore.kernel.org/r/20200723070707.1771101-4-leon@kernel.org
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 31 ++++++-------------------------
+ 1 file changed, 6 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 04151c301e851..11f43204fee77 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1831,21 +1831,21 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+
+ void rdma_destroy_id(struct rdma_cm_id *id)
+ {
+- struct rdma_id_private *id_priv;
++ struct rdma_id_private *id_priv =
++ container_of(id, struct rdma_id_private, id);
+ enum rdma_cm_state state;
+
+- id_priv = container_of(id, struct rdma_id_private, id);
+- trace_cm_id_destroy(id_priv);
+- state = cma_exch(id_priv, RDMA_CM_DESTROYING);
+- cma_cancel_operation(id_priv, state);
+-
+ /*
+ * Wait for any active callback to finish. New callbacks will find
+ * the id_priv state set to destroying and abort.
+ */
+ mutex_lock(&id_priv->handler_mutex);
++ trace_cm_id_destroy(id_priv);
++ state = cma_exch(id_priv, RDMA_CM_DESTROYING);
+ mutex_unlock(&id_priv->handler_mutex);
+
++ cma_cancel_operation(id_priv, state);
++
+ rdma_restrack_del(&id_priv->res);
+ if (id_priv->cma_dev) {
+ if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
+@@ -2205,19 +2205,9 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ cm_id->context = conn_id;
+ cm_id->cm_handler = cma_ib_handler;
+
+- /*
+- * Protect against the user destroying conn_id from another thread
+- * until we're done accessing it.
+- */
+- cma_id_get(conn_id);
+ ret = cma_cm_event_handler(conn_id, &event);
+ if (ret)
+ goto err3;
+- /*
+- * Acquire mutex to prevent user executing rdma_destroy_id()
+- * while we're accessing the cm_id.
+- */
+- mutex_lock(&lock);
+ if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+ (conn_id->id.qp_type != IB_QPT_UD)) {
+ trace_cm_send_mra(cm_id->context);
+@@ -2226,13 +2216,11 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ mutex_unlock(&lock);
+ mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
+- cma_id_put(conn_id);
+ if (net_dev)
+ dev_put(net_dev);
+ return 0;
+
+ err3:
+- cma_id_put(conn_id);
+ /* Destroy the CM ID by returning a non-zero value. */
+ conn_id->cm_id.ib = NULL;
+ err2:
+@@ -2409,11 +2397,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
+ memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
+
+- /*
+- * Protect against the user destroying conn_id from another thread
+- * until we're done accessing it.
+- */
+- cma_id_get(conn_id);
+ ret = cma_cm_event_handler(conn_id, &event);
+ if (ret) {
+ /* User wants to destroy the CM ID */
+@@ -2421,13 +2404,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ cma_exch(conn_id, RDMA_CM_DESTROYING);
+ mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
+- cma_id_put(conn_id);
+ rdma_destroy_id(&conn_id->id);
+ return ret;
+ }
+
+ mutex_unlock(&conn_id->handler_mutex);
+- cma_id_put(conn_id);
+
+ out:
+ mutex_unlock(&listen_id->handler_mutex);
+--
+2.25.1
+
--- /dev/null
+From 4decc6be3d3af1da5b7ab670258fc05f2c966bb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 10:07:04 +0300
+Subject: RDMA/cma: Simplify DEVICE_REMOVAL for internal_id
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit d54f23c09ec62670901f1a2a4712a5218522ca2b ]
+
+cma_process_remove() triggers an unconditional rdma_destroy_id() for
+internal_ids and skips the event delivery and the transition through
+RDMA_CM_DEVICE_REMOVAL.
+
+This is confusing and unnecessary. An internal_id always has
+cma_listen_handler() as its handler, so have it catch the
+RDMA_CM_EVENT_DEVICE_REMOVAL event, consume it directly, and signal
+removal.
+
+This way the FSM sequence never skips the DEVICE_REMOVAL case, and the
+logic in this hard-to-test area is simplified.
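+
+As a standalone userspace sketch (not the cma code itself) of the
+convention this relies on, a non-zero return from the handler tells the
+caller to destroy the id:
+
+  #include <stdio.h>
+
+  enum cm_event { EVENT_CONNECT_REQUEST, EVENT_DEVICE_REMOVAL };
+
+  /* internal listen ids: consume DEVICE_REMOVAL, ask for destruction */
+  static int listen_handler(enum cm_event event)
+  {
+      if (event == EVENT_DEVICE_REMOVAL)
+          return -1;  /* any non-zero return means "destroy this id" */
+      return 0;
+  }
+
+  static void deliver(enum cm_event event)
+  {
+      if (listen_handler(event))
+          printf("event %d: caller destroys the id\n", event);
+      else
+          printf("event %d: id stays around\n", event);
+  }
+
+  int main(void)
+  {
+      deliver(EVENT_CONNECT_REQUEST);
+      deliver(EVENT_DEVICE_REMOVAL);
+      return 0;
+  }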
+
+Link: https://lore.kernel.org/r/20200723070707.1771101-2-leon@kernel.org
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index c30cf5307ce3e..537eeebde5f4d 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2482,6 +2482,10 @@ static int cma_listen_handler(struct rdma_cm_id *id,
+ {
+ struct rdma_id_private *id_priv = id->context;
+
++ /* Listening IDs are always destroyed on removal */
++ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
++ return -1;
++
+ id->context = id_priv->id.context;
+ id->event_handler = id_priv->id.event_handler;
+ trace_cm_event_handler(id_priv, event);
+@@ -4829,7 +4833,7 @@ static void cma_process_remove(struct cma_device *cma_dev)
+ cma_id_get(id_priv);
+ mutex_unlock(&lock);
+
+- ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
++ ret = cma_remove_id_dev(id_priv);
+ cma_id_put(id_priv);
+ if (ret)
+ rdma_destroy_id(&id_priv->id);
+--
+2.25.1
+
--- /dev/null
+From 2e0b68e7d94795f150a66adae8a52e307abee254 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 10:07:05 +0300
+Subject: RDMA/cma: Using the standard locking pattern when delivering the
+ removal event
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit 3647a28de1ada8708efc78d956619b9df5004478 ]
+
+Whenever an event is delivered to the handler it should be done under
+the handler_mutex, and any non-zero return from the handler should
+trigger destruction of the cm_id.
+
+cma_process_remove() skips some steps here; that is not necessarily
+wrong, since the state change should prevent any races, but it is
+confusing and unnecessary.
+
+Follow the standard pattern here, with the slight twist that the
+transition to RDMA_CM_DEVICE_REMOVAL includes a cma_cancel_operation().
+
+Link: https://lore.kernel.org/r/20200723070707.1771101-3-leon@kernel.org
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 62 ++++++++++++++++++++---------------
+ 1 file changed, 36 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 537eeebde5f4d..04151c301e851 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1925,6 +1925,8 @@ static int cma_cm_event_handler(struct rdma_id_private *id_priv,
+ {
+ int ret;
+
++ lockdep_assert_held(&id_priv->handler_mutex);
++
+ trace_cm_event_handler(id_priv, event);
+ ret = id_priv->id.event_handler(&id_priv->id, event);
+ trace_cm_event_done(id_priv, event, ret);
+@@ -4793,50 +4795,58 @@ free_cma_dev:
+ return ret;
+ }
+
+-static int cma_remove_id_dev(struct rdma_id_private *id_priv)
++static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
+ {
+- struct rdma_cm_event event = {};
++ struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
+ enum rdma_cm_state state;
+- int ret = 0;
+-
+- /* Record that we want to remove the device */
+- state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+- if (state == RDMA_CM_DESTROYING)
+- return 0;
++ unsigned long flags;
+
+- cma_cancel_operation(id_priv, state);
+ mutex_lock(&id_priv->handler_mutex);
++ /* Record that we want to remove the device */
++ spin_lock_irqsave(&id_priv->lock, flags);
++ state = id_priv->state;
++ if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
++ spin_unlock_irqrestore(&id_priv->lock, flags);
++ mutex_unlock(&id_priv->handler_mutex);
++ cma_id_put(id_priv);
++ return;
++ }
++ id_priv->state = RDMA_CM_DEVICE_REMOVAL;
++ spin_unlock_irqrestore(&id_priv->lock, flags);
+
+- /* Check for destruction from another callback. */
+- if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
+- goto out;
+-
+- event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+- ret = cma_cm_event_handler(id_priv, &event);
+-out:
++ if (cma_cm_event_handler(id_priv, &event)) {
++ /*
++ * At this point the ULP promises it won't call
++ * rdma_destroy_id() concurrently
++ */
++ cma_id_put(id_priv);
++ mutex_unlock(&id_priv->handler_mutex);
++ rdma_destroy_id(&id_priv->id);
++ return;
++ }
+ mutex_unlock(&id_priv->handler_mutex);
+- return ret;
++
++ /*
++ * If this races with destroy then the thread that first assigns state
++ * to a destroying does the cancel.
++ */
++ cma_cancel_operation(id_priv, state);
++ cma_id_put(id_priv);
+ }
+
+ static void cma_process_remove(struct cma_device *cma_dev)
+ {
+- struct rdma_id_private *id_priv;
+- int ret;
+-
+ mutex_lock(&lock);
+ while (!list_empty(&cma_dev->id_list)) {
+- id_priv = list_entry(cma_dev->id_list.next,
+- struct rdma_id_private, list);
++ struct rdma_id_private *id_priv = list_first_entry(
++ &cma_dev->id_list, struct rdma_id_private, list);
+
+ list_del(&id_priv->listen_list);
+ list_del_init(&id_priv->list);
+ cma_id_get(id_priv);
+ mutex_unlock(&lock);
+
+- ret = cma_remove_id_dev(id_priv);
+- cma_id_put(id_priv);
+- if (ret)
+- rdma_destroy_id(&id_priv->id);
++ cma_send_device_removal_put(id_priv);
+
+ mutex_lock(&lock);
+ }
+--
+2.25.1
+
--- /dev/null
+io_uring-fix-cancel-of-deferred-reqs-with-files.patch
+io_uring-fix-linked-deferred-files-cancellation.patch
+rdma-cma-simplify-device_removal-for-internal_id.patch
+rdma-cma-using-the-standard-locking-pattern-when-del.patch
+rdma-cma-remove-unneeded-locking-for-req-paths.patch
+rdma-cma-execute-rdma_cm-destruction-from-a-handler-.patch