+++ /dev/null
-From ba4db06d47008d44d789bd54bf849665577a6468 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 24 Sep 2020 13:14:52 -0400
-Subject: dm raid: fix discard limits for raid1 and raid10
-
-From: Mike Snitzer <snitzer@redhat.com>
-
-[ Upstream commit e0910c8e4f87bb9f767e61a778b0d9271c4dc512 ]
-
-Block core warned that discard_granularity was 0 for dm-raid with a
-raid1 personality. The reason is that raid_io_hints() was incorrectly
-special-casing raid1 rather than raid0.
-
-But since commit 29efc390b9462 ("md/md0: optimize raid0 discard
-handling"), even raid0 properly handles large discards.
-
-Fix raid_io_hints() by removing the discard limits settings for raid1.
-Also, fix the limits for raid10 by properly stacking the underlying
-limits, as is done in blk_stack_limits() (a short sketch of this
-stacking rule follows the patch).
-
-Depends-on: 29efc390b9462 ("md/md0: optimize raid0 discard handling")
-Fixes: 61697a6abd24a ("dm: eliminate 'split_discard_bios' flag from DM target interface")
-Cc: stable@vger.kernel.org
-Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
-Reported-by: Mikulas Patocka <mpatocka@redhat.com>
-Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/md/dm-raid.c | 12 +++++++-----
- 1 file changed, 7 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
-index 8d2b835d7a108..b45232f9b5c6c 100644
---- a/drivers/md/dm-raid.c
-+++ b/drivers/md/dm-raid.c
-@@ -3730,12 +3730,14 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
- blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
- /*
-- * RAID1 and RAID10 personalities require bio splitting,
-- * RAID0/4/5/6 don't and process large discard bios properly.
-+ * RAID10 personality requires bio splitting,
-+ * RAID0/1/4/5/6 don't and process large discard bios properly.
- */
-- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
-- limits->discard_granularity = chunk_size_bytes;
-- limits->max_discard_sectors = rs->md.chunk_sectors;
-+ if (rs_is_raid10(rs)) {
-+ limits->discard_granularity = max(chunk_size_bytes,
-+ limits->discard_granularity);
-+ limits->max_discard_sectors = min_not_zero(rs->md.chunk_sectors,
-+ limits->max_discard_sectors);
- }
- }
-
---
-2.27.0
-
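A note on the raid10 branch in the hunk above: instead of blindly overwriting the queue limits, the fix stacks the RAID-imposed discard limits on top of whatever the underlying devices already advertise, using max() for the granularity and min_not_zero() for the per-bio cap, which the commit message says mirrors blk_stack_limits(). A minimal sketch of that stacking rule, assuming kernel context; the helper name stack_discard_limits() is hypothetical and not an existing kernel function:

  /*
   * Hypothetical helper, not part of the patch: illustrates the
   * discard-limit stacking rule used in raid_io_hints() above.
   * Granularity is in bytes and the cap is in 512-byte sectors,
   * matching the fields of struct queue_limits.
   */
  #include <linux/blkdev.h>
  #include <linux/kernel.h>

  static void stack_discard_limits(struct queue_limits *limits,
                                   unsigned int granularity_bytes,
                                   unsigned int max_sectors)
  {
          /* The coarser granularity from either side wins. */
          limits->discard_granularity = max(granularity_bytes,
                                            limits->discard_granularity);

          /* The smaller cap wins; 0 means "no cap set yet". */
          limits->max_discard_sectors = min_not_zero(max_sectors,
                                                     limits->max_discard_sectors);
  }

Since raid1 has no striping chunk, chunk_size_bytes ends up 0 there, which is presumably why the old code, by assigning it directly to discard_granularity, tripped the block core warning in the first place.
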
+++ /dev/null
-From 7cf00d16fe4cffcc645be7118f502d812d59d44a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 18 Aug 2020 15:05:16 +0300
-Subject: RDMA/ucma: Fix error cases around ucma_alloc_ctx()
-
-From: Jason Gunthorpe <jgg@nvidia.com>
-
-[ Upstream commit 620db1a1183d69cc49981ee59c9207e53befeae4 ]
-
-The store to ctx->cm_id was based on the idea that _ucma_find_context()
-would not return the ctx until it was fully set up.
-
-Without locking, this doesn't work properly.
-
-Split things so that the xarray ID is allocated with a NULL entry to
-reserve the ID and, once everything is final, the cm_id is set and the
-ctx is stored (a sketch of this pattern follows the patch).
-
-Along the way, this shows that the error unwind in ucma_get_event() is
-wrong when a new ctx is created; fix it up.
-
-Link: https://lore.kernel.org/r/20200818120526.702120-5-leon@kernel.org
-Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/core/ucma.c | 68 +++++++++++++++++++++-------------
- 1 file changed, 42 insertions(+), 26 deletions(-)
-
-diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
-index 6f42ff8f2ec57..45b3e9f76c3b2 100644
---- a/drivers/infiniband/core/ucma.c
-+++ b/drivers/infiniband/core/ucma.c
-@@ -130,6 +130,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
- static DEFINE_XARRAY_ALLOC(multicast_table);
-
- static const struct file_operations ucma_fops;
-+static int __destroy_id(struct ucma_context *ctx);
-
- static inline struct ucma_context *_ucma_find_context(int id,
- struct ucma_file *file)
-@@ -139,7 +140,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
- ctx = xa_load(&ctx_table, id);
- if (!ctx)
- ctx = ERR_PTR(-ENOENT);
-- else if (ctx->file != file || !ctx->cm_id)
-+ else if (ctx->file != file)
- ctx = ERR_PTR(-EINVAL);
- return ctx;
- }
-@@ -217,18 +218,23 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
- refcount_set(&ctx->ref, 1);
- init_completion(&ctx->comp);
- INIT_LIST_HEAD(&ctx->mc_list);
-+ /* So list_del() will work if we don't do ucma_finish_ctx() */
-+ INIT_LIST_HEAD(&ctx->list);
- ctx->file = file;
- mutex_init(&ctx->mutex);
-
-- if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
-- goto error;
--
-- list_add_tail(&ctx->list, &file->ctx_list);
-+ if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
-+ kfree(ctx);
-+ return NULL;
-+ }
- return ctx;
-+}
-
--error:
-- kfree(ctx);
-- return NULL;
-+static void ucma_finish_ctx(struct ucma_context *ctx)
-+{
-+ lockdep_assert_held(&ctx->file->mut);
-+ list_add_tail(&ctx->list, &ctx->file->ctx_list);
-+ xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
- }
-
- static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
-@@ -399,7 +405,7 @@ out:
- static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
- int in_len, int out_len)
- {
-- struct ucma_context *ctx;
-+ struct ucma_context *ctx = NULL;
- struct rdma_ucm_get_event cmd;
- struct ucma_event *uevent;
- int ret = 0;
-@@ -429,33 +435,46 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
- mutex_lock(&file->mut);
- }
-
-- uevent = list_entry(file->event_list.next, struct ucma_event, list);
-+ uevent = list_first_entry(&file->event_list, struct ucma_event, list);
-
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- ctx = ucma_alloc_ctx(file);
- if (!ctx) {
- ret = -ENOMEM;
-- goto done;
-+ goto err_unlock;
- }
-- uevent->ctx->backlog++;
-- ctx->cm_id = uevent->cm_id;
-- ctx->cm_id->context = ctx;
- uevent->resp.id = ctx->id;
-+ ctx->cm_id = uevent->cm_id;
- }
-
- if (copy_to_user(u64_to_user_ptr(cmd.response),
- &uevent->resp,
- min_t(size_t, out_len, sizeof(uevent->resp)))) {
- ret = -EFAULT;
-- goto done;
-+ goto err_ctx;
-+ }
-+
-+ if (ctx) {
-+ uevent->ctx->backlog++;
-+ uevent->cm_id->context = ctx;
-+ ucma_finish_ctx(ctx);
- }
-
- list_del(&uevent->list);
- uevent->ctx->events_reported++;
- if (uevent->mc)
- uevent->mc->events_reported++;
-+ mutex_unlock(&file->mut);
-+
- kfree(uevent);
--done:
-+ return 0;
-+
-+err_ctx:
-+ if (ctx) {
-+ xa_erase(&ctx_table, ctx->id);
-+ kfree(ctx);
-+ }
-+err_unlock:
- mutex_unlock(&file->mut);
- return ret;
- }
-@@ -498,9 +517,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
- if (ret)
- return ret;
-
-- mutex_lock(&file->mut);
- ctx = ucma_alloc_ctx(file);
-- mutex_unlock(&file->mut);
- if (!ctx)
- return -ENOMEM;
-
-@@ -511,24 +528,23 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
- ret = PTR_ERR(cm_id);
- goto err1;
- }
-+ ctx->cm_id = cm_id;
-
- resp.id = ctx->id;
- if (copy_to_user(u64_to_user_ptr(cmd.response),
- &resp, sizeof(resp))) {
-- ret = -EFAULT;
-- goto err2;
-+ xa_erase(&ctx_table, ctx->id);
-+ __destroy_id(ctx);
-+ return -EFAULT;
- }
-
-- ctx->cm_id = cm_id;
-+ mutex_lock(&file->mut);
-+ ucma_finish_ctx(ctx);
-+ mutex_unlock(&file->mut);
- return 0;
-
--err2:
-- rdma_destroy_id(cm_id);
- err1:
- xa_erase(&ctx_table, ctx->id);
-- mutex_lock(&file->mut);
-- list_del(&ctx->list);
-- mutex_unlock(&file->mut);
- kfree(ctx);
- return ret;
- }
---
-2.27.0
-
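The heart of the ucma change above is a reserve-then-publish pattern on the XArray: xa_alloc() with a NULL entry hands out an ID while leaving the slot invisible to xa_load(), so _ucma_find_context() cannot return a half-initialized ctx; only after the cm_id is set does xa_store() publish the pointer, and an error before that point only needs xa_erase() plus kfree(). A minimal, self-contained sketch of the same pattern, assuming kernel context; the obj_* names are illustrative, not the driver's:

  #include <linux/slab.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY_ALLOC(obj_table);

  struct obj {
          u32 id;
          /* ... fields that must be valid before lookups may see us ... */
  };

  /* Reserve an ID but keep the object unpublished: xa_load() on this
   * index still returns NULL, so concurrent lookups fail cleanly
   * instead of seeing a half-built object.
   */
  static struct obj *obj_alloc(void)
  {
          struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

          if (!o)
                  return NULL;
          if (xa_alloc(&obj_table, &o->id, NULL, xa_limit_32b, GFP_KERNEL)) {
                  kfree(o);
                  return NULL;
          }
          return o;
  }

  /* Publish only once the object is fully initialized. */
  static void obj_publish(struct obj *o)
  {
          xa_store(&obj_table, o->id, o, GFP_KERNEL);
  }

  /* Error unwind before publishing: drop the reservation. */
  static void obj_abort(struct obj *o)
  {
          xa_erase(&obj_table, o->id);
          kfree(o);
  }

This is also why the patch can drop the "!ctx->cm_id" check from _ucma_find_context(): an unpublished ctx simply is not found, so no caller can race with its initialization.
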