git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.12-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 18 Sep 2017 08:36:49 +0000 (10:36 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 18 Sep 2017 08:36:49 +0000 (10:36 +0200)
added patches:
md-raid1-10-reset-bio-allocated-from-mempool.patch
md-raid5-release-flush-io-in-raid5_do_work.patch

queue-4.12/md-raid1-10-reset-bio-allocated-from-mempool.patch [new file with mode: 0644]
queue-4.12/md-raid5-release-flush-io-in-raid5_do_work.patch [new file with mode: 0644]
queue-4.12/series

diff --git a/queue-4.12/md-raid1-10-reset-bio-allocated-from-mempool.patch b/queue-4.12/md-raid1-10-reset-bio-allocated-from-mempool.patch
new file mode 100644 (file)
index 0000000..9da003e
--- /dev/null
@@ -0,0 +1,125 @@
+From 208410b546207cfc4c832635fa46419cfa86b4cd Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shli@fb.com>
+Date: Thu, 24 Aug 2017 17:50:40 -0700
+Subject: md/raid1/10: reset bio allocated from mempool
+
+From: Shaohua Li <shli@fb.com>
+
+commit 208410b546207cfc4c832635fa46419cfa86b4cd upstream.
+
+Data allocated from a mempool doesn't always get initialized; this happens
+when the data is reused instead of freshly allocated. In the raid1/10 case,
+we must reinitialize the bios.
+
+Reported-by: Jonathan G. Underwood <jonathan.underwood@gmail.com>
+Fixes: f0250618361d ("md: raid10: don't use bio's vec table to manage resync pages")
+Fixes: 98d30c5812c3 ("md: raid1: don't use bio's vec table to manage resync pages")
+Cc: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c  |   19 ++++++++++++++++++-
+ drivers/md/raid10.c |   35 ++++++++++++++++++++++++++++++++---
+ 2 files changed, 50 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2586,6 +2586,23 @@ static int init_resync(struct r1conf *co
+       return 0;
+ }
++static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
++{
++      struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++      struct resync_pages *rps;
++      struct bio *bio;
++      int i;
++
++      for (i = conf->poolinfo->raid_disks; i--; ) {
++              bio = r1bio->bios[i];
++              rps = bio->bi_private;
++              bio_reset(bio);
++              bio->bi_private = rps;
++      }
++      r1bio->master_bio = NULL;
++      return r1bio;
++}
++
+ /*
+  * perform a "sync" on one "block"
+  *
+@@ -2671,7 +2688,7 @@ static sector_t raid1_sync_request(struc
+       bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+               mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+-      r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++      r1_bio = raid1_alloc_init_r1buf(conf);
+       raise_barrier(conf, sector_nr);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2807,6 +2807,35 @@ static int init_resync(struct r10conf *c
+       return 0;
+ }
++static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
++{
++      struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++      struct rsync_pages *rp;
++      struct bio *bio;
++      int nalloc;
++      int i;
++
++      if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
++          test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
++              nalloc = conf->copies; /* resync */
++      else
++              nalloc = 2; /* recovery */
++
++      for (i = 0; i < nalloc; i++) {
++              bio = r10bio->devs[i].bio;
++              rp = bio->bi_private;
++              bio_reset(bio);
++              bio->bi_private = rp;
++              bio = r10bio->devs[i].repl_bio;
++              if (bio) {
++                      rp = bio->bi_private;
++                      bio_reset(bio);
++                      bio->bi_private = rp;
++              }
++      }
++      return r10bio;
++}
++
+ /*
+  * perform a "sync" on one "block"
+  *
+@@ -3036,7 +3065,7 @@ static sector_t raid10_sync_request(stru
+                               atomic_inc(&mreplace->nr_pending);
+                       rcu_read_unlock();
+-                      r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++                      r10_bio = raid10_alloc_init_r10buf(conf);
+                       r10_bio->state = 0;
+                       raise_barrier(conf, rb2 != NULL);
+                       atomic_set(&r10_bio->remaining, 0);
+@@ -3245,7 +3274,7 @@ static sector_t raid10_sync_request(stru
+               }
+               if (sync_blocks < max_sync)
+                       max_sync = sync_blocks;
+-              r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++              r10_bio = raid10_alloc_init_r10buf(conf);
+               r10_bio->state = 0;
+               r10_bio->mddev = mddev;
+@@ -4369,7 +4398,7 @@ static sector_t reshape_request(struct m
+ read_more:
+       /* Now schedule reads for blocks from sector_nr to last */
+-      r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++      r10_bio = raid10_alloc_init_r10buf(conf);
+       r10_bio->state = 0;
+       raise_barrier(conf, sectors_done != 0);
+       atomic_set(&r10_bio->remaining, 0);
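
The two helpers added above exist because mempool_alloc() hands back a
recycled element rather than zeroed memory: whatever the previous resync or
recovery round left in the r1bio/r10bio and its bios is still there. The
helpers therefore bio_reset() every bio in the recycled structure while
saving and restoring bi_private, which still points at the resync page
bookkeeping. The stand-alone user-space sketch below is only an analogue of
that reuse hazard and of the save/reset/restore pattern; the pool, struct and
field names are hypothetical and are not kernel APIs.

    /*
     * User-space analogue (hypothetical names, not kernel code) of the reuse
     * hazard fixed by this patch: a pool that recycles freed elements hands
     * them back with their previous contents intact, so the caller must
     * clear stale state itself. bio_reset() does that for a struct bio, and
     * the md helpers save and restore bi_private around it.
     */
    #include <stdio.h>
    #include <string.h>

    struct fake_bio {
            int status;          /* stands in for bi_status, bi_flags, ... */
            void *bi_private;    /* the one field the md code must keep */
    };

    static struct fake_bio slot;                                /* one-element "mempool" */
    static struct fake_bio *pool_alloc(void) { return &slot; }  /* always recycled */
    static void pool_free(struct fake_bio *b) { (void)b; }      /* no-op */

    int main(void)
    {
            int pages = 42;                     /* pretend resync bookkeeping */
            struct fake_bio *bio = pool_alloc();

            bio->bi_private = &pages;
            bio->status = -5;                   /* first use ends with an error */
            pool_free(bio);

            bio = pool_alloc();                 /* the same object comes back */
            printf("stale status: %d\n", bio->status);     /* prints -5 */

            /* The pattern used by the new helpers: reset, but keep bi_private. */
            void *rp = bio->bi_private;
            memset(bio, 0, sizeof(*bio));       /* plays the role of bio_reset() */
            bio->bi_private = rp;
            printf("after reset:  %d\n", bio->status);     /* prints 0 */
            return 0;
    }

The raid10 variant applies the same reset-then-restore step to devs[i].bio
and, when present, devs[i].repl_bio, for conf->copies slots during
resync/reshape or two slots during recovery.
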
diff --git a/queue-4.12/md-raid5-release-flush-io-in-raid5_do_work.patch b/queue-4.12/md-raid5-release-flush-io-in-raid5_do_work.patch
new file mode 100644 (file)
index 0000000..9aaf534
--- /dev/null
@@ -0,0 +1,47 @@
+From 9c72a18e46ebe0f09484cce8ebf847abdab58498 Mon Sep 17 00:00:00 2001
+From: Song Liu <songliubraving@fb.com>
+Date: Thu, 24 Aug 2017 09:53:59 -0700
+Subject: md/raid5: release/flush io in raid5_do_work()
+
+From: Song Liu <songliubraving@fb.com>
+
+commit 9c72a18e46ebe0f09484cce8ebf847abdab58498 upstream.
+
+In raid5, there are scenarios where some IOs are deferred to a later
+time, and some IOs need a flush to complete. To make sure we make
+progress with these IOs, we need to call the following functions:
+
+    flush_deferred_bios(conf);
+    r5l_flush_stripe_to_raid(conf->log);
+
+Both of these functions are called in raid5d(), but are missing from
+raid5_do_work(). As a result, these functions are not called
+when multi-threading (group_thread_cnt > 0) is enabled. This patch
+adds calls to these functions in raid5_do_work().
+
+Note for stable branches:
+
+  r5l_flush_stripe_to_raid(conf->log) is needed for 4.4+
+  flush_deferred_bios(conf) is only needed for 4.11+
+
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6238,6 +6238,10 @@ static void raid5_do_work(struct work_st
+       spin_unlock_irq(&conf->device_lock);
++      flush_deferred_bios(conf);
++
++      r5l_flush_stripe_to_raid(conf->log);
++
+       async_tx_issue_pending_all();
+       blk_finish_plug(&plug);
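
The effect of the two added calls: with group_thread_cnt > 0, stripes are
handled by worker threads running raid5_do_work() rather than by raid5d(),
so the worker must also call flush_deferred_bios() and
r5l_flush_stripe_to_raid(), exactly as raid5d() does, or the deferred and
flush-dependent IOs described in the commit message may not make progress.
The minimal user-space sketch below only illustrates that idea; the names
(handle_stripes, flush_deferred and so on) are hypothetical stand-ins, not
the md functions.

    /*
     * Minimal user-space sketch (hypothetical names, not the md functions) of
     * why the added calls matter: any thread that handles stripes may leave
     * requests on a deferred list, and only a code path that also drains
     * that list guarantees forward progress.
     */
    #include <stdio.h>

    static int deferred;                    /* stands in for the deferred bio list */

    static void handle_stripes(void)        /* done by raid5d() and raid5_do_work() */
    {
            deferred++;                     /* some requests get deferred */
    }

    static void flush_deferred(void)        /* stands in for flush_deferred_bios() */
    {
            printf("flushed %d deferred request(s)\n", deferred);
            deferred = 0;
    }

    static void worker_before_fix(void) { handle_stripes(); /* never flushes */ }
    static void worker_after_fix(void)  { handle_stripes(); flush_deferred(); }

    int main(void)
    {
            worker_before_fix();
            printf("left pending by the unfixed worker: %d\n", deferred); /* 1 */
            worker_after_fix();
            printf("left pending by the fixed worker:   %d\n", deferred); /* 0 */
            return 0;
    }
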
diff --git a/queue-4.12/series b/queue-4.12/series
index cb2f0ff62ba940014c26a55c365a7366e2965af0..edecb2ba0ac863146017f74d8873fcd494b0af26 100644 (file)
@@ -48,3 +48,5 @@ x86-fsgsbase-64-report-fsbase-and-gsbase-correctly-in-core-dumps.patch
 x86-switch_to-64-rewrite-fs-gs-switching-yet-again-to-fix-amd-cpus.patch
 fuse-allow-server-to-run-in-different-pid_ns.patch
 idr-remove-warn_on_once-when-trying-to-replace-negative-id.patch
+md-raid1-10-reset-bio-allocated-from-mempool.patch
+md-raid5-release-flush-io-in-raid5_do_work.patch