git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.11-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Oct 2013 18:50:36 +0000 (11:50 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Oct 2013 18:50:36 +0000 (11:50 -0700)
added patches:
bcache-fix-a-dumb-cpu-spinning-bug-in-writeback.patch
bcache-fix-a-flush-fua-performance-bug.patch
bcache-fix-a-shrinker-deadlock.patch
bcache-fix-a-writeback-performance-regression.patch
bcache-fix-flushes-in-writeback-mode.patch
bcache-fix-for-handling-overlapping-extents-when-reading-in-a-btree-node.patch
bcache-fix-for-when-no-journal-entries-are-found.patch
bcache-strip-endline-when-writing-the-label-through-sysfs.patch

queue-3.11/bcache-fix-a-dumb-cpu-spinning-bug-in-writeback.patch [new file with mode: 0644]
queue-3.11/bcache-fix-a-flush-fua-performance-bug.patch [new file with mode: 0644]
queue-3.11/bcache-fix-a-shrinker-deadlock.patch [new file with mode: 0644]
queue-3.11/bcache-fix-a-writeback-performance-regression.patch [new file with mode: 0644]
queue-3.11/bcache-fix-flushes-in-writeback-mode.patch [new file with mode: 0644]
queue-3.11/bcache-fix-for-handling-overlapping-extents-when-reading-in-a-btree-node.patch [new file with mode: 0644]
queue-3.11/bcache-fix-for-when-no-journal-entries-are-found.patch [new file with mode: 0644]
queue-3.11/bcache-strip-endline-when-writing-the-label-through-sysfs.patch [new file with mode: 0644]
queue-3.11/series

diff --git a/queue-3.11/bcache-fix-a-dumb-cpu-spinning-bug-in-writeback.patch b/queue-3.11/bcache-fix-a-dumb-cpu-spinning-bug-in-writeback.patch
new file mode 100644
index 0000000..2aaac10
--- /dev/null
@@ -0,0 +1,31 @@
+From 79e3dab90d9f826ceca67c7890e048ac9169de49 Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:33 -0700
+Subject: bcache: Fix a dumb CPU spinning bug in writeback
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit 79e3dab90d9f826ceca67c7890e048ac9169de49 upstream.
+
+schedule_timeout() != schedule_timeout_uninterruptible()
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/writeback.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -397,8 +397,7 @@ static void read_dirty(struct closure *c
+               if (delay > 0 &&
+                   (KEY_START(&w->key) != dc->last_read ||
+                    jiffies_to_msecs(delay) > 50))
+-                      while (delay)
+-                              delay = schedule_timeout(delay);
++                      delay = schedule_timeout_uninterruptible(delay);
+               dc->last_read   = KEY_OFFSET(&w->key);
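For context on the one-line changelog: schedule_timeout() only sleeps if the caller has already moved the task out of TASK_RUNNING. Called bare, as in the removed loop, the task stays runnable, schedule() returns to it almost immediately, and the loop burns CPU until the timeout is finally consumed. schedule_timeout_uninterruptible() sets the task state first; in kernels of this vintage it is essentially the following wrapper:

    signed long __sched schedule_timeout_uninterruptible(signed long timeout)
    {
            /* without this, schedule_timeout() returns without sleeping */
            __set_current_state(TASK_UNINTERRUPTIBLE);
            return schedule_timeout(timeout);
    }
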
diff --git a/queue-3.11/bcache-fix-a-flush-fua-performance-bug.patch b/queue-3.11/bcache-fix-a-flush-fua-performance-bug.patch
new file mode 100644
index 0000000..e5934d6
--- /dev/null
@@ -0,0 +1,32 @@
+From 1394d6761b6e9e15ee7c632a6d48791188727b40 Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:32 -0700
+Subject: bcache: Fix a flush/fua performance bug
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit 1394d6761b6e9e15ee7c632a6d48791188727b40 upstream.
+
+bch_journal_meta() was missing the flush to make the journal write
+actually go down (instead of waiting up to journal_delay_ms)...
+
+Whoops
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/journal.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -695,6 +695,7 @@ void bch_journal_meta(struct cache_set *
+               if (cl)
+                       BUG_ON(!closure_wait(&w->wait, cl));
++              closure_flush(&c->journal.io);
+               __journal_try_write(c, true);
+       }
+ }
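The mechanism behind the one-liner: bcache batches journal writes and normally lets a batch sit for up to journal_delay_ms before submitting it, but bch_journal_meta() is used for writes the caller is waiting on, so it must push the batch out immediately; that is what the added closure_flush() on the journal's closure-with-timer does. The same batch-then-kick shape, as a rough sketch with plain workqueues (hypothetical names, not bcache's closure machinery):

    #include <linux/workqueue.h>

    struct journal {
            struct delayed_work io;         /* submits the batched entries */
    };

    static void journal_kick(struct journal *j)
    {
            /* collapse the batching delay to zero for synchronous callers */
            mod_delayed_work(system_wq, &j->io, 0);
    }
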
diff --git a/queue-3.11/bcache-fix-a-shrinker-deadlock.patch b/queue-3.11/bcache-fix-a-shrinker-deadlock.patch
new file mode 100644
index 0000000..ed5f4df
--- /dev/null
@@ -0,0 +1,32 @@
+From a698e08c82dfb9771e0bac12c7337c706d729b6d Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:34 -0700
+Subject: bcache: Fix a shrinker deadlock
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit a698e08c82dfb9771e0bac12c7337c706d729b6d upstream.
+
+GFP_NOIO means we could be getting called recursively - mca_alloc() ->
+mca_data_alloc() - definitely can't use mutex_lock(bucket_lock) then.
+Whoops.
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/btree.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -617,7 +617,7 @@ static int bch_mca_shrink(struct shrinke
+               return mca_can_free(c) * c->btree_pages;
+       /* Return -1 if we can't do anything right now */
+-      if (sc->gfp_mask & __GFP_WAIT)
++      if (sc->gfp_mask & __GFP_IO)
+               mutex_lock(&c->bucket_lock);
+       else if (!mutex_trylock(&c->bucket_lock))
+               return -1;
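Why __GFP_IO is the right bit to test: the shrinker can be entered recursively from bcache's own allocation path (mca_alloc() -> mca_data_alloc()), which allocates GFP_NOIO while already holding bucket_lock. The flag compositions in this era's include/linux/gfp.h make the difference clear:

    #define GFP_NOIO        (__GFP_WAIT)
    #define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
    #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)

A GFP_NOIO allocation still passes the old __GFP_WAIT test, so the recursive call would mutex_lock() a mutex its own task already held - a self-deadlock. It fails the __GFP_IO test, so it now falls through to mutex_trylock() and returns -1 instead.
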
diff --git a/queue-3.11/bcache-fix-a-writeback-performance-regression.patch b/queue-3.11/bcache-fix-a-writeback-performance-regression.patch
new file mode 100644
index 0000000..96ee9ce
--- /dev/null
@@ -0,0 +1,225 @@
+From c2a4f3183a1248f615a695fbd8905da55ad11bba Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:31 -0700
+Subject: bcache: Fix a writeback performance regression
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit c2a4f3183a1248f615a695fbd8905da55ad11bba upstream.
+
+Background writeback works by scanning the btree for dirty data and
+adding those keys into a fixed size buffer, then for each dirty key in
+the keybuf writing it to the backing device.
+
+When read_dirty() finishes and it's time to scan for more dirty data, we
+need to wait for the outstanding writeback IO to finish - they still
+take up slots in the keybuf (so that foreground writes can check for
+them to avoid races) - without that wait, we'll continually rescan when
+we'll be able to add at most a key or two to the keybuf, and that takes
+locks that starve foreground IO.  Doh.
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/bcache.h    |    7 ++----
+ drivers/md/bcache/util.c      |   11 +++++++++-
+ drivers/md/bcache/util.h      |   12 ++++++++---
+ drivers/md/bcache/writeback.c |   43 ++++++++++++++++++++----------------------
+ 4 files changed, 43 insertions(+), 30 deletions(-)
+
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -498,7 +498,7 @@ struct cached_dev {
+        */
+       atomic_t                has_dirty;
+-      struct ratelimit        writeback_rate;
++      struct bch_ratelimit    writeback_rate;
+       struct delayed_work     writeback_rate_update;
+       /*
+@@ -507,10 +507,9 @@ struct cached_dev {
+        */
+       sector_t                last_read;
+-      /* Number of writeback bios in flight */
+-      atomic_t                in_flight;
++      /* Limit number of writeback bios in flight */
++      struct semaphore        in_flight;
+       struct closure_with_timer writeback;
+-      struct closure_waitlist writeback_wait;
+       struct keybuf           writeback_keys;
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_s
+       stats->last = now ?: 1;
+ }
+-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
++/**
++ * bch_next_delay() - increment @d by the amount of work done, and return how
++ * long to delay until the next time to do some work.
++ *
++ * @d - the struct bch_ratelimit to update
++ * @done - the amount of work done, in arbitrary units
++ *
++ * Returns the amount of time to delay by, in jiffies
++ */
++uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
+ {
+       uint64_t now = local_clock();
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequen
+       (ewma) >> factor;                                               \
+ })
+-struct ratelimit {
++struct bch_ratelimit {
++      /* Next time we want to do some work, in nanoseconds */
+       uint64_t                next;
++
++      /*
++       * Rate at which we want to do work, in units per nanosecond
++       * The units here correspond to the units passed to bch_next_delay()
++       */
+       unsigned                rate;
+ };
+-static inline void ratelimit_reset(struct ratelimit *d)
++static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
+ {
+       d->next = local_clock();
+ }
+-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
++uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
+ #define __DIV_SAFE(n, d, zero)                                                \
+ ({                                                                    \
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -94,11 +94,15 @@ static void update_writeback_rate(struct
+ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
+ {
++      uint64_t ret;
++
+       if (atomic_read(&dc->disk.detaching) ||
+           !dc->writeback_percent)
+               return 0;
+-      return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
++      ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
++
++      return min_t(uint64_t, ret, HZ);
+ }
+ /* Background writeback */
+@@ -208,7 +212,7 @@ normal_refill:
+       up_write(&dc->writeback_lock);
+-      ratelimit_reset(&dc->writeback_rate);
++      bch_ratelimit_reset(&dc->writeback_rate);
+       /* Punt to workqueue only so we don't recurse and blow the stack */
+       continue_at(cl, read_dirty, dirty_wq);
+@@ -318,9 +322,7 @@ static void write_dirty_finish(struct cl
+       }
+       bch_keybuf_del(&dc->writeback_keys, w);
+-      atomic_dec_bug(&dc->in_flight);
+-
+-      closure_wake_up(&dc->writeback_wait);
++      up(&dc->in_flight);
+       closure_return_with_destructor(cl, dirty_io_destructor);
+ }
+@@ -349,7 +351,7 @@ static void write_dirty(struct closure *
+       closure_bio_submit(&io->bio, cl, &io->dc->disk);
+-      continue_at(cl, write_dirty_finish, dirty_wq);
++      continue_at(cl, write_dirty_finish, system_wq);
+ }
+ static void read_dirty_endio(struct bio *bio, int error)
+@@ -369,7 +371,7 @@ static void read_dirty_submit(struct clo
+       closure_bio_submit(&io->bio, cl, &io->dc->disk);
+-      continue_at(cl, write_dirty, dirty_wq);
++      continue_at(cl, write_dirty, system_wq);
+ }
+ static void read_dirty(struct closure *cl)
+@@ -394,12 +396,9 @@ static void read_dirty(struct closure *c
+               if (delay > 0 &&
+                   (KEY_START(&w->key) != dc->last_read ||
+-                   jiffies_to_msecs(delay) > 50)) {
+-                      w->private = NULL;
+-
+-                      closure_delay(&dc->writeback, delay);
+-                      continue_at(cl, read_dirty, dirty_wq);
+-              }
++                   jiffies_to_msecs(delay) > 50))
++                      while (delay)
++                              delay = schedule_timeout(delay);
+               dc->last_read   = KEY_OFFSET(&w->key);
+@@ -424,15 +423,10 @@ static void read_dirty(struct closure *c
+               trace_bcache_writeback(&w->key);
+-              closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
++              down(&dc->in_flight);
++              closure_call(&io->cl, read_dirty_submit, NULL, cl);
+               delay = writeback_delay(dc, KEY_SIZE(&w->key));
+-
+-              atomic_inc(&dc->in_flight);
+-
+-              if (!closure_wait_event(&dc->writeback_wait, cl,
+-                                      atomic_read(&dc->in_flight) < 64))
+-                      continue_at(cl, read_dirty, dirty_wq);
+       }
+       if (0) {
+@@ -442,7 +436,11 @@ err:
+               bch_keybuf_del(&dc->writeback_keys, w);
+       }
+-      refill_dirty(cl);
++      /*
++       * Wait for outstanding writeback IOs to finish (and keybuf slots to be
++       * freed) before refilling again
++       */
++      continue_at(cl, refill_dirty, dirty_wq);
+ }
+ /* Init */
+@@ -484,6 +482,7 @@ void bch_sectors_dirty_init(struct cache
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+ {
++      sema_init(&dc->in_flight, 64);
+       closure_init_unlocked(&dc->writeback);
+       init_rwsem(&dc->writeback_lock);
+@@ -513,7 +512,7 @@ void bch_writeback_exit(void)
+ int __init bch_writeback_init(void)
+ {
+-      dirty_wq = create_singlethread_workqueue("bcache_writeback");
++      dirty_wq = create_workqueue("bcache_writeback");
+       if (!dirty_wq)
+               return -ENOMEM;
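The structural change worth calling out: the old code tracked in-flight writeback bios with an atomic counter plus a closure waitlist, rescanning whenever the count dipped below 64; the patch replaces all of that with a counting semaphore, so read_dirty() simply blocks once 64 bios are outstanding and each completion frees a slot. Reduced to its essentials (names as in the patch, everything else elided):

    sema_init(&dc->in_flight, 64);  /* init: at most 64 writeback bios */

    down(&dc->in_flight);           /* submit path: blocks at the limit */
    /* ... issue one writeback bio ... */
    up(&dc->in_flight);             /* completion path: frees a slot */
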
diff --git a/queue-3.11/bcache-fix-flushes-in-writeback-mode.patch b/queue-3.11/bcache-fix-flushes-in-writeback-mode.patch
new file mode 100644
index 0000000..3626e2f
--- /dev/null
@@ -0,0 +1,54 @@
+From c0f04d88e46d14de51f4baebb6efafb7d59e9f96 Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:36 -0700
+Subject: bcache: Fix flushes in writeback mode
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit c0f04d88e46d14de51f4baebb6efafb7d59e9f96 upstream.
+
+In writeback mode, when we get a cache flush we need to make sure we
+issue a flush to the backing device.
+
+The code for sending down an extra flush was wrong - by cloning the bio
+we were probably getting flags that didn't make sense for a bare flush,
+and also the old code was firing for FUA bios, for which we don't need
+to send a flush to the backing device.
+
+This was causing data corruption somehow - the mechanism was never
+determined, but this patch fixes it for the users that were seeing it.
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/request.c |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -997,14 +997,17 @@ static void request_write(struct cached_
+       } else {
+               bch_writeback_add(dc);
+-              if (s->op.flush_journal) {
++              if (bio->bi_rw & REQ_FLUSH) {
+                       /* Also need to send a flush to the backing device */
+-                      s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+-                                                         dc->disk.bio_split);
++                      struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
++                                                           dc->disk.bio_split);
+-                      bio->bi_size = 0;
+-                      bio->bi_vcnt = 0;
+-                      closure_bio_submit(bio, cl, s->d);
++                      flush->bi_rw    = WRITE_FLUSH;
++                      flush->bi_bdev  = bio->bi_bdev;
++                      flush->bi_end_io = request_endio;
++                      flush->bi_private = cl;
++
++                      closure_bio_submit(flush, cl, s->d);
+               } else {
+                       s->op.cache_bio = bio;
+               }
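The replacement is the standard way to issue a bare cache flush on a 3.x kernel: allocate an empty bio with zero data pages, aim it at the backing device, and submit it as WRITE_FLUSH, rather than cloning the incoming bio and inheriting whatever flags (FUA, sync hints) and vector state it carried. Outside bcache's closure helpers the same idea looks roughly like this (flush_endio, ctx, and backing_bdev are hypothetical):

    struct bio *flush = bio_alloc(GFP_NOIO, 0); /* 0 bvecs: no payload */

    flush->bi_bdev    = backing_bdev;
    flush->bi_end_io  = flush_endio;            /* completion callback */
    flush->bi_private = ctx;

    submit_bio(WRITE_FLUSH, flush);             /* 3.x: submit_bio(rw, bio) */
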
diff --git a/queue-3.11/bcache-fix-for-handling-overlapping-extents-when-reading-in-a-btree-node.patch b/queue-3.11/bcache-fix-for-handling-overlapping-extents-when-reading-in-a-btree-node.patch
new file mode 100644
index 0000000..cf8451f
--- /dev/null
@@ -0,0 +1,83 @@
+From 84786438ed17978d72eeced580ab757e4da8830b Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:35 -0700
+Subject: bcache: Fix for handling overlapping extents when reading in a btree node
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit 84786438ed17978d72eeced580ab757e4da8830b upstream.
+
+btree_sort_fixup() was overly clever, because it was trying to avoid
+pulling a key off the btree iterator in more than one place.
+
+This led to a really obscure bug where we'd break early from the loop in
+btree_sort_fixup() if the current key overlapped with keys in more than
+one older set, and the next key it overlapped with was zero size.
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/bset.c |   39 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 28 insertions(+), 11 deletions(-)
+
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct
+ /* Mergesort */
++static void sort_key_next(struct btree_iter *iter,
++                        struct btree_iter_set *i)
++{
++      i->k = bkey_next(i->k);
++
++      if (i->k == i->end)
++              *i = iter->data[--iter->used];
++}
++
+ static void btree_sort_fixup(struct btree_iter *iter)
+ {
+       while (iter->used > 1) {
+               struct btree_iter_set *top = iter->data, *i = top + 1;
+-              struct bkey *k;
+               if (iter->used > 2 &&
+                   btree_iter_cmp(i[0], i[1]))
+                       i++;
+-              for (k = i->k;
+-                   k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
+-                   k = bkey_next(k))
+-                      if (top->k > i->k)
+-                              __bch_cut_front(top->k, k);
+-                      else if (KEY_SIZE(k))
+-                              bch_cut_back(&START_KEY(k), top->k);
+-
+-              if (top->k < i->k || k == i->k)
++              if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+                       break;
+-              heap_sift(iter, i - top, btree_iter_cmp);
++              if (!KEY_SIZE(i->k)) {
++                      sort_key_next(iter, i);
++                      heap_sift(iter, i - top, btree_iter_cmp);
++                      continue;
++              }
++
++              if (top->k > i->k) {
++                      if (bkey_cmp(top->k, i->k) >= 0)
++                              sort_key_next(iter, i);
++                      else
++                              bch_cut_front(top->k, i->k);
++
++                      heap_sift(iter, i - top, btree_iter_cmp);
++              } else {
++                      /* can't happen because of comparison func */
++                      BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
++                      bch_cut_back(&START_KEY(i->k), top->k);
++              }
+       }
+ }
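For readers outside bcache: an extent key encodes its end, covering sectors [KEY_OFFSET(k) - KEY_SIZE(k), KEY_OFFSET(k)), and when keys from different sets overlap, the newer data wins. A worked example with illustrative values:

    /* top (newer set): offset 20, size 10  -> covers sectors 10..20
     * i   (older set): offset 25, size 10  -> covers sectors 15..25
     *
     * They overlap on 15..20, so bch_cut_front(top->k, i->k) trims the
     * older key to 20..25. A zero-size key has nothing left to trim;
     * the rewritten loop pops it with sort_key_next() and re-sifts the
     * heap, where the old code could break out of the loop too early. */
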
diff --git a/queue-3.11/bcache-fix-for-when-no-journal-entries-are-found.patch b/queue-3.11/bcache-fix-for-when-no-journal-entries-are-found.patch
new file mode 100644
index 0000000..853ad74
--- /dev/null
@@ -0,0 +1,88 @@
+From c426c4fd46f709ade2bddd51c5738729c7ae1db5 Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Mon, 23 Sep 2013 23:17:29 -0700
+Subject: bcache: Fix for when no journal entries are found
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit c426c4fd46f709ade2bddd51c5738729c7ae1db5 upstream.
+
+The journal replay code didn't handle this case, causing it to go into
+an infinite loop...
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/journal.c |   30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c
+               bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+               pr_debug("%u journal buckets", ca->sb.njournal_buckets);
+-              /* Read journal buckets ordered by golden ratio hash to quickly
++              /*
++               * Read journal buckets ordered by golden ratio hash to quickly
+                * find a sequence of buckets with valid journal entries
+                */
+               for (i = 0; i < ca->sb.njournal_buckets; i++) {
+@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c
+                               goto bsearch;
+               }
+-              /* If that fails, check all the buckets we haven't checked
++              /*
++               * If that fails, check all the buckets we haven't checked
+                * already
+                */
+               pr_debug("falling back to linear search");
+-              for (l = 0; l < ca->sb.njournal_buckets; l++) {
+-                      if (test_bit(l, bitmap))
+-                              continue;
+-
++              for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
++                   l < ca->sb.njournal_buckets;
++                   l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
+                       if (read_bucket(l))
+                               goto bsearch;
+-              }
++
++              if (list_empty(list))
++                      continue;
+ bsearch:
+               /* Binary search */
+               m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+@@ -197,10 +200,12 @@ bsearch:
+                               r = m;
+               }
+-              /* Read buckets in reverse order until we stop finding more
++              /*
++               * Read buckets in reverse order until we stop finding more
+                * journal entries
+                */
+-              pr_debug("finishing up");
++              pr_debug("finishing up: m %u njournal_buckets %u",
++                       m, ca->sb.njournal_buckets);
+               l = m;
+               while (1) {
+@@ -228,9 +233,10 @@ bsearch:
+                       }
+       }
+-      c->journal.seq = list_entry(list->prev,
+-                                  struct journal_replay,
+-                                  list)->j.seq;
++      if (!list_empty(list))
++              c->journal.seq = list_entry(list->prev,
++                                          struct journal_replay,
++                                          list)->j.seq;
+       return 0;
+ #undef read_bucket
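Two separate repairs here. The linear fallback now uses the standard bitmap-walk idiom from linux/bitops.h instead of testing every bit by hand, and the new list_empty() checks make a cache with no valid journal entries bail out cleanly rather than handing an empty list to the binary search and to journal replay - the infinite loop the changelog mentions. The iteration idiom, as a generic sketch (visit() is hypothetical):

    unsigned long l;

    for (l = find_first_zero_bit(bitmap, nbits);
         l < nbits;
         l = find_next_zero_bit(bitmap, nbits, l + 1))
            visit(l);   /* each still-unchecked bucket, exactly once */
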
diff --git a/queue-3.11/bcache-strip-endline-when-writing-the-label-through-sysfs.patch b/queue-3.11/bcache-strip-endline-when-writing-the-label-through-sysfs.patch
new file mode 100644
index 0000000..c63e3b5
--- /dev/null
@@ -0,0 +1,41 @@
+From aee6f1cfff3ce240eb4b43b41ca466b907acbd2e Mon Sep 17 00:00:00 2001
+From: Gabriel de Perthuis <g2p.code@gmail.com>
+Date: Mon, 23 Sep 2013 23:17:28 -0700
+Subject: bcache: Strip endline when writing the label through sysfs
+
+From: Gabriel de Perthuis <g2p.code@gmail.com>
+
+commit aee6f1cfff3ce240eb4b43b41ca466b907acbd2e upstream.
+
+sysfs attributes with unusual characters have crappy failure modes
+in Squeeze (udev 164); later versions of udev are unaffected.
+
+This should make these characters more unusual.
+
+Signed-off-by: Gabriel de Perthuis <g2p.code@gmail.com>
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/sysfs.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -223,8 +223,13 @@ STORE(__cached_dev)
+       }
+       if (attr == &sysfs_label) {
+-              /* note: endlines are preserved */
+-              memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
++              if (size > SB_LABEL_SIZE)
++                      return -EINVAL;
++              memcpy(dc->sb.label, buf, size);
++              if (size < SB_LABEL_SIZE)
++                      dc->sb.label[size] = '\0';
++              if (size && dc->sb.label[size - 1] == '\n')
++                      dc->sb.label[size - 1] = '\0';
+               bch_write_bdev_super(dc, NULL);
+               if (dc->disk.c) {
+                       memcpy(dc->disk.c->uuids[dc->disk.id].label,
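Background for the fix: a sysfs store handler receives exactly the bytes userspace wrote, and writing the label with echo includes a trailing '\n', which previously went verbatim into the on-disk superblock label. The new code is the common sysfs-store idiom - bound-check against the fixed-size field, NUL-terminate when the input is shorter than the field, strip one trailing newline - shown here as a generic sketch over a hypothetical label[LABEL_SZ]:

    if (size > LABEL_SZ)
            return -EINVAL;
    memcpy(label, buf, size);
    if (size < LABEL_SZ)
            label[size] = '\0';
    if (size && label[size - 1] == '\n')
            label[size - 1] = '\0';     /* drop the newline echo appends */
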
diff --git a/queue-3.11/series b/queue-3.11/series
index c23a966f27dfaf75b2a21678fb7df95a66cefb82..cccbe3e0b2db3410a1b9fa3739ecac976229db06 100644
@@ -1,3 +1,11 @@
 block-fix-bio_copy_data.patch
 sysv-add-forgotten-superblock-lock-init-for-v7-fs.patch
 bcache-fix-a-dumb-journal-discard-bug.patch
+bcache-strip-endline-when-writing-the-label-through-sysfs.patch
+bcache-fix-for-when-no-journal-entries-are-found.patch
+bcache-fix-a-writeback-performance-regression.patch
+bcache-fix-a-flush-fua-performance-bug.patch
+bcache-fix-a-dumb-cpu-spinning-bug-in-writeback.patch
+bcache-fix-a-shrinker-deadlock.patch
+bcache-fix-for-handling-overlapping-extents-when-reading-in-a-btree-node.patch
+bcache-fix-flushes-in-writeback-mode.patch