git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.3-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Mar 2012 23:10:18 +0000 (16:10 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Mar 2012 23:10:18 +0000 (16:10 -0700)
added patches:
dm-crypt-add-missing-error-handling.patch
dm-crypt-fix-mempool-deadlock.patch
dm-exception-store-fix-init-error-path.patch
dm-persistent-data-fix-btree-rebalancing-after-remove.patch
dm-thin-fix-stacked-bi_next-usage.patch

queue-3.3/dm-crypt-add-missing-error-handling.patch [new file with mode: 0644]
queue-3.3/dm-crypt-fix-mempool-deadlock.patch [new file with mode: 0644]
queue-3.3/dm-exception-store-fix-init-error-path.patch [new file with mode: 0644]
queue-3.3/dm-persistent-data-fix-btree-rebalancing-after-remove.patch [new file with mode: 0644]
queue-3.3/dm-thin-fix-stacked-bi_next-usage.patch [new file with mode: 0644]
queue-3.3/series

diff --git a/queue-3.3/dm-crypt-add-missing-error-handling.patch b/queue-3.3/dm-crypt-add-missing-error-handling.patch
new file mode 100644 (file)
index 0000000..67d79ce
--- /dev/null
@@ -0,0 +1,114 @@
+From 72c6e7afc43e19f68a31dea204fc366624d6eee9 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 28 Mar 2012 18:41:22 +0100
+Subject: dm crypt: add missing error handling
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 72c6e7afc43e19f68a31dea204fc366624d6eee9 upstream.
+
+Always set io->error to -EIO when an error is detected in dm-crypt.
+
+There were cases where an error code would be set only if we finished
+processing the last sector. If there were other encryption operations in
+flight, the error would be ignored and the bio would be returned with
+success as if no error had happened.
+
+This bug is present in kcryptd_crypt_write_convert, kcryptd_crypt_read_convert
+and kcryptd_async_done.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Milan Broz <mbroz@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |   28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1044,16 +1044,14 @@ static void kcryptd_queue_io(struct dm_c
+       queue_work(cc->io_queue, &io->work);
+ }
+-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
+-                                        int error, int async)
++static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
+ {
+       struct bio *clone = io->ctx.bio_out;
+       struct crypt_config *cc = io->target->private;
+-      if (unlikely(error < 0)) {
++      if (unlikely(io->error < 0)) {
+               crypt_free_buffer_pages(cc, clone);
+               bio_put(clone);
+-              io->error = -EIO;
+               crypt_dec_pending(io);
+               return;
+       }
+@@ -1104,12 +1102,16 @@ static void kcryptd_crypt_write_convert(
+               sector += bio_sectors(clone);
+               crypt_inc_pending(io);
++
+               r = crypt_convert(cc, &io->ctx);
++              if (r < 0)
++                      io->error = -EIO;
++
+               crypt_finished = atomic_dec_and_test(&io->ctx.pending);
+               /* Encryption was already finished, submit io now */
+               if (crypt_finished) {
+-                      kcryptd_crypt_write_io_submit(io, r, 0);
++                      kcryptd_crypt_write_io_submit(io, 0);
+                       /*
+                        * If there was an error, do not try next fragments.
+@@ -1160,11 +1162,8 @@ static void kcryptd_crypt_write_convert(
+       crypt_dec_pending(io);
+ }
+-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
++static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+ {
+-      if (unlikely(error < 0))
+-              io->error = -EIO;
+-
+       crypt_dec_pending(io);
+ }
+@@ -1179,9 +1178,11 @@ static void kcryptd_crypt_read_convert(s
+                          io->sector);
+       r = crypt_convert(cc, &io->ctx);
++      if (r < 0)
++              io->error = -EIO;
+       if (atomic_dec_and_test(&io->ctx.pending))
+-              kcryptd_crypt_read_done(io, r);
++              kcryptd_crypt_read_done(io);
+       crypt_dec_pending(io);
+ }
+@@ -1202,15 +1203,18 @@ static void kcryptd_async_done(struct cr
+       if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+               error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
++      if (error < 0)
++              io->error = -EIO;
++
+       mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+       if (!atomic_dec_and_test(&ctx->pending))
+               return;
+       if (bio_data_dir(io->base_bio) == READ)
+-              kcryptd_crypt_read_done(io, error);
++              kcryptd_crypt_read_done(io);
+       else
+-              kcryptd_crypt_write_io_submit(io, error, 1);
++              kcryptd_crypt_write_io_submit(io, 1);
+ }
+ static void kcryptd_crypt(struct work_struct *work)
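
For illustration, a minimal userspace sketch of the error-propagation scheme
this patch moves dm-crypt to (io_request, io_record_error and io_sub_op_done
are made-up names, not dm-crypt symbols): every sub-operation records its
failure in a sticky per-request error field, so the status survives even when
other fragments are still in flight and later complete successfully.

/*
 * Sketch only: a request split into several sub-operations that complete
 * independently.  Any failure is latched in io->error; completion of the
 * last sub-operation reports it, regardless of completion order.
 */
#include <stdio.h>
#include <stdatomic.h>

struct io_request {
	atomic_int pending;	/* outstanding sub-operations */
	atomic_int error;	/* 0, or the first error seen (sticky) */
};

static void io_record_error(struct io_request *io, int err)
{
	int expected = 0;

	/* Keep the first error; later successes must not clear it. */
	if (err)
		atomic_compare_exchange_strong(&io->error, &expected, err);
}

static void io_sub_op_done(struct io_request *io, int err)
{
	io_record_error(io, err);

	/* The last completion reports the latched status. */
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		printf("request complete, status %d\n",
		       atomic_load(&io->error));
}

int main(void)
{
	struct io_request io = { .pending = 3, .error = 0 };

	io_sub_op_done(&io, 0);		/* fragment 1 ok */
	io_sub_op_done(&io, -5);	/* fragment 2 fails (say, -EIO) */
	io_sub_op_done(&io, 0);		/* fragment 3 ok: error still reported */
	return 0;
}
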
diff --git a/queue-3.3/dm-crypt-fix-mempool-deadlock.patch b/queue-3.3/dm-crypt-fix-mempool-deadlock.patch
new file mode 100644 (file)
index 0000000..1e29467
--- /dev/null
@@ -0,0 +1,63 @@
+From aeb2deae2660a1773c83d3c6e9e6575daa3855d6 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 28 Mar 2012 18:41:22 +0100
+Subject: dm crypt: fix mempool deadlock
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit aeb2deae2660a1773c83d3c6e9e6575daa3855d6 upstream.
+
+This patch fixes a possible deadlock in dm-crypt's mempool use.
+
+Currently, dm-crypt reserves a mempool of MIN_BIO_PAGES pages.  It
+allocates the first MIN_BIO_PAGES with a non-failing allocation (the
+allocation cannot fail; it waits until the mempool is refilled).  Further
+pages are allocated with different gfp flags that allow failing.
+
+Because allocations may be done in parallel, this code can deadlock. Example:
+Two processes each try to allocate MIN_BIO_PAGES and run simultaneously.
+They may end up in a situation where each process allocates (MIN_BIO_PAGES / 2)
+pages. The mempool is exhausted. Each process waits for more pages to be freed
+to the mempool, which never happens.
+
+To avoid this deadlock scenario, this patch changes the code so that only
+the first page is allocated with a non-failing gfp mask. Allocation of further
+pages may fail.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Milan Broz <mbroz@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -176,7 +176,6 @@ struct crypt_config {
+ #define MIN_IOS        16
+ #define MIN_POOL_PAGES 32
+-#define MIN_BIO_PAGES  8
+ static struct kmem_cache *_crypt_io_pool;
+@@ -848,12 +847,11 @@ static struct bio *crypt_alloc_buffer(st
+               }
+               /*
+-               * if additional pages cannot be allocated without waiting,
+-               * return a partially allocated bio, the caller will then try
+-               * to allocate additional bios while submitting this partial bio
++               * If additional pages cannot be allocated without waiting,
++               * return a partially-allocated bio.  The caller will then try
++               * to allocate more bios while submitting this partial bio.
+                */
+-              if (i == (MIN_BIO_PAGES - 1))
+-                      gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
++              gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+               len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
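
For illustration, a minimal userspace sketch of the allocation policy the
patch adopts (alloc_page_wait and alloc_page_nowait are stand-ins for a
shared page reserve, not kernel APIs): only the first unit of a request may
block waiting for the reserve; later units are allowed to fail and the caller
proceeds with a partial buffer, so two requests can never sit waiting on
pages the other is holding.

/* Sketch only: allocate up to nr_pages, blocking at most for the first. */
#include <stdio.h>
#include <stdlib.h>

/* Assumed behaviour: _wait blocks until a page exists, _nowait returns NULL. */
static void *alloc_page_wait(void)   { return malloc(4096); }
static void *alloc_page_nowait(void) { return malloc(4096); }

static size_t alloc_buffer(void **pages, size_t nr_pages)
{
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		/* Only i == 0 may sleep on the reserve. */
		pages[i] = i ? alloc_page_nowait() : alloc_page_wait();
		if (!pages[i])
			break;	/* caller submits the partial buffer */
	}
	return i;	/* at least 1, possibly fewer than nr_pages */
}

int main(void)
{
	void *pages[8];
	size_t got = alloc_buffer(pages, 8);

	printf("got %zu of 8 pages\n", got);
	while (got--)
		free(pages[got]);
	return 0;
}
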
diff --git a/queue-3.3/dm-exception-store-fix-init-error-path.patch b/queue-3.3/dm-exception-store-fix-init-error-path.patch
new file mode 100644 (file)
index 0000000..34aec7d
--- /dev/null
@@ -0,0 +1,31 @@
+From aadbe266f2f89ccc68b52f4effc7b3a8b29521ef Mon Sep 17 00:00:00 2001
+From: Andrei Warkentin <andrey.warkentin@gmail.com>
+Date: Wed, 28 Mar 2012 18:41:22 +0100
+Subject: dm exception store: fix init error path
+
+From: Andrei Warkentin <andrey.warkentin@gmail.com>
+
+commit aadbe266f2f89ccc68b52f4effc7b3a8b29521ef upstream.
+
+Call the correct exit function on failure in dm_exception_store_init.
+
+Signed-off-by: Andrei Warkentin <andrey.warkentin@gmail.com>
+Acked-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-exception-store.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -283,7 +283,7 @@ int dm_exception_store_init(void)
+       return 0;
+ persistent_fail:
+-      dm_persistent_snapshot_exit();
++      dm_transient_snapshot_exit();
+ transient_fail:
+       return r;
+ }
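
For illustration, a self-contained sketch of the init/unwind pattern the
one-line fix restores (init_a, init_b and friends are placeholder names,
not the dm-exception-store registration calls): each failure label must
tear down only the steps that had already succeeded, in reverse order, so
the label reached when the second step fails undoes the first step.

#include <stdio.h>

static int  init_a(void) { return 0; }		/* succeeds */
static void exit_a(void) { puts("exit_a"); }
static int  init_b(void) { return -1; }		/* pretend this fails */

static int subsystem_init(void)
{
	int r;

	r = init_a();
	if (r)
		goto a_fail;

	r = init_b();
	if (r)
		goto b_fail;

	return 0;

b_fail:
	exit_a();	/* B never succeeded, so only A is torn down */
a_fail:
	return r;
}

int main(void)
{
	printf("init returned %d\n", subsystem_init());
	return 0;
}
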
diff --git a/queue-3.3/dm-persistent-data-fix-btree-rebalancing-after-remove.patch b/queue-3.3/dm-persistent-data-fix-btree-rebalancing-after-remove.patch
new file mode 100644 (file)
index 0000000..04b91a0
--- /dev/null
@@ -0,0 +1,269 @@
+From b0988900bae9ecf968a8a8d086a9eec671a9517a Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 28 Mar 2012 18:41:23 +0100
+Subject: dm persistent data: fix btree rebalancing after remove
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit b0988900bae9ecf968a8a8d086a9eec671a9517a upstream.
+
+When we remove an entry from a node we sometimes rebalance with its
+two neighbours.  This wasn't being done correctly; in some cases
+entries have to move all the way from the right neighbour to the left
+neighbour, or vice versa.  This patch pretty much re-writes the
+balancing code to fix it.
+
+This code is barely used currently; only when you delete a thin
+device, and then only if you have hundreds of them in the same pool.
+Once we have discard support, which removes mappings, this will be used
+much more heavily.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree-remove.c |  174 +++++++++++++++------------
+ 1 file changed, 99 insertions(+), 75 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -128,18 +128,9 @@ static void delete_at(struct node *n, un
+       n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+-static unsigned del_threshold(struct node *n)
+-{
+-      return le32_to_cpu(n->header.max_entries) / 3;
+-}
+-
+ static unsigned merge_threshold(struct node *n)
+ {
+-      /*
+-       * The extra one is because we know we're potentially going to
+-       * delete an entry.
+-       */
+-      return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
++      return le32_to_cpu(n->header.max_entries) / 3;
+ }
+ struct child {
+@@ -188,6 +179,15 @@ static int exit_child(struct dm_btree_in
+ static void shift(struct node *left, struct node *right, int count)
+ {
++      uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
++      uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
++      uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++      uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
++
++      BUG_ON(max_entries != r_max_entries);
++      BUG_ON(nr_left - count > max_entries);
++      BUG_ON(nr_right + count > max_entries);
++
+       if (!count)
+               return;
+@@ -199,13 +199,8 @@ static void shift(struct node *left, str
+               node_shift(right, count);
+       }
+-      left->header.nr_entries =
+-              cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+-      BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));
+-
+-      right->header.nr_entries =
+-              cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+-      BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
++      left->header.nr_entries = cpu_to_le32(nr_left - count);
++      right->header.nr_entries = cpu_to_le32(nr_right + count);
+ }
+ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+@@ -215,8 +210,9 @@ static void __rebalance2(struct dm_btree
+       struct node *right = r->n;
+       uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+       uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
++      unsigned threshold = 2 * merge_threshold(left) + 1;
+-      if (nr_left + nr_right <= merge_threshold(left)) {
++      if (nr_left + nr_right < threshold) {
+               /*
+                * Merge
+                */
+@@ -234,9 +230,6 @@ static void __rebalance2(struct dm_btree
+                * Rebalance.
+                */
+               unsigned target_left = (nr_left + nr_right) / 2;
+-              unsigned shift_ = nr_left - target_left;
+-              BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
+-              BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
+               shift(left, right, nr_left - target_left);
+               *key_ptr(parent, r->index) = right->keys[0];
+       }
+@@ -272,6 +265,84 @@ static int rebalance2(struct shadow_spin
+       return exit_child(info, &right);
+ }
++/*
++ * We dump as many entries from center as possible into left, then the rest
++ * in right, then rebalance2.  This wastes some cpu, but I want something
++ * simple atm.
++ */
++static void delete_center_node(struct dm_btree_info *info, struct node *parent,
++                             struct child *l, struct child *c, struct child *r,
++                             struct node *left, struct node *center, struct node *right,
++                             uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
++{
++      uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++      unsigned shift = min(max_entries - nr_left, nr_center);
++
++      BUG_ON(nr_left + shift > max_entries);
++      node_copy(left, center, -shift);
++      left->header.nr_entries = cpu_to_le32(nr_left + shift);
++
++      if (shift != nr_center) {
++              shift = nr_center - shift;
++              BUG_ON((nr_right + shift) > max_entries);
++              node_shift(right, shift);
++              node_copy(center, right, shift);
++              right->header.nr_entries = cpu_to_le32(nr_right + shift);
++      }
++      *key_ptr(parent, r->index) = right->keys[0];
++
++      delete_at(parent, c->index);
++      r->index--;
++
++      dm_tm_dec(info->tm, dm_block_location(c->block));
++      __rebalance2(info, parent, l, r);
++}
++
++/*
++ * Redistributes entries among 3 sibling nodes.
++ */
++static void redistribute3(struct dm_btree_info *info, struct node *parent,
++                        struct child *l, struct child *c, struct child *r,
++                        struct node *left, struct node *center, struct node *right,
++                        uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
++{
++      int s;
++      uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++      unsigned target = (nr_left + nr_center + nr_right) / 3;
++      BUG_ON(target > max_entries);
++
++      if (nr_left < nr_right) {
++              s = nr_left - target;
++
++              if (s < 0 && nr_center < -s) {
++                      /* not enough in central node */
++                      shift(left, center, nr_center);
++                      s = nr_center - target;
++                      shift(left, right, s);
++                      nr_right += s;
++              } else
++                      shift(left, center, s);
++
++              shift(center, right, target - nr_right);
++
++      } else {
++              s = target - nr_right;
++              if (s > 0 && nr_center < s) {
++                      /* not enough in central node */
++                      shift(center, right, nr_center);
++                      s = target - nr_center;
++                      shift(left, right, s);
++                      nr_left -= s;
++              } else
++                      shift(center, right, s);
++
++              shift(left, center, nr_left - target);
++      }
++
++      *key_ptr(parent, c->index) = center->keys[0];
++      *key_ptr(parent, r->index) = right->keys[0];
++}
++
+ static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+                        struct child *l, struct child *c, struct child *r)
+ {
+@@ -282,62 +353,18 @@ static void __rebalance3(struct dm_btree
+       uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+       uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+       uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+-      uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-      unsigned target;
++      unsigned threshold = merge_threshold(left) * 4 + 1;
+       BUG_ON(left->header.max_entries != center->header.max_entries);
+       BUG_ON(center->header.max_entries != right->header.max_entries);
+-      if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+-              /*
+-               * Delete center node:
+-               *
+-               * We dump as many entries from center as possible into
+-               * left, then the rest in right, then rebalance2.  This
+-               * wastes some cpu, but I want something simple atm.
+-               */
+-              unsigned shift = min(max_entries - nr_left, nr_center);
+-
+-              BUG_ON(nr_left + shift > max_entries);
+-              node_copy(left, center, -shift);
+-              left->header.nr_entries = cpu_to_le32(nr_left + shift);
+-
+-              if (shift != nr_center) {
+-                      shift = nr_center - shift;
+-                      BUG_ON((nr_right + shift) >= max_entries);
+-                      node_shift(right, shift);
+-                      node_copy(center, right, shift);
+-                      right->header.nr_entries = cpu_to_le32(nr_right + shift);
+-              }
+-              *key_ptr(parent, r->index) = right->keys[0];
+-
+-              delete_at(parent, c->index);
+-              r->index--;
+-
+-              dm_tm_dec(info->tm, dm_block_location(c->block));
+-              __rebalance2(info, parent, l, r);
+-
+-              return;
+-      }
+-
+-      /*
+-       * Rebalance
+-       */
+-      target = (nr_left + nr_center + nr_right) / 3;
+-      BUG_ON(target > max_entries);
+-
+-      /*
+-       * Adjust the left node
+-       */
+-      shift(left, center, nr_left - target);
+-
+-      /*
+-       * Adjust the right node
+-       */
+-      shift(center, right, target - nr_right);
+-      *key_ptr(parent, c->index) = center->keys[0];
+-      *key_ptr(parent, r->index) = right->keys[0];
++      if ((nr_left + nr_center + nr_right) < threshold)
++              delete_center_node(info, parent, l, c, r, left, center, right,
++                                 nr_left, nr_center, nr_right);
++      else
++              redistribute3(info, parent, l, c, r, left, center, right,
++                            nr_left, nr_center, nr_right);
+ }
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+@@ -441,9 +468,6 @@ static int rebalance_children(struct sha
+       if (r)
+               return r;
+-      if (child_entries > del_threshold(n))
+-              return 0;
+-
+       has_left_sibling = i > 0;
+       has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
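
For illustration, a counts-only sketch of the three-sibling decision the
rewritten code makes (MAX_ENTRIES and the resulting counts are assumptions
for the example; the real code moves keyed entries between blocks, not bare
counters): when the combined occupancy is below 4 * merge_threshold + 1 the
center node is emptied into its neighbours and deleted, otherwise entries
are redistributed so each node ends up with roughly a third of the total.

#include <stdio.h>

#define MAX_ENTRIES 126u	/* assumed per-node capacity */

static unsigned merge_threshold(void)
{
	return MAX_ENTRIES / 3;
}

static void rebalance3_counts(unsigned nr_left, unsigned nr_center,
			      unsigned nr_right)
{
	unsigned total = nr_left + nr_center + nr_right;
	unsigned threshold = 4 * merge_threshold() + 1;

	if (total < threshold) {
		/* Delete the center node: dump as much as fits into the
		 * left sibling, the remainder into the right one. */
		unsigned room_left = MAX_ENTRIES - nr_left;
		unsigned to_left = room_left < nr_center ? room_left : nr_center;

		printf("merge: left=%u right=%u (center removed)\n",
		       nr_left + to_left, nr_right + (nr_center - to_left));
	} else {
		/* Redistribute: aim for about total / 3 entries per node. */
		unsigned target = total / 3;

		printf("redistribute: left=%u center=%u right=%u\n",
		       target, target, total - 2 * target);
	}
}

int main(void)
{
	rebalance3_counts(20, 3, 15);	/* sparse: center node is deleted  */
	rebalance3_counts(80, 5, 90);	/* full enough: entries are spread */
	return 0;
}
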
diff --git a/queue-3.3/dm-thin-fix-stacked-bi_next-usage.patch b/queue-3.3/dm-thin-fix-stacked-bi_next-usage.patch
new file mode 100644 (file)
index 0000000..1f7ca5a
--- /dev/null
@@ -0,0 +1,231 @@
+From 6f94a4c45a6f744383f9f695dde019998db3df55 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 28 Mar 2012 18:41:23 +0100
+Subject: dm thin: fix stacked bi_next usage
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 6f94a4c45a6f744383f9f695dde019998db3df55 upstream.
+
+Avoid using the bi_next field for the holder of a cell when deferring
+bios because a stacked device below might change it.  Store the
+holder in a new field in struct cell instead.
+
+Previously, when a cell was created, the bio that triggered its creation
+(the holder) was added to the same bio list as subsequent bios.  In some
+cases we pass
+this holder bio directly to devices underneath.  If those devices use
+the bi_next field there will be trouble...
+
+This also simplifies some code that had to work out which bio was the
+holder.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |  126 +++++++++++++++++++++++++++++----------------------
+ 1 file changed, 74 insertions(+), 52 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -124,7 +124,7 @@ struct cell {
+       struct hlist_node list;
+       struct bio_prison *prison;
+       struct cell_key key;
+-      unsigned count;
++      struct bio *holder;
+       struct bio_list bios;
+ };
+@@ -220,54 +220,59 @@ static struct cell *__search_bucket(stru
+  * This may block if a new cell needs allocating.  You must ensure that
+  * cells will be unlocked even if the calling thread is blocked.
+  *
+- * Returns the number of entries in the cell prior to the new addition
+- * or < 0 on failure.
++ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+  */
+ static int bio_detain(struct bio_prison *prison, struct cell_key *key,
+                     struct bio *inmate, struct cell **ref)
+ {
+-      int r;
++      int r = 1;
+       unsigned long flags;
+       uint32_t hash = hash_key(prison, key);
+-      struct cell *uninitialized_var(cell), *cell2 = NULL;
++      struct cell *cell, *cell2;
+       BUG_ON(hash > prison->nr_buckets);
+       spin_lock_irqsave(&prison->lock, flags);
+-      cell = __search_bucket(prison->cells + hash, key);
+-      if (!cell) {
+-              /*
+-               * Allocate a new cell
+-               */
+-              spin_unlock_irqrestore(&prison->lock, flags);
+-              cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
+-              spin_lock_irqsave(&prison->lock, flags);
+-
+-              /*
+-               * We've been unlocked, so we have to double check that
+-               * nobody else has inserted this cell in the meantime.
+-               */
+-              cell = __search_bucket(prison->cells + hash, key);
+-
+-              if (!cell) {
+-                      cell = cell2;
+-                      cell2 = NULL;
+-
+-                      cell->prison = prison;
+-                      memcpy(&cell->key, key, sizeof(cell->key));
+-                      cell->count = 0;
+-                      bio_list_init(&cell->bios);
+-                      hlist_add_head(&cell->list, prison->cells + hash);
+-              }
++      cell = __search_bucket(prison->cells + hash, key);
++      if (cell) {
++              bio_list_add(&cell->bios, inmate);
++              goto out;
+       }
+-      r = cell->count++;
+-      bio_list_add(&cell->bios, inmate);
++      /*
++       * Allocate a new cell
++       */
+       spin_unlock_irqrestore(&prison->lock, flags);
++      cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
++      spin_lock_irqsave(&prison->lock, flags);
+-      if (cell2)
++      /*
++       * We've been unlocked, so we have to double check that
++       * nobody else has inserted this cell in the meantime.
++       */
++      cell = __search_bucket(prison->cells + hash, key);
++      if (cell) {
+               mempool_free(cell2, prison->cell_pool);
++              bio_list_add(&cell->bios, inmate);
++              goto out;
++      }
++
++      /*
++       * Use new cell.
++       */
++      cell = cell2;
++
++      cell->prison = prison;
++      memcpy(&cell->key, key, sizeof(cell->key));
++      cell->holder = inmate;
++      bio_list_init(&cell->bios);
++      hlist_add_head(&cell->list, prison->cells + hash);
++
++      r = 0;
++
++out:
++      spin_unlock_irqrestore(&prison->lock, flags);
+       *ref = cell;
+@@ -283,8 +288,8 @@ static void __cell_release(struct cell *
+       hlist_del(&cell->list);
+-      if (inmates)
+-              bio_list_merge(inmates, &cell->bios);
++      bio_list_add(inmates, cell->holder);
++      bio_list_merge(inmates, &cell->bios);
+       mempool_free(cell, prison->cell_pool);
+ }
+@@ -305,22 +310,44 @@ static void cell_release(struct cell *ce
+  * bio may be in the cell.  This function releases the cell, and also does
+  * a sanity check.
+  */
++static void __cell_release_singleton(struct cell *cell, struct bio *bio)
++{
++      hlist_del(&cell->list);
++      BUG_ON(cell->holder != bio);
++      BUG_ON(!bio_list_empty(&cell->bios));
++}
++
+ static void cell_release_singleton(struct cell *cell, struct bio *bio)
+ {
+-      struct bio_prison *prison = cell->prison;
+-      struct bio_list bios;
+-      struct bio *b;
+       unsigned long flags;
+-
+-      bio_list_init(&bios);
++      struct bio_prison *prison = cell->prison;
+       spin_lock_irqsave(&prison->lock, flags);
+-      __cell_release(cell, &bios);
++      __cell_release_singleton(cell, bio);
+       spin_unlock_irqrestore(&prison->lock, flags);
++}
++
++/*
++ * Sometimes we don't want the holder, just the additional bios.
++ */
++static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
++{
++      struct bio_prison *prison = cell->prison;
++
++      hlist_del(&cell->list);
++      bio_list_merge(inmates, &cell->bios);
++
++      mempool_free(cell, prison->cell_pool);
++}
++
++static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
++{
++      unsigned long flags;
++      struct bio_prison *prison = cell->prison;
+-      b = bio_list_pop(&bios);
+-      BUG_ON(b != bio);
+-      BUG_ON(!bio_list_empty(&bios));
++      spin_lock_irqsave(&prison->lock, flags);
++      __cell_release_no_holder(cell, inmates);
++      spin_unlock_irqrestore(&prison->lock, flags);
+ }
+ static void cell_error(struct cell *cell)
+@@ -800,21 +827,16 @@ static void cell_defer(struct thin_c *tc
+  * Same as cell_defer above, except it omits one particular detainee,
+  * a write bio that covers the block and has already been processed.
+  */
+-static void cell_defer_except(struct thin_c *tc, struct cell *cell,
+-                            struct bio *exception)
++static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+ {
+       struct bio_list bios;
+-      struct bio *bio;
+       struct pool *pool = tc->pool;
+       unsigned long flags;
+       bio_list_init(&bios);
+-      cell_release(cell, &bios);
+       spin_lock_irqsave(&pool->lock, flags);
+-      while ((bio = bio_list_pop(&bios)))
+-              if (bio != exception)
+-                      bio_list_add(&pool->deferred_bios, bio);
++      cell_release_no_holder(cell, &pool->deferred_bios);
+       spin_unlock_irqrestore(&pool->lock, flags);
+       wake_worker(pool);
+@@ -854,7 +876,7 @@ static void process_prepared_mapping(str
+        * the bios in the cell.
+        */
+       if (bio) {
+-              cell_defer_except(tc, m->cell, bio);
++              cell_defer_except(tc, m->cell);
+               bio_endio(bio, 0);
+       } else
+               cell_defer(tc, m->cell, m->data_block);
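
For illustration, a userspace sketch of the bio-prison change (struct bio
and struct cell here are toy types, not the dm-thin definitions): the first
bio to claim a key becomes a dedicated holder stored in its own field, and
only later bios go on the shared list, so a lower device that rewrites the
holder's link pointer can no longer corrupt the queue of waiters.

#include <stdio.h>

struct bio {
	const char *name;
	struct bio *next;	/* may be rewritten by a stacked device */
};

struct cell {
	struct bio *holder;	/* first detainee, kept off the list */
	struct bio *waiters;	/* subsequent detainees only */
};

/* Returns 0 if @b became the holder, 1 if it was queued behind one. */
static int cell_detain(struct cell *c, struct bio *b)
{
	if (!c->holder) {
		c->holder = b;
		return 0;
	}
	b->next = c->waiters;
	c->waiters = b;
	return 1;
}

int main(void)
{
	struct cell c = { 0 };
	struct bio a = { "a", 0 }, b = { "b", 0 };

	printf("a is holder: %d\n", !cell_detain(&c, &a));
	printf("b is holder: %d\n", !cell_detain(&c, &b));

	/* A lower layer may now clobber a.next; the waiter list is safe. */
	a.next = 0;
	printf("first waiter: %s\n", c.waiters ? c.waiters->name : "none");
	return 0;
}
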
diff --git a/queue-3.3/series b/queue-3.3/series
index 625fdbf0333121c6b0bf42d87d6ad280bdc2581f..bac18988dd4efc353fed16217b0663d9b7a0ed1f 100644 (file)
--- a/queue-3.3/series
@@ -145,3 +145,8 @@ gpio-davinci-fix-oops-on-unbanked-gpio-irq-request.patch
 gpio-davinci-fix-enabling-unbanked-gpio-irqs.patch
 mfd-test-for-jack-detection-when-deciding-if-wm8994-should-suspend.patch
 drm-radeon-kms-atom-force-bpc-to-8-for-now.patch
+dm-crypt-fix-mempool-deadlock.patch
+dm-crypt-add-missing-error-handling.patch
+dm-exception-store-fix-init-error-path.patch
+dm-persistent-data-fix-btree-rebalancing-after-remove.patch
+dm-thin-fix-stacked-bi_next-usage.patch