git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Nov 2012 18:38:49 +0000 (10:38 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Nov 2012 18:38:49 +0000 (10:38 -0800)
added patches:
0001-crush-clean-up-types-const-ness.patch
0002-crush-adjust-local-retry-threshold.patch
0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
0004-crush-fix-tree-node-weight-lookup.patch
0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
0006-ceph-osd_client-fix-endianness-bug-in-osd_req_encode.patch
0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
0008-ceph-messenger-update-to-in-read_partial-caller.patch

queue-3.4/0001-crush-clean-up-types-const-ness.patch [new file with mode: 0644]
queue-3.4/0002-crush-adjust-local-retry-threshold.patch [new file with mode: 0644]
queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch [new file with mode: 0644]
queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch [new file with mode: 0644]
queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch [new file with mode: 0644]
queue-3.4/0006-ceph-osd_client-fix-endianness-bug-in-osd_req_encode.patch [new file with mode: 0644]
queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch [new file with mode: 0644]
queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch [new file with mode: 0644]
queue-3.4/series

diff --git a/queue-3.4/0001-crush-clean-up-types-const-ness.patch b/queue-3.4/0001-crush-clean-up-types-const-ness.patch
new file mode 100644 (file)
index 0000000..678ba78
--- /dev/null
@@ -0,0 +1,206 @@
+From 5fc9a378abff45e4527f28bcde687db748969419 Mon Sep 17 00:00:00 2001
+From: Sage Weil <sage@inktank.com>
+Date: Mon, 7 May 2012 15:38:35 -0700
+Subject: crush: clean up types, const-ness
+
+From: Sage Weil <sage@inktank.com>
+
+(cherry picked from commit 8b12d47b80c7a34dffdd98244d99316db490ec58)
+
+Move various types from int -> __u32 (or similar), and add const as
+appropriate.
+
+This reflects changes that have been present in the userland implementation
+for some time.
+
+Reviewed-by: Alex Elder <elder@inktank.com>
+Signed-off-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/crush/crush.h  |    2 +-
+ include/linux/crush/mapper.h |    6 +++---
+ net/ceph/crush/crush.c       |    8 ++++----
+ net/ceph/crush/mapper.c      |   31 ++++++++++++++++---------------
+ 4 files changed, 24 insertions(+), 23 deletions(-)
+
+--- a/include/linux/crush/crush.h
++++ b/include/linux/crush/crush.h
+@@ -168,7 +168,7 @@ struct crush_map {
+ /* crush.c */
+-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
++extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
+ extern void crush_calc_parents(struct crush_map *map);
+ extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
+ extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
+--- a/include/linux/crush/mapper.h
++++ b/include/linux/crush/mapper.h
+@@ -10,11 +10,11 @@
+ #include "crush.h"
+-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
+-extern int crush_do_rule(struct crush_map *map,
++extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
++extern int crush_do_rule(const struct crush_map *map,
+                        int ruleno,
+                        int x, int *result, int result_max,
+                        int forcefeed,    /* -1 for none */
+-                       __u32 *weights);
++                       const __u32 *weights);
+ #endif
+--- a/net/ceph/crush/crush.c
++++ b/net/ceph/crush/crush.c
+@@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int al
+  * @b: bucket pointer
+  * @p: item index in bucket
+  */
+-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
++int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
+ {
+-      if (p >= b->size)
++      if ((__u32)p >= b->size)
+               return 0;
+       switch (b->alg) {
+@@ -124,10 +124,9 @@ void crush_destroy_bucket(struct crush_b
+  */
+ void crush_destroy(struct crush_map *map)
+ {
+-      int b;
+-
+       /* buckets */
+       if (map->buckets) {
++              __s32 b;
+               for (b = 0; b < map->max_buckets; b++) {
+                       if (map->buckets[b] == NULL)
+                               continue;
+@@ -138,6 +137,7 @@ void crush_destroy(struct crush_map *map
+       /* rules */
+       if (map->rules) {
++              __u32 b;
+               for (b = 0; b < map->max_rules; b++)
+                       kfree(map->rules[b]);
+               kfree(map->rules);
+--- a/net/ceph/crush/mapper.c
++++ b/net/ceph/crush/mapper.c
+@@ -32,9 +32,9 @@
+  * @type: storage ruleset type (user defined)
+  * @size: output set size
+  */
+-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
++int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
+ {
+-      int i;
++      __u32 i;
+       for (i = 0; i < map->max_rules; i++) {
+               if (map->rules[i] &&
+@@ -72,7 +72,7 @@ static int bucket_perm_choose(struct cru
+       unsigned i, s;
+       /* start a new permutation if @x has changed */
+-      if (bucket->perm_x != x || bucket->perm_n == 0) {
++      if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
+               dprintk("bucket %d new x=%d\n", bucket->id, x);
+               bucket->perm_x = x;
+@@ -219,7 +219,7 @@ static int bucket_tree_choose(struct cru
+ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
+                              int x, int r)
+ {
+-      int i;
++      __u32 i;
+       int high = 0;
+       __u64 high_draw = 0;
+       __u64 draw;
+@@ -262,7 +262,7 @@ static int crush_bucket_choose(struct cr
+  * true if device is marked "out" (failed, fully offloaded)
+  * of the cluster
+  */
+-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
++static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
+ {
+       if (weight[item] >= 0x10000)
+               return 0;
+@@ -287,16 +287,16 @@ static int is_out(struct crush_map *map,
+  * @recurse_to_leaf: true if we want one device under each item of given type
+  * @out2: second output vector for leaf items (if @recurse_to_leaf)
+  */
+-static int crush_choose(struct crush_map *map,
++static int crush_choose(const struct crush_map *map,
+                       struct crush_bucket *bucket,
+-                      __u32 *weight,
++                      const __u32 *weight,
+                       int x, int numrep, int type,
+                       int *out, int outpos,
+                       int firstn, int recurse_to_leaf,
+                       int *out2)
+ {
+       int rep;
+-      int ftotal, flocal;
++      unsigned int ftotal, flocal;
+       int retry_descent, retry_bucket, skip_rep;
+       struct crush_bucket *in = bucket;
+       int r;
+@@ -304,7 +304,7 @@ static int crush_choose(struct crush_map
+       int item = 0;
+       int itemtype;
+       int collide, reject;
+-      const int orig_tries = 5; /* attempts before we fall back to search */
++      const unsigned int orig_tries = 5; /* attempts before we fall back to search */
+       dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
+               bucket->id, x, outpos, numrep);
+@@ -325,7 +325,7 @@ static int crush_choose(struct crush_map
+                               r = rep;
+                               if (in->alg == CRUSH_BUCKET_UNIFORM) {
+                                       /* be careful */
+-                                      if (firstn || numrep >= in->size)
++                                      if (firstn || (__u32)numrep >= in->size)
+                                               /* r' = r + f_total */
+                                               r += ftotal;
+                                       else if (in->size % numrep == 0)
+@@ -425,7 +425,7 @@ reject:
+                                               /* else give up */
+                                               skip_rep = 1;
+                                       dprintk("  reject %d  collide %d  "
+-                                              "ftotal %d  flocal %d\n",
++                                              "ftotal %u  flocal %u\n",
+                                               reject, collide, ftotal,
+                                               flocal);
+                               }
+@@ -456,9 +456,9 @@ reject:
+  * @result_max: maximum result size
+  * @force: force initial replica choice; -1 for none
+  */
+-int crush_do_rule(struct crush_map *map,
++int crush_do_rule(const struct crush_map *map,
+                 int ruleno, int x, int *result, int result_max,
+-                int force, __u32 *weight)
++                int force, const __u32 *weight)
+ {
+       int result_len;
+       int force_context[CRUSH_MAX_DEPTH];
+@@ -473,7 +473,7 @@ int crush_do_rule(struct crush_map *map,
+       int osize;
+       int *tmp;
+       struct crush_rule *rule;
+-      int step;
++      __u32 step;
+       int i, j;
+       int numrep;
+       int firstn;
+@@ -488,7 +488,8 @@ int crush_do_rule(struct crush_map *map,
+       /*
+        * determine hierarchical context of force, if any.  note
+        * that this may or may not correspond to the specific types
+-       * referenced by the crush rule.
++       * referenced by the crush rule.  it will also only affect
++       * the first descent (TAKE).
+        */
+       if (force >= 0 &&
+           force < map->max_devices &&
diff --git a/queue-3.4/0002-crush-adjust-local-retry-threshold.patch b/queue-3.4/0002-crush-adjust-local-retry-threshold.patch
new file mode 100644 (file)
index 0000000..b909002
--- /dev/null
@@ -0,0 +1,31 @@
+From defd846d69bae213dc1c2c152bb3075138557fd5 Mon Sep 17 00:00:00 2001
+From: Sage Weil <sage@inktank.com>
+Date: Mon, 7 May 2012 15:35:09 -0700
+Subject: crush: adjust local retry threshold
+
+From: Sage Weil <sage@inktank.com>
+
+(cherry picked from commit c90f95ed46393e29d843686e21947d1c6fcb1164)
+
+This small adjustment reflects a change that was made in ceph.git commit
+af6a9f30696c900a2a8bd7ae24e8ed15fb4964bb, about 6 months ago.  An N-1
+search is not exhaustive.  Fixed ceph.git bug #1594.
+
+Reviewed-by: Alex Elder <elder@inktank.com>
+Signed-off-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/crush/mapper.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ceph/crush/mapper.c
++++ b/net/ceph/crush/mapper.c
+@@ -415,7 +415,7 @@ reject:
+                                       if (collide && flocal < 3)
+                                               /* retry locally a few times */
+                                               retry_bucket = 1;
+-                                      else if (flocal < in->size + orig_tries)
++                                      else if (flocal <= in->size + orig_tries)
+                                               /* exhaustive bucket search */
+                                               retry_bucket = 1;
+                                       else if (ftotal < 20)
diff --git a/queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch b/queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
new file mode 100644 (file)
index 0000000..9ea8768
--- /dev/null
@@ -0,0 +1,112 @@
+From 44e4548c74c41db47f356fce9115df067ce1f4e1 Mon Sep 17 00:00:00 2001
+From: Sage Weil <sage@inktank.com>
+Date: Mon, 7 May 2012 15:35:24 -0700
+Subject: crush: be more tolerant of nonsensical crush maps
+
+From: Sage Weil <sage@inktank.com>
+
+(cherry picked from commit a1f4895be8bf1ba56c2306b058f51619e9b0e8f8)
+
+If we get a map that doesn't make sense, error out or ignore the badness
+instead of BUGging out.  This reflects the ceph.git commits
+9895f0bff7dc68e9b49b572613d242315fb11b6c and
+8ded26472058d5205803f244c2f33cb6cb10de79.
+
+Reviewed-by: Alex Elder <elder@inktank.com>
+Signed-off-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/crush/mapper.c |   33 ++++++++++++++++++++++++---------
+ 1 file changed, 24 insertions(+), 9 deletions(-)
+
+--- a/net/ceph/crush/mapper.c
++++ b/net/ceph/crush/mapper.c
+@@ -152,8 +152,8 @@ static int bucket_list_choose(struct cru
+                       return bucket->h.items[i];
+       }
+-      BUG_ON(1);
+-      return 0;
++      dprintk("bad list sums for bucket %d\n", bucket->h.id);
++      return bucket->h.items[0];
+ }
+@@ -239,6 +239,7 @@ static int bucket_straw_choose(struct cr
+ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
+ {
+       dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
++      BUG_ON(in->size == 0);
+       switch (in->alg) {
+       case CRUSH_BUCKET_UNIFORM:
+               return bucket_uniform_choose((struct crush_bucket_uniform *)in,
+@@ -253,7 +254,7 @@ static int crush_bucket_choose(struct cr
+               return bucket_straw_choose((struct crush_bucket_straw *)in,
+                                          x, r);
+       default:
+-              BUG_ON(1);
++              dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
+               return in->items[0];
+       }
+ }
+@@ -354,7 +355,11 @@ static int crush_choose(const struct cru
+                                       item = bucket_perm_choose(in, x, r);
+                               else
+                                       item = crush_bucket_choose(in, x, r);
+-                              BUG_ON(item >= map->max_devices);
++                              if (item >= map->max_devices) {
++                                      dprintk("   bad item %d\n", item);
++                                      skip_rep = 1;
++                                      break;
++                              }
+                               /* desired type? */
+                               if (item < 0)
+@@ -365,8 +370,12 @@ static int crush_choose(const struct cru
+                               /* keep going? */
+                               if (itemtype != type) {
+-                                      BUG_ON(item >= 0 ||
+-                                             (-1-item) >= map->max_buckets);
++                                      if (item >= 0 ||
++                                          (-1-item) >= map->max_buckets) {
++                                              dprintk("   bad item type %d\n", type);
++                                              skip_rep = 1;
++                                              break;
++                                      }
+                                       in = map->buckets[-1-item];
+                                       retry_bucket = 1;
+                                       continue;
+@@ -478,7 +487,10 @@ int crush_do_rule(const struct crush_map
+       int numrep;
+       int firstn;
+-      BUG_ON(ruleno >= map->max_rules);
++      if ((__u32)ruleno >= map->max_rules) {
++              dprintk(" bad ruleno %d\n", ruleno);
++              return 0;
++      }
+       rule = map->rules[ruleno];
+       result_len = 0;
+@@ -528,7 +540,8 @@ int crush_do_rule(const struct crush_map
+                       firstn = 1;
+               case CRUSH_RULE_CHOOSE_LEAF_INDEP:
+               case CRUSH_RULE_CHOOSE_INDEP:
+-                      BUG_ON(wsize == 0);
++                      if (wsize == 0)
++                              break;
+                       recurse_to_leaf =
+                               rule->steps[step].op ==
+@@ -597,7 +610,9 @@ int crush_do_rule(const struct crush_map
+                       break;
+               default:
+-                      BUG_ON(1);
++                      dprintk(" unknown op %d at step %d\n",
++                              curstep->op, step);
++                      break;
+               }
+       }
+       return result_len;
diff --git a/queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch b/queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch
new file mode 100644 (file)
index 0000000..97f4046
--- /dev/null
@@ -0,0 +1,46 @@
+From 7e2c188f5aa5a008bace69a30260eceac7873d95 Mon Sep 17 00:00:00 2001
+From: Sage Weil <sage@inktank.com>
+Date: Mon, 7 May 2012 15:36:49 -0700
+Subject: crush: fix tree node weight lookup
+
+From: Sage Weil <sage@inktank.com>
+
+(cherry picked from commit f671d4cd9b36691ac4ef42cde44c1b7a84e13631)
+
+Fix the node weight lookup for tree buckets by using a correct accessor.
+
+Reflects ceph.git commit d287ade5bcbdca82a3aef145b92924cf1e856733.
+
+Reviewed-by: Alex Elder <elder@inktank.com>
+Signed-off-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/crush/crush.h |    5 +++++
+ net/ceph/crush/crush.c      |    4 +---
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/include/linux/crush/crush.h
++++ b/include/linux/crush/crush.h
+@@ -177,4 +177,9 @@ extern void crush_destroy_bucket_straw(s
+ extern void crush_destroy_bucket(struct crush_bucket *b);
+ extern void crush_destroy(struct crush_map *map);
++static inline int crush_calc_tree_node(int i)
++{
++      return ((i+1) << 1)-1;
++}
++
+ #endif
+--- a/net/ceph/crush/crush.c
++++ b/net/ceph/crush/crush.c
+@@ -37,9 +37,7 @@ int crush_get_bucket_item_weight(const s
+       case CRUSH_BUCKET_LIST:
+               return ((struct crush_bucket_list *)b)->item_weights[p];
+       case CRUSH_BUCKET_TREE:
+-              if (p & 1)
+-                      return ((struct crush_bucket_tree *)b)->node_weights[p];
+-              return 0;
++              return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
+       case CRUSH_BUCKET_STRAW:
+               return ((struct crush_bucket_straw *)b)->item_weights[p];
+       }
diff --git a/queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch b/queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
new file mode 100644 (file)
index 0000000..1885664
--- /dev/null
@@ -0,0 +1,30 @@
+From e630305ecf47d81a0ff4c21f0cf167fc1ce477b0 Mon Sep 17 00:00:00 2001
+From: Sage Weil <sage@inktank.com>
+Date: Mon, 7 May 2012 15:37:05 -0700
+Subject: crush: fix memory leak when destroying tree buckets
+
+From: Sage Weil <sage@inktank.com>
+
+(cherry picked from commit 6eb43f4b5a2a74599b4ff17a97c03a342327ca65)
+
+Reflects ceph.git commit 46d63d98434b3bc9dad2fc9ab23cbaedc3bcb0e4.
+
+Reported-by: Alexander Lyakas <alex.bolshoy@gmail.com>
+Reviewed-by: Alex Elder <elder@inktank.com>
+Signed-off-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/crush/crush.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ceph/crush/crush.c
++++ b/net/ceph/crush/crush.c
+@@ -85,6 +85,8 @@ void crush_destroy_bucket_list(struct cr
+ void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
+ {
++      kfree(b->h.perm);
++      kfree(b->h.items);
+       kfree(b->node_weights);
+       kfree(b);
+ }
diff --git a/queue-3.4/0006-ceph-osd_client-fix-endianness-bug-in-osd_req_encode.patch b/queue-3.4/0006-ceph-osd_client-fix-endianness-bug-in-osd_req_encode.patch
new file mode 100644 (file)
index 0000000..49b913e
--- /dev/null
@@ -0,0 +1,32 @@
+From 51da4e7a2aba62a1ccbba8841332c543a3c30187 Mon Sep 17 00:00:00 2001
+From: Alex Elder <elder@dreamhost.com>
+Date: Fri, 20 Apr 2012 15:49:43 -0500
+Subject: ceph: osd_client: fix endianness bug in osd_req_encode_op()
+
+From: Alex Elder <elder@dreamhost.com>
+
+(cherry picked from commit 065a68f9167e20f321a62d044cb2c3024393d455)
+
+From Al Viro <viro@zeniv.linux.org.uk>
+
+Al Viro noticed that we were using a non-cpu-encoded value in
+a switch statement in osd_req_encode_op().  The result would
+clearly not work correctly on a big-endian machine.
+
+Signed-off-by: Alex Elder <elder@dreamhost.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/osd_client.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -278,7 +278,7 @@ static void osd_req_encode_op(struct cep
+ {
+       dst->op = cpu_to_le16(src->op);
+-      switch (dst->op) {
++      switch (src->op) {
+       case CEPH_OSD_OP_READ:
+       case CEPH_OSD_OP_WRITE:
+               dst->extent.offset =
diff --git a/queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch b/queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
new file mode 100644 (file)
index 0000000..dc4a4bc
--- /dev/null
@@ -0,0 +1,73 @@
+From 01cfe16230deeb4f97854f80212d9c25305b2311 Mon Sep 17 00:00:00 2001
+From: Alex Elder <elder@inktank.com>
+Date: Thu, 10 May 2012 10:29:50 -0500
+Subject: ceph: messenger: use read_partial() in read_partial_message()
+
+From: Alex Elder <elder@inktank.com>
+
+(cherry picked from commit 57dac9d1620942608306d8c17c98a9d1568ffdf4)
+
+There are two blocks of code in read_partial_message()--those that
+read the header and footer of the message--that can be replaced by a
+call to read_partial().  Do that.
+
+Signed-off-by: Alex Elder <elder@inktank.com>
+Reviewed-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/messenger.c |   30 ++++++++++--------------------
+ 1 file changed, 10 insertions(+), 20 deletions(-)
+
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1628,7 +1628,7 @@ static int read_partial_message(struct c
+ {
+       struct ceph_msg *m = con->in_msg;
+       int ret;
+-      int to, left;
++      int to;
+       unsigned front_len, middle_len, data_len;
+       bool do_datacrc = !con->msgr->nocrc;
+       int skip;
+@@ -1638,15 +1638,10 @@ static int read_partial_message(struct c
+       dout("read_partial_message con %p msg %p\n", con, m);
+       /* header */
+-      while (con->in_base_pos < sizeof(con->in_hdr)) {
+-              left = sizeof(con->in_hdr) - con->in_base_pos;
+-              ret = ceph_tcp_recvmsg(con->sock,
+-                                     (char *)&con->in_hdr + con->in_base_pos,
+-                                     left);
+-              if (ret <= 0)
+-                      return ret;
+-              con->in_base_pos += ret;
+-      }
++      to = 0;
++      ret = read_partial(con, &to, sizeof (con->in_hdr), &con->in_hdr);
++      if (ret <= 0)
++              return ret;
+       crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
+       if (cpu_to_le32(crc) != con->in_hdr.crc) {
+@@ -1759,16 +1754,11 @@ static int read_partial_message(struct c
+       }
+       /* footer */
+-      to = sizeof(m->hdr) + sizeof(m->footer);
+-      while (con->in_base_pos < to) {
+-              left = to - con->in_base_pos;
+-              ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
+-                                     (con->in_base_pos - sizeof(m->hdr)),
+-                                     left);
+-              if (ret <= 0)
+-                      return ret;
+-              con->in_base_pos += ret;
+-      }
++      to = sizeof (m->hdr);
++      ret = read_partial(con, &to, sizeof (m->footer), &m->footer);
++      if (ret <= 0)
++              return ret;
++
+       dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
+            m, front_len, m->footer.front_crc, middle_len,
+            m->footer.middle_crc, data_len, m->footer.data_crc);
diff --git a/queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch b/queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch
new file mode 100644 (file)
index 0000000..6c98d18
--- /dev/null
@@ -0,0 +1,107 @@
+From 9421e64f2c08f167f10bf34879b4b2ed32060b5c Mon Sep 17 00:00:00 2001
+From: Alex Elder <elder@inktank.com>
+Date: Thu, 10 May 2012 10:29:50 -0500
+Subject: ceph: messenger: update "to" in read_partial() caller
+
+From: Alex Elder <elder@inktank.com>
+
+(cherry picked from commit e6cee71fac27c946a0bbad754dd076e66c4e9dbd)
+
+read_partial() always increases whatever "to" value is supplied by
+adding the requested size to it, and that's the only thing it does
+with that pointed-to value.
+
+Do that pointer advance in the caller (and then only when the
+updated value will be subsequently used), and change the "to"
+parameter to be an in-only and non-pointer value.
+
+Signed-off-by: Alex Elder <elder@inktank.com>
+Reviewed-by: Sage Weil <sage@inktank.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/messenger.c |   31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -992,11 +992,12 @@ static int prepare_read_message(struct c
+ static int read_partial(struct ceph_connection *con,
+-                      int *to, int size, void *object)
++                      int to, int size, void *object)
+ {
+-      *to += size;
+-      while (con->in_base_pos < *to) {
+-              int left = *to - con->in_base_pos;
++      int end = to + size;
++
++      while (con->in_base_pos < end) {
++              int left = end - con->in_base_pos;
+               int have = size - left;
+               int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
+               if (ret <= 0)
+@@ -1017,14 +1018,16 @@ static int read_partial_banner(struct ce
+       dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
+       /* peer's banner */
+-      ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
++      ret = read_partial(con, to, strlen(CEPH_BANNER), con->in_banner);
+       if (ret <= 0)
+               goto out;
+-      ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
++      to += strlen(CEPH_BANNER);
++      ret = read_partial(con, to, sizeof(con->actual_peer_addr),
+                          &con->actual_peer_addr);
+       if (ret <= 0)
+               goto out;
+-      ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
++      to += sizeof(con->actual_peer_addr);
++      ret = read_partial(con, to, sizeof(con->peer_addr_for_me),
+                          &con->peer_addr_for_me);
+       if (ret <= 0)
+               goto out;
+@@ -1038,10 +1041,11 @@ static int read_partial_connect(struct c
+       dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
+-      ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
++      ret = read_partial(con, to, sizeof(con->in_reply), &con->in_reply);
+       if (ret <= 0)
+               goto out;
+-      ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
++      to += sizeof(con->in_reply);
++      ret = read_partial(con, to, le32_to_cpu(con->in_reply.authorizer_len),
+                          con->auth_reply_buf);
+       if (ret <= 0)
+               goto out;
+@@ -1491,9 +1495,7 @@ static int process_connect(struct ceph_c
+  */
+ static int read_partial_ack(struct ceph_connection *con)
+ {
+-      int to = 0;
+-
+-      return read_partial(con, &to, sizeof(con->in_temp_ack),
++      return read_partial(con, 0, sizeof(con->in_temp_ack),
+                           &con->in_temp_ack);
+ }
+@@ -1638,8 +1640,7 @@ static int read_partial_message(struct c
+       dout("read_partial_message con %p msg %p\n", con, m);
+       /* header */
+-      to = 0;
+-      ret = read_partial(con, &to, sizeof (con->in_hdr), &con->in_hdr);
++      ret = read_partial(con, 0, sizeof (con->in_hdr), &con->in_hdr);
+       if (ret <= 0)
+               return ret;
+@@ -1755,7 +1756,7 @@ static int read_partial_message(struct c
+       /* footer */
+       to = sizeof (m->hdr);
+-      ret = read_partial(con, &to, sizeof (m->footer), &m->footer);
++      ret = read_partial(con, to, sizeof (m->footer), &m->footer);
+       if (ret <= 0)
+               return ret;
index cfe9f49cbf8f4f4880e6102c0c4ea89d1c41d0fc..ca889807e3016d9b763615adf096d6833117122d 100644 (file)
@@ -53,3 +53,11 @@ reiserfs-protect-reiserfs_quota_on-with-write-lock.patch
 reiserfs-move-quota-calls-out-of-write-lock.patch
 reiserfs-protect-reiserfs_quota_write-with-write-lock.patch
 selinux-fix-sel_netnode_insert-suspicious-rcu-dereference.patch
+0001-crush-clean-up-types-const-ness.patch
+0002-crush-adjust-local-retry-threshold.patch
+0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
+0004-crush-fix-tree-node-weight-lookup.patch
+0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
+0006-ceph-osd_client-fix-endianness-bug-in-osd_req_encode.patch
+0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
+0008-ceph-messenger-update-to-in-read_partial-caller.patch