git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.7-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 25 Sep 2016 18:17:32 +0000 (20:17 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 25 Sep 2016 18:17:32 +0000 (20:17 +0200)
added patches:
autofs-use-dentry-flags-to-block-walks-during-expire.patch
crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
crypto-echainiv-replace-chaining-with-multiplication.patch
crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
ocfs2-dlm-fix-race-between-convert-and-migration.patch
ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch
revert-ocfs2-bump-up-o2cb-network-protocol-version.patch

queue-4.7/autofs-use-dentry-flags-to-block-walks-during-expire.patch [new file with mode: 0644]
queue-4.7/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch [new file with mode: 0644]
queue-4.7/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch [new file with mode: 0644]
queue-4.7/crypto-echainiv-replace-chaining-with-multiplication.patch [new file with mode: 0644]
queue-4.7/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch [new file with mode: 0644]
queue-4.7/ocfs2-dlm-fix-race-between-convert-and-migration.patch [new file with mode: 0644]
queue-4.7/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch [new file with mode: 0644]
queue-4.7/revert-ocfs2-bump-up-o2cb-network-protocol-version.patch [new file with mode: 0644]
queue-4.7/series

diff --git a/queue-4.7/autofs-use-dentry-flags-to-block-walks-during-expire.patch b/queue-4.7/autofs-use-dentry-flags-to-block-walks-during-expire.patch
new file mode 100644
index 0000000..52ced74
--- /dev/null
@@ -0,0 +1,148 @@
+From 7cbdb4a286a60c5d519cb9223fe2134d26870d39 Mon Sep 17 00:00:00 2001
+From: Ian Kent <raven@themaw.net>
+Date: Mon, 19 Sep 2016 14:44:12 -0700
+Subject: autofs: use dentry flags to block walks during expire
+
+From: Ian Kent <raven@themaw.net>
+
+commit 7cbdb4a286a60c5d519cb9223fe2134d26870d39 upstream.
+
+Somewhere along the way the autofs expire operation has changed to hold
+a spin lock over expired dentry selection.  The autofs indirect mount
+expired dentry selection is complicated and quite lengthy so it isn't
+appropriate to hold a spin lock over the operation.
+
+Commit 47be61845c77 ("fs/dcache.c: avoid soft-lockup in dput()") added a
+might_sleep() to dput() causing a WARN_ONCE() about this usage to be
+issued.
+
+But the spin lock doesn't need to be held over this check; the autofs
+dentry info flags are enough to block walks into dentries during the
+expire.
+
+I've left the direct mount expire as it is (for now) because it is much
+simpler and quicker than the indirect mount expire, and adding spin lock
+releases and re-acquires would do nothing more than add overhead.
+
+Fixes: 47be61845c77 ("fs/dcache.c: avoid soft-lockup in dput()")
+Link: http://lkml.kernel.org/r/20160912014017.1773.73060.stgit@pluto.themaw.net
+Signed-off-by: Ian Kent <raven@themaw.net>
+Reported-by: Takashi Iwai <tiwai@suse.de>
+Tested-by: Takashi Iwai <tiwai@suse.de>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: NeilBrown <neilb@suse.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/autofs4/expire.c |   55 +++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 42 insertions(+), 13 deletions(-)
+
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -417,6 +417,7 @@ static struct dentry *should_expire(stru
+       }
+       return NULL;
+ }
++
+ /*
+  * Find an eligible tree to time-out
+  * A tree is eligible if :-
+@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(s
+       struct dentry *root = sb->s_root;
+       struct dentry *dentry;
+       struct dentry *expired;
++      struct dentry *found;
+       struct autofs_info *ino;
+       if (!root)
+@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(s
+       dentry = NULL;
+       while ((dentry = get_next_positive_subdir(dentry, root))) {
++              int flags = how;
++
+               spin_lock(&sbi->fs_lock);
+               ino = autofs4_dentry_ino(dentry);
+-              if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
+-                      expired = NULL;
+-              else
+-                      expired = should_expire(dentry, mnt, timeout, how);
+-              if (!expired) {
++              if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
+                       spin_unlock(&sbi->fs_lock);
+                       continue;
+               }
++              spin_unlock(&sbi->fs_lock);
++
++              expired = should_expire(dentry, mnt, timeout, flags);
++              if (!expired)
++                      continue;
++
++              spin_lock(&sbi->fs_lock);
+               ino = autofs4_dentry_ino(expired);
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
+               spin_unlock(&sbi->fs_lock);
+               synchronize_rcu();
+-              spin_lock(&sbi->fs_lock);
+-              if (should_expire(expired, mnt, timeout, how)) {
+-                      if (expired != dentry)
+-                              dput(dentry);
+-                      goto found;
+-              }
++              /* Make sure a reference is not taken on found if
++               * things have changed.
++               */
++              flags &= ~AUTOFS_EXP_LEAVES;
++              found = should_expire(expired, mnt, timeout, how);
++              if (!found || found != expired)
++                      /* Something has changed, continue */
++                      goto next;
++
++              if (expired != dentry)
++                      dput(dentry);
++
++              spin_lock(&sbi->fs_lock);
++              goto found;
++next:
++              spin_lock(&sbi->fs_lock);
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
++              spin_unlock(&sbi->fs_lock);
+               if (expired != dentry)
+                       dput(expired);
+-              spin_unlock(&sbi->fs_lock);
+       }
+       return NULL;
+@@ -483,6 +500,7 @@ int autofs4_expire_wait(struct dentry *d
+       struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+       struct autofs_info *ino = autofs4_dentry_ino(dentry);
+       int status;
++      int state;
+       /* Block on any pending expire */
+       if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
+@@ -490,8 +508,19 @@ int autofs4_expire_wait(struct dentry *d
+       if (rcu_walk)
+               return -ECHILD;
++retry:
+       spin_lock(&sbi->fs_lock);
+-      if (ino->flags & AUTOFS_INF_EXPIRING) {
++      state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
++      if (state == AUTOFS_INF_WANT_EXPIRE) {
++              spin_unlock(&sbi->fs_lock);
++              /*
++               * Possibly being selected for expire, wait until
++               * it's selected or not.
++               */
++              schedule_timeout_uninterruptible(HZ/10);
++              goto retry;
++      }
++      if (state & AUTOFS_INF_EXPIRING) {
+               spin_unlock(&sbi->fs_lock);
+               pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
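
The shape of this fix is a claim/release protocol: mark the dentry's
info with AUTOFS_INF_WANT_EXPIRE under a brief lock hold, drop the lock
for the lengthy should_expire() walk, and re-take it only to confirm or
clear the claim. Below is a minimal userspace analogy of that protocol,
a sketch only, assuming pthread spinlocks in place of the kernel's
fs_lock; none of these names are autofs API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define WANT_EXPIRE 0x01

struct info {
	pthread_spinlock_t lock;
	unsigned int flags;
};

/* Claim the entry with a flag instead of holding the lock across the
 * whole (potentially lengthy) eligibility check. Walkers that see the
 * flag back off, which is what blocks walks during the expire. */
static bool try_claim(struct info *ino)
{
	bool claimed = false;

	pthread_spin_lock(&ino->lock);
	if (!(ino->flags & WANT_EXPIRE)) {
		ino->flags |= WANT_EXPIRE;
		claimed = true;
	}
	pthread_spin_unlock(&ino->lock);
	return claimed;
}

static void release_claim(struct info *ino)
{
	pthread_spin_lock(&ino->lock);
	ino->flags &= ~WANT_EXPIRE;
	pthread_spin_unlock(&ino->lock);
}

int main(void)
{
	struct info ino = { .flags = 0 };

	pthread_spin_init(&ino.lock, PTHREAD_PROCESS_PRIVATE);
	if (try_claim(&ino)) {
		/* the long should_expire()-style check runs here with
		 * no lock held; concurrent walkers see WANT_EXPIRE */
		printf("claimed, checking, releasing\n");
		release_claim(&ino);
	}
	pthread_spin_destroy(&ino.lock);
	return 0;
}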
diff --git a/queue-4.7/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch b/queue-4.7/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
new file mode 100644
index 0000000..16c4091
--- /dev/null
@@ -0,0 +1,43 @@
+From f82e90b28654804ab72881d577d87c3d5c65e2bc Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 13 Sep 2016 09:48:52 +0100
+Subject: crypto: arm/aes-ctr - fix NULL dereference in tail processing
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit f82e90b28654804ab72881d577d87c3d5c65e2bc upstream.
+
+The AES-CTR glue code avoids calling into the blkcipher API for the
+tail portion of the walk, by comparing the remainder of walk.nbytes
+modulo AES_BLOCK_SIZE with the residual nbytes, and jumping straight
+into the tail processing block if they are equal. This tail processing
+block checks whether nbytes != 0, and does nothing otherwise.
+
+However, in case of an allocation failure in the blkcipher layer, we
+may enter this code with walk.nbytes == 0, while nbytes > 0. In this
+case, we should not dereference the source and destination pointers,
+since they may be NULL. So instead of checking for nbytes != 0, check
+for (walk.nbytes % AES_BLOCK_SIZE) != 0, which implies the former in
+non-error conditions.
+
+Fixes: 86464859cc77 ("crypto: arm - AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/crypto/aes-ce-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_
+               err = blkcipher_walk_done(desc, &walk,
+                                         walk.nbytes % AES_BLOCK_SIZE);
+       }
+-      if (nbytes) {
++      if (walk.nbytes % AES_BLOCK_SIZE) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 __aligned(8) tail[AES_BLOCK_SIZE];
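
The one-line change works because, on the allocation-failure path,
walk.nbytes is left at 0 while the caller's nbytes is still positive,
and walk.src/walk.dst are then not safe to touch. A small standalone
sketch of the guard follows; the struct and values are made up, not the
kernel's blkcipher types, and the same reasoning applies to the arm64
patch below.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define AES_BLOCK_SIZE 16

struct walk_state {
	size_t nbytes;       /* bytes mapped for this walk step */
	uint8_t *src, *dst;  /* NULL when the walk hit an error */
};

static void ctr_tail(struct walk_state *walk, size_t nbytes)
{
	/* Guarding on the walk's own residue, not on nbytes: a nonzero
	 * residue implies the walk actually mapped this step, so the
	 * pointers are valid; on error the residue is 0 and the tail
	 * is skipped even though nbytes is still nonzero. */
	if (walk->nbytes % AES_BLOCK_SIZE)
		printf("tail of %zu bytes, first src byte 0x%02x\n",
		       walk->nbytes % AES_BLOCK_SIZE, walk->src[0]);
	else
		printf("no tail work (caller nbytes=%zu)\n", nbytes);
}

int main(void)
{
	uint8_t buf[20] = { 1 };
	struct walk_state ok  = { .nbytes = 20, .src = buf, .dst = buf };
	struct walk_state oom = { .nbytes = 0,  .src = NULL, .dst = NULL };

	ctr_tail(&ok, 20);   /* 4 tail bytes are processed */
	ctr_tail(&oom, 20);  /* error path: no NULL dereference */
	return 0;
}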
diff --git a/queue-4.7/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch b/queue-4.7/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
new file mode 100644
index 0000000..59fbeff
--- /dev/null
@@ -0,0 +1,43 @@
+From 2db34e78f126c6001d79d3b66ab1abb482dc7caa Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 13 Sep 2016 09:48:53 +0100
+Subject: crypto: arm64/aes-ctr - fix NULL dereference in tail processing
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 2db34e78f126c6001d79d3b66ab1abb482dc7caa upstream.
+
+The AES-CTR glue code avoids calling into the blkcipher API for the
+tail portion of the walk, by comparing the remainder of walk.nbytes
+modulo AES_BLOCK_SIZE with the residual nbytes, and jumping straight
+into the tail processing block if they are equal. This tail processing
+block checks whether nbytes != 0, and does nothing otherwise.
+
+However, in case of an allocation failure in the blkcipher layer, we
+may enter this code with walk.nbytes == 0, while nbytes > 0. In this
+case, we should not dereference the source and destination pointers,
+since they may be NULL. So instead of checking for nbytes != 0, check
+for (walk.nbytes % AES_BLOCK_SIZE) != 0, which implies the former in
+non-error conditions.
+
+Fixes: 49788fe2a128 ("arm64/crypto: AES-ECB/CBC/CTR/XTS using ARMv8 NEON and Crypto Extensions")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_
+               err = blkcipher_walk_done(desc, &walk,
+                                         walk.nbytes % AES_BLOCK_SIZE);
+       }
+-      if (nbytes) {
++      if (walk.nbytes % AES_BLOCK_SIZE) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/queue-4.7/crypto-echainiv-replace-chaining-with-multiplication.patch b/queue-4.7/crypto-echainiv-replace-chaining-with-multiplication.patch
new file mode 100644
index 0000000..e11756b
--- /dev/null
@@ -0,0 +1,198 @@
+From 53a5d5ddccf849dbc27a8c1bba0b43c3a45fb792 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 7 Sep 2016 18:42:08 +0800
+Subject: crypto: echainiv - Replace chaining with multiplication
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 53a5d5ddccf849dbc27a8c1bba0b43c3a45fb792 upstream.
+
+The current implementation uses a global per-cpu array to store
+data which are used to derive the next IV.  This is insecure as
+the attacker may change the stored data.
+
+This patch removes all traces of chaining and replaces it with
+multiplication of the salt and the sequence number.
+
+Fixes: a10f554fa7e0 ("crypto: echainiv - Add encrypted chain IV...")
+Reported-by: Mathias Krause <minipli@googlemail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/echainiv.c |  115 +++++++++++-------------------------------------------
+ 1 file changed, 24 insertions(+), 91 deletions(-)
+
+--- a/crypto/echainiv.c
++++ b/crypto/echainiv.c
+@@ -1,8 +1,8 @@
+ /*
+  * echainiv: Encrypted Chain IV Generator
+  *
+- * This generator generates an IV based on a sequence number by xoring it
+- * with a salt and then encrypting it with the same key as used to encrypt
++ * This generator generates an IV based on a sequence number by multiplying
++ * it with a salt and then encrypting it with the same key as used to encrypt
+  * the plain text.  This algorithm requires that the block size be equal
+  * to the IV size.  It is mainly useful for CBC.
+  *
+@@ -23,81 +23,17 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/mm.h>
+ #include <linux/module.h>
+-#include <linux/percpu.h>
+-#include <linux/spinlock.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+-#define MAX_IV_SIZE 16
+-
+-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
+-
+-/* We don't care if we get preempted and read/write IVs from the next CPU. */
+-static void echainiv_read_iv(u8 *dst, unsigned size)
+-{
+-      u32 *a = (u32 *)dst;
+-      u32 __percpu *b = echainiv_iv;
+-
+-      for (; size >= 4; size -= 4) {
+-              *a++ = this_cpu_read(*b);
+-              b++;
+-      }
+-}
+-
+-static void echainiv_write_iv(const u8 *src, unsigned size)
+-{
+-      const u32 *a = (const u32 *)src;
+-      u32 __percpu *b = echainiv_iv;
+-
+-      for (; size >= 4; size -= 4) {
+-              this_cpu_write(*b, *a);
+-              a++;
+-              b++;
+-      }
+-}
+-
+-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
+-{
+-      struct aead_request *subreq = aead_request_ctx(req);
+-      struct crypto_aead *geniv;
+-      unsigned int ivsize;
+-
+-      if (err == -EINPROGRESS)
+-              return;
+-
+-      if (err)
+-              goto out;
+-
+-      geniv = crypto_aead_reqtfm(req);
+-      ivsize = crypto_aead_ivsize(geniv);
+-
+-      echainiv_write_iv(subreq->iv, ivsize);
+-
+-      if (req->iv != subreq->iv)
+-              memcpy(req->iv, subreq->iv, ivsize);
+-
+-out:
+-      if (req->iv != subreq->iv)
+-              kzfree(subreq->iv);
+-}
+-
+-static void echainiv_encrypt_complete(struct crypto_async_request *base,
+-                                       int err)
+-{
+-      struct aead_request *req = base->data;
+-
+-      echainiv_encrypt_complete2(req, err);
+-      aead_request_complete(req, err);
+-}
+-
+ static int echainiv_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_request *subreq = aead_request_ctx(req);
+-      crypto_completion_t compl;
+-      void *data;
++      __be64 nseqno;
++      u64 seqno;
+       u8 *info;
+       unsigned int ivsize = crypto_aead_ivsize(geniv);
+       int err;
+@@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_
+       aead_request_set_tfm(subreq, ctx->child);
+-      compl = echainiv_encrypt_complete;
+-      data = req;
+       info = req->iv;
+       if (req->src != req->dst) {
+@@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_
+                       return err;
+       }
+-      if (unlikely(!IS_ALIGNED((unsigned long)info,
+-                               crypto_aead_alignmask(geniv) + 1))) {
+-              info = kmalloc(ivsize, req->base.flags &
+-                                     CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+-                                                                GFP_ATOMIC);
+-              if (!info)
+-                      return -ENOMEM;
+-
+-              memcpy(info, req->iv, ivsize);
+-      }
+-
+-      aead_request_set_callback(subreq, req->base.flags, compl, data);
++      aead_request_set_callback(subreq, req->base.flags,
++                                req->base.complete, req->base.data);
+       aead_request_set_crypt(subreq, req->dst, req->dst,
+                              req->cryptlen, info);
+       aead_request_set_ad(subreq, req->assoclen);
+-      crypto_xor(info, ctx->salt, ivsize);
++      memcpy(&nseqno, info + ivsize - 8, 8);
++      seqno = be64_to_cpu(nseqno);
++      memset(info, 0, ivsize);
++
+       scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+-      echainiv_read_iv(info, ivsize);
+-      err = crypto_aead_encrypt(subreq);
+-      echainiv_encrypt_complete2(req, err);
+-      return err;
++      do {
++              u64 a;
++
++              memcpy(&a, ctx->salt + ivsize - 8, 8);
++
++              a |= 1;
++              a *= seqno;
++
++              memcpy(info + ivsize - 8, &a, 8);
++      } while ((ivsize -= 8));
++
++      return crypto_aead_encrypt(subreq);
+ }
+ static int echainiv_decrypt(struct aead_request *req)
+@@ -192,8 +127,7 @@ static int echainiv_aead_create(struct c
+       alg = crypto_spawn_aead_alg(spawn);
+       err = -EINVAL;
+-      if (inst->alg.ivsize & (sizeof(u32) - 1) ||
+-          inst->alg.ivsize > MAX_IV_SIZE)
++      if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
+               goto free_inst;
+       inst->alg.encrypt = echainiv_encrypt;
+@@ -202,7 +136,6 @@ static int echainiv_aead_create(struct c
+       inst->alg.init = aead_init_geniv;
+       inst->alg.exit = aead_exit_geniv;
+-      inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+       inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+       inst->alg.base.cra_ctxsize += inst->alg.ivsize;
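
The replacement IV construction is plain 64-bit arithmetic over the
salt: each 64-bit salt word, forced odd so that the multiplier is
invertible mod 2^64, is multiplied by the sequence number. Here is a
standalone sketch of that loop; the salt and sequence number are
made-up test values, and the kernel additionally byte-swaps the
sequence number it pulls out of the request IV.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Derive an IV of ivsize bytes (a nonzero multiple of 8) from a salt,
 * mirroring the patched echainiv loop: walk the salt in 64-bit words
 * from the end, force each word odd, multiply by the sequence number. */
static void derive_iv(uint8_t *iv, const uint8_t *salt,
		      unsigned int ivsize, uint64_t seqno)
{
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;      /* odd, hence invertible mod 2^64 */
		a *= seqno;
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}

int main(void)
{
	uint8_t salt[16] = {
		0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69, 0x78,
		0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1, 0xf0,
	};
	uint8_t iv[16];

	derive_iv(iv, salt, sizeof(iv), 42);  /* sequence number 42 */
	for (unsigned int i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}

Because nothing mutable is stored between requests, there is no longer
any per-cpu state for an attacker to tamper with.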
diff --git a/queue-4.7/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch b/queue-4.7/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
new file mode 100644
index 0000000..31c8700
--- /dev/null
@@ -0,0 +1,47 @@
+From acdb04d0b36769b3e05990c488dc74d8b7ac8060 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 13 Sep 2016 14:43:29 +0800
+Subject: crypto: skcipher - Fix blkcipher walk OOM crash
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit acdb04d0b36769b3e05990c488dc74d8b7ac8060 upstream.
+
+When we need to allocate a temporary buffer in blkcipher_walk_next and it
+fails, the code is supposed to take the slow path of processing
+the data block by block.  However, due to an unrelated change
+we instead end up dereferencing the NULL pointer.
+
+This patch fixes it by moving the unrelated bsize setting out
+of the way so that we enter the slow path as intended.
+
+Fixes: 7607bd8ff03b ("[CRYPTO] blkcipher: Added blkcipher_walk_virt_block")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/blkcipher.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct bl
+               return blkcipher_walk_done(desc, walk, -EINVAL);
+       }
++      bsize = min(walk->walk_blocksize, n);
++
+       walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+                        BLKCIPHER_WALK_DIFF);
+       if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+@@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct bl
+               }
+       }
+-      bsize = min(walk->walk_blocksize, n);
+       n = scatterwalk_clamp(&walk->in, n);
+       n = scatterwalk_clamp(&walk->out, n);
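
The fix is purely one of ordering. A failed page allocation in the
copy path zeroes n to force block-by-block processing, and the
n < bsize test downstream is what routes into that slow path; with the
old placement, bsize was computed from the already-zeroed n, so it
collapsed to 0 and the test never fired. A compressed sketch of the
decision, with hypothetical names (the real function also clamps n
against the scatterlists):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static const char *walk_next(size_t walk_blocksize, size_t n, bool page_ok)
{
	/* the moved line: bsize is fixed before n can be zeroed */
	size_t bsize = min_sz(walk_blocksize, n);

	if (!page_ok)
		n = 0;  /* allocation failed: force the slow path */

	if (n < bsize)
		return "slow path (block by block)";
	return "copy/fast path";
}

int main(void)
{
	printf("alloc ok:  %s\n", walk_next(16, 100, true));
	printf("alloc OOM: %s\n", walk_next(16, 100, false));
	return 0;
}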
diff --git a/queue-4.7/ocfs2-dlm-fix-race-between-convert-and-migration.patch b/queue-4.7/ocfs2-dlm-fix-race-between-convert-and-migration.patch
new file mode 100644
index 0000000..26a72d8
--- /dev/null
@@ -0,0 +1,76 @@
+From e6f0c6e6170fec175fe676495f29029aecdf486c Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@huawei.com>
+Date: Mon, 19 Sep 2016 14:43:55 -0700
+Subject: ocfs2/dlm: fix race between convert and migration
+
+From: Joseph Qi <joseph.qi@huawei.com>
+
+commit e6f0c6e6170fec175fe676495f29029aecdf486c upstream.
+
+Commit ac7cf246dfdb ("ocfs2/dlm: fix race between convert and recovery")
+checks if the lockres master has changed to identify whether the new
+master has finished recovery or not.  This introduces a race: right
+after the old master umounts (meaning the master will change), a new
+convert request comes in.
+
+In this case, it will reset the lockres state to DLM_RECOVERING and
+then retry the convert, which then fails with lockres->l_action set to
+OCFS2_AST_INVALID, causing an inconsistent lock level between ocfs2 and
+dlm and finally a BUG.
+
+Since dlm recovery will clear lock->convert_pending in
+dlm_move_lockres_to_recovery_list, we can use it to correctly identify
+the race case between convert and recovery.  So fix it.
+
+Fixes: ac7cf246dfdb ("ocfs2/dlm: fix race between convert and recovery")
+Link: http://lkml.kernel.org/r/57CE1569.8010704@huawei.com
+Signed-off-by: Joseph Qi <joseph.qi@huawei.com>
+Signed-off-by: Jun Piao <piaojun@huawei.com>
+Cc: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlm/dlmconvert.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -268,7 +268,6 @@ enum dlm_status dlmconvert_remote(struct
+                                 struct dlm_lock *lock, int flags, int type)
+ {
+       enum dlm_status status;
+-      u8 old_owner = res->owner;
+       mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+            lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -335,7 +334,6 @@ enum dlm_status dlmconvert_remote(struct
+       spin_lock(&res->spinlock);
+       res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+-      lock->convert_pending = 0;
+       /* if it failed, move it back to granted queue.
+        * if master returns DLM_NORMAL and then down before sending ast,
+        * it may have already been moved to granted queue, reset to
+@@ -344,12 +342,14 @@ enum dlm_status dlmconvert_remote(struct
+               if (status != DLM_NOTQUEUED)
+                       dlm_error(status);
+               dlm_revert_pending_convert(res, lock);
+-      } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+-                      (old_owner != res->owner)) {
+-              mlog(0, "res %.*s is in recovering or has been recovered.\n",
+-                              res->lockname.len, res->lockname.name);
++      } else if (!lock->convert_pending) {
++              mlog(0, "%s: res %.*s, owner died and lock has been moved back "
++                              "to granted list, retry convert.\n",
++                              dlm->name, res->lockname.len, res->lockname.name);
+               status = DLM_RECOVERING;
+       }
++
++      lock->convert_pending = 0;
+ bail:
+       spin_unlock(&res->spinlock);
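
The detection idea, that recovery clears lock->convert_pending in
dlm_move_lockres_to_recovery_list() and a cleared flag after the remote
convert returns therefore means recovery interposed, can be caricatured
in a few lines. This is a sequential sketch only, with the recovery
step folded in as a parameter instead of a concurrent thread, and the
status value is a stand-in for DLM_RECOVERING.

#include <stdbool.h>
#include <stdio.h>

struct lock_state {
	bool convert_pending;
};

static int remote_convert(struct lock_state *lock, bool recovery_ran)
{
	int status = 0;

	lock->convert_pending = true;       /* set before the request */

	/* ... network round trip to the master happens here ... */
	if (recovery_ran)
		lock->convert_pending = false;  /* recovery requeued the lock */

	if (!lock->convert_pending)
		status = -1;                /* retry: recovery interposed */

	lock->convert_pending = false;      /* always cleared afterwards */
	return status;
}

int main(void)
{
	struct lock_state lock = { false };

	printf("quiet convert: %d\n", remote_convert(&lock, false));
	printf("with recovery: %d\n", remote_convert(&lock, true));
	return 0;
}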
diff --git a/queue-4.7/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch b/queue-4.7/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch
new file mode 100644
index 0000000..53d9467
--- /dev/null
@@ -0,0 +1,109 @@
+From d21c353d5e99c56cdd5b5c1183ffbcaf23b8b960 Mon Sep 17 00:00:00 2001
+From: Ashish Samant <ashish.samant@oracle.com>
+Date: Mon, 19 Sep 2016 14:44:42 -0700
+Subject: ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
+
+From: Ashish Samant <ashish.samant@oracle.com>
+
+commit d21c353d5e99c56cdd5b5c1183ffbcaf23b8b960 upstream.
+
+If we punch a hole on a reflink such that the following conditions are met:
+
+1. start offset is on a cluster boundary
+2. end offset is not on a cluster boundary
+3. (end offset is somewhere in another extent) or
+   (hole range > MAX_CONTIG_BYTES(1MB)),
+
+we don't COW the first cluster starting at the start offset.  But in this
+case, we were wrongly passing this cluster to
+ocfs2_zero_range_for_truncate() to zero out.  This will modify the
+cluster in place and zero it in the source too.
+
+Fix this by skipping this cluster in such a scenario.
+
+To reproduce:
+
+1. Create a random file of say 10 MB
+     xfs_io -c 'pwrite -b 4k 0 10M' -f 10MBfile
+2. Reflink it
+     reflink -f 10MBfile reflnktest
+3. Punch a hole starting at a cluster boundary with a range greater than
+1MB. You can also use a range that will put the end offset in another
+extent.
+     fallocate -p -o 0 -l 1048615 reflnktest
+4. sync
+5. Check the first cluster in the source file. (It will be zeroed out.)
+    dd if=10MBfile iflag=direct bs=<cluster size> count=1 | hexdump -C
+
+Link: http://lkml.kernel.org/r/1470957147-14185-1-git-send-email-ashish.samant@oracle.com
+Signed-off-by: Ashish Samant <ashish.samant@oracle.com>
+Reported-by: Saar Maoz <saar.maoz@oracle.com>
+Reviewed-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Cc: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Joseph Qi <joseph.qi@huawei.com>
+Cc: Eric Ren <zren@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/file.c |   38 ++++++++++++++++++++++++++------------
+ 1 file changed, 26 insertions(+), 12 deletions(-)
+
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1506,7 +1506,8 @@ static int ocfs2_zero_partial_clusters(s
+                                      u64 start, u64 len)
+ {
+       int ret = 0;
+-      u64 tmpend, end = start + len;
++      u64 tmpend = 0;
++      u64 end = start + len;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       unsigned int csize = osb->s_clustersize;
+       handle_t *handle;
+@@ -1538,18 +1539,31 @@ static int ocfs2_zero_partial_clusters(s
+       }
+       /*
+-       * We want to get the byte offset of the end of the 1st cluster.
++       * If start is on a cluster boundary and end is somewhere in another
++       * cluster, we have not COWed the cluster starting at start, unless
++       * end is also within the same cluster. So, in this case, we skip this
++       * first call to ocfs2_zero_range_for_truncate() truncate and move on
++       * to the next one.
+        */
+-      tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
+-      if (tmpend > end)
+-              tmpend = end;
+-
+-      trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+-                                               (unsigned long long)tmpend);
+-
+-      ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
+-      if (ret)
+-              mlog_errno(ret);
++      if ((start & (csize - 1)) != 0) {
++              /*
++               * We want to get the byte offset of the end of the 1st
++               * cluster.
++               */
++              tmpend = (u64)osb->s_clustersize +
++                      (start & ~(osb->s_clustersize - 1));
++              if (tmpend > end)
++                      tmpend = end;
++
++              trace_ocfs2_zero_partial_clusters_range1(
++                      (unsigned long long)start,
++                      (unsigned long long)tmpend);
++
++              ret = ocfs2_zero_range_for_truncate(inode, handle, start,
++                                                  tmpend);
++              if (ret)
++                      mlog_errno(ret);
++      }
+       if (tmpend < end) {
+               /*
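
The new guard is simple cluster arithmetic: start & (csize - 1) is
nonzero exactly when start sits mid-cluster, and only then has the head
cluster been COWed and does it need zeroing. A standalone illustration
with a made-up 4 KB cluster size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t csize = 4096;  /* illustrative cluster size */
	uint64_t starts[] = { 0, 4096, 5000 };

	for (int i = 0; i < 3; i++) {
		uint64_t start = starts[i];

		if ((start & (csize - 1)) != 0) {
			/* mid-cluster start: zero up to the end of the
			 * first (COWed) cluster */
			uint64_t tmpend = csize + (start & ~(csize - 1));

			printf("start=%llu: zero head up to %llu\n",
			       (unsigned long long)start,
			       (unsigned long long)tmpend);
		} else {
			/* aligned start: the first cluster was never
			 * COWed, zeroing it would hit the shared source */
			printf("start=%llu: skip head zeroing\n",
			       (unsigned long long)start);
		}
	}
	return 0;
}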
diff --git a/queue-4.7/revert-ocfs2-bump-up-o2cb-network-protocol-version.patch b/queue-4.7/revert-ocfs2-bump-up-o2cb-network-protocol-version.patch
new file mode 100644
index 0000000..3a803cf
--- /dev/null
@@ -0,0 +1,54 @@
+From 63b52c4936a2e679639c38ef51a50aa8ca1c5c07 Mon Sep 17 00:00:00 2001
+From: Junxiao Bi <junxiao.bi@oracle.com>
+Date: Mon, 19 Sep 2016 14:44:44 -0700
+Subject: Revert "ocfs2: bump up o2cb network protocol version"
+
+From: Junxiao Bi <junxiao.bi@oracle.com>
+
+commit 63b52c4936a2e679639c38ef51a50aa8ca1c5c07 upstream.
+
+This reverts commit 38b52efd218b ("ocfs2: bump up o2cb network protocol
+version").
+
+This commit made rolling upgrades fail.  When one node is upgraded to a
+new version with this commit, the remaining nodes fail to establish
+connections to it, so applications like VMs on the remaining nodes
+can't be live-migrated to the upgraded one.  This causes an outage.
+Since the negotiate hb timeout behavior didn't change without this
+commit, revert it.
+
+Fixes: 38b52efd218bf ("ocfs2: bump up o2cb network protocol version")
+Link: http://lkml.kernel.org/r/1471396924-10375-1-git-send-email-junxiao.bi@oracle.com
+Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Joseph Qi <joseph.qi@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/cluster/tcp_internal.h |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/fs/ocfs2/cluster/tcp_internal.h
++++ b/fs/ocfs2/cluster/tcp_internal.h
+@@ -44,9 +44,6 @@
+  * version here in tcp_internal.h should not need to be bumped for
+  * filesystem locking changes.
+  *
+- * New in version 12
+- *    - Negotiate hb timeout when storage is down.
+- *
+  * New in version 11
+  *    - Negotiation of filesystem locking in the dlm join.
+  *
+@@ -78,7 +75,7 @@
+  *    - full 64 bit i_size in the metadata lock lvbs
+  *    - introduction of "rw" lock and pushing meta/data locking down
+  */
+-#define O2NET_PROTOCOL_VERSION 12ULL
++#define O2NET_PROTOCOL_VERSION 11ULL
+ struct o2net_handshake {
+       __be64  protocol_version;
+       __be64  connector_id;
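
The mechanism behind the breakage is that o2net only accepts a
connection when both ends report an identical protocol version, so a
bumped node cannot talk to not-yet-upgraded peers. A toy sketch of that
equality check; the function name is illustrative, not the o2net API.

#include <stdint.h>
#include <stdio.h>

#define O2NET_PROTOCOL_VERSION 11ULL  /* back to 11 after the revert */

static int check_handshake(uint64_t local, uint64_t remote)
{
	/* mixed-version clusters disconnect here, which is what made
	 * rolling upgrades fail once one node reported version 12 */
	if (local != remote) {
		fprintf(stderr, "version mismatch (%llu != %llu)\n",
			(unsigned long long)local,
			(unsigned long long)remote);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_handshake(O2NET_PROTOCOL_VERSION, 12ULL);  /* upgraded peer */
	check_handshake(O2NET_PROTOCOL_VERSION, 11ULL);  /* same version */
	return 0;
}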
diff --git a/queue-4.7/series b/queue-4.7/series
index b147d08f75d53afa1d480a333232b132d82c7761..8a742fe20500e8b3757fd876fc68509be68915de 100644
@@ -1 +1,9 @@
 reiserfs-fix-new_insert_key-may-be-used-uninitialized.patch
+crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
+crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
+crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
+crypto-echainiv-replace-chaining-with-multiplication.patch
+ocfs2-dlm-fix-race-between-convert-and-migration.patch
+ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch
+revert-ocfs2-bump-up-o2cb-network-protocol-version.patch
+autofs-use-dentry-flags-to-block-walks-during-expire.patch