git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 25 Sep 2016 18:17:03 +0000 (20:17 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 25 Sep 2016 18:17:03 +0000 (20:17 +0200)
added patches:
crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
crypto-echainiv-replace-chaining-with-multiplication.patch
crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
ocfs2-dlm-fix-race-between-convert-and-migration.patch
ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch

queue-4.4/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch [new file with mode: 0644]
queue-4.4/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch [new file with mode: 0644]
queue-4.4/crypto-echainiv-replace-chaining-with-multiplication.patch [new file with mode: 0644]
queue-4.4/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch [new file with mode: 0644]
queue-4.4/ocfs2-dlm-fix-race-between-convert-and-migration.patch [new file with mode: 0644]
queue-4.4/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch b/queue-4.4/crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
new file mode 100644
index 0000000..e23df90
--- /dev/null
@@ -0,0 +1,43 @@
+From f82e90b28654804ab72881d577d87c3d5c65e2bc Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 13 Sep 2016 09:48:52 +0100
+Subject: crypto: arm/aes-ctr - fix NULL dereference in tail processing
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit f82e90b28654804ab72881d577d87c3d5c65e2bc upstream.
+
+The AES-CTR glue code avoids calling into the blkcipher API for the
+tail portion of the walk, by comparing the remainder of walk.nbytes
+modulo AES_BLOCK_SIZE with the residual nbytes, and jumping straight
+into the tail processing block if they are equal. This tail processing
+block checks whether nbytes != 0, and does nothing otherwise.
+
+However, in case of an allocation failure in the blkcipher layer, we
+may enter this code with walk.nbytes == 0, while nbytes > 0. In this
+case, we should not dereference the source and destination pointers,
+since they may be NULL. So instead of checking for nbytes != 0, check
+for (walk.nbytes % AES_BLOCK_SIZE) != 0, which implies the former in
+non-error conditions.
+
+Fixes: 86464859cc77 ("crypto: arm - AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/crypto/aes-ce-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_
+               err = blkcipher_walk_done(desc, &walk,
+                                         walk.nbytes % AES_BLOCK_SIZE);
+       }
+-      if (nbytes) {
++      if (walk.nbytes % AES_BLOCK_SIZE) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/queue-4.4/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch b/queue-4.4/crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
new file mode 100644
index 0000000..7807640
--- /dev/null
@@ -0,0 +1,43 @@
+From 2db34e78f126c6001d79d3b66ab1abb482dc7caa Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 13 Sep 2016 09:48:53 +0100
+Subject: crypto: arm64/aes-ctr - fix NULL dereference in tail processing
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 2db34e78f126c6001d79d3b66ab1abb482dc7caa upstream.
+
+The AES-CTR glue code avoids calling into the blkcipher API for the
+tail portion of the walk, by comparing the remainder of walk.nbytes
+modulo AES_BLOCK_SIZE with the residual nbytes, and jumping straight
+into the tail processing block if they are equal. This tail processing
+block checks whether nbytes != 0, and does nothing otherwise.
+
+However, in case of an allocation failure in the blkcipher layer, we
+may enter this code with walk.nbytes == 0, while nbytes > 0. In this
+case, we should not dereference the source and destination pointers,
+since they may be NULL. So instead of checking for nbytes != 0, check
+for (walk.nbytes % AES_BLOCK_SIZE) != 0, which implies the former in
+non-error conditions.
+
+Fixes: 49788fe2a128 ("arm64/crypto: AES-ECB/CBC/CTR/XTS using ARMv8 NEON and Crypto Extensions")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_
+               err = blkcipher_walk_done(desc, &walk,
+                                         walk.nbytes % AES_BLOCK_SIZE);
+       }
+-      if (nbytes) {
++      if (walk.nbytes % AES_BLOCK_SIZE) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 __aligned(8) tail[AES_BLOCK_SIZE];
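The two aes-ctr patches above make the same one-line change for arm and arm64: the tail-processing block must key off what the walk actually mapped, not off the caller's residual byte count. A minimal user-space sketch of the guard change follows (a toy model only: the struct, the function names and the main() driver are invented for illustration and are not kernel APIs).

/*
 * Toy model of the ctr_encrypt() tail guard. After an allocation failure
 * in the blkcipher layer, the walk maps nothing (nbytes == 0, NULL
 * pointers) even though the caller still has residual bytes.
 */
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

struct toy_walk {
        unsigned int nbytes;    /* bytes the walk actually mapped */
        unsigned char *src;     /* may be NULL when nbytes == 0 */
        unsigned char *dst;
};

/* Old guard: trusts the caller's residual count and dereferences src/dst. */
static void tail_old(struct toy_walk *w, unsigned int nbytes)
{
        if (nbytes) {
                unsigned char tail[AES_BLOCK_SIZE];

                memcpy(tail, w->src, nbytes);   /* NULL dereference if nothing was mapped */
                memcpy(w->dst, tail, nbytes);
        }
}

/* New guard: only enters the tail block when the walk mapped a partial block. */
static void tail_new(struct toy_walk *w)
{
        unsigned int rem = w->nbytes % AES_BLOCK_SIZE;

        if (rem) {
                unsigned char tail[AES_BLOCK_SIZE];

                memcpy(tail, w->src, rem);
                memcpy(w->dst, tail, rem);
        }
}

int main(void)
{
        unsigned char buf[5] = "abcd", out[5] = { 0 };
        struct toy_walk ok  = { .nbytes = 4, .src = buf, .dst = out };
        struct toy_walk oom = { .nbytes = 0, .src = NULL, .dst = NULL };

        tail_old(&ok, 4);       /* fine when the tail really was mapped */
        tail_new(&oom);         /* safe: guard is false, pointers never touched */
        /* tail_old(&oom, 4) would dereference NULL here */
        printf("tail copied: %.4s\n", (char *)out);
        return 0;
}

In the non-error case, walk.nbytes % AES_BLOCK_SIZE is non-zero exactly when a final partial block was mapped, so the new guard implies the old one; after an allocation failure it is zero and the NULL pointers are never touched.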
diff --git a/queue-4.4/crypto-echainiv-replace-chaining-with-multiplication.patch b/queue-4.4/crypto-echainiv-replace-chaining-with-multiplication.patch
new file mode 100644
index 0000000..e11756b
--- /dev/null
@@ -0,0 +1,198 @@
+From 53a5d5ddccf849dbc27a8c1bba0b43c3a45fb792 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 7 Sep 2016 18:42:08 +0800
+Subject: crypto: echainiv - Replace chaining with multiplication
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 53a5d5ddccf849dbc27a8c1bba0b43c3a45fb792 upstream.
+
+The current implementation uses a global per-cpu array to store
+data which are used to derive the next IV.  This is insecure as
+the attacker may change the stored data.
+
+This patch removes all traces of chaining and replaces it with
+multiplication of the salt and the sequence number.
+
+Fixes: a10f554fa7e0 ("crypto: echainiv - Add encrypted chain IV...")
+Reported-by: Mathias Krause <minipli@googlemail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/echainiv.c |  115 +++++++++++-------------------------------------------
+ 1 file changed, 24 insertions(+), 91 deletions(-)
+
+--- a/crypto/echainiv.c
++++ b/crypto/echainiv.c
+@@ -1,8 +1,8 @@
+ /*
+  * echainiv: Encrypted Chain IV Generator
+  *
+- * This generator generates an IV based on a sequence number by xoring it
+- * with a salt and then encrypting it with the same key as used to encrypt
++ * This generator generates an IV based on a sequence number by multiplying
++ * it with a salt and then encrypting it with the same key as used to encrypt
+  * the plain text.  This algorithm requires that the block size be equal
+  * to the IV size.  It is mainly useful for CBC.
+  *
+@@ -23,81 +23,17 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/mm.h>
+ #include <linux/module.h>
+-#include <linux/percpu.h>
+-#include <linux/spinlock.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+-#define MAX_IV_SIZE 16
+-
+-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
+-
+-/* We don't care if we get preempted and read/write IVs from the next CPU. */
+-static void echainiv_read_iv(u8 *dst, unsigned size)
+-{
+-      u32 *a = (u32 *)dst;
+-      u32 __percpu *b = echainiv_iv;
+-
+-      for (; size >= 4; size -= 4) {
+-              *a++ = this_cpu_read(*b);
+-              b++;
+-      }
+-}
+-
+-static void echainiv_write_iv(const u8 *src, unsigned size)
+-{
+-      const u32 *a = (const u32 *)src;
+-      u32 __percpu *b = echainiv_iv;
+-
+-      for (; size >= 4; size -= 4) {
+-              this_cpu_write(*b, *a);
+-              a++;
+-              b++;
+-      }
+-}
+-
+-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
+-{
+-      struct aead_request *subreq = aead_request_ctx(req);
+-      struct crypto_aead *geniv;
+-      unsigned int ivsize;
+-
+-      if (err == -EINPROGRESS)
+-              return;
+-
+-      if (err)
+-              goto out;
+-
+-      geniv = crypto_aead_reqtfm(req);
+-      ivsize = crypto_aead_ivsize(geniv);
+-
+-      echainiv_write_iv(subreq->iv, ivsize);
+-
+-      if (req->iv != subreq->iv)
+-              memcpy(req->iv, subreq->iv, ivsize);
+-
+-out:
+-      if (req->iv != subreq->iv)
+-              kzfree(subreq->iv);
+-}
+-
+-static void echainiv_encrypt_complete(struct crypto_async_request *base,
+-                                       int err)
+-{
+-      struct aead_request *req = base->data;
+-
+-      echainiv_encrypt_complete2(req, err);
+-      aead_request_complete(req, err);
+-}
+-
+ static int echainiv_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_request *subreq = aead_request_ctx(req);
+-      crypto_completion_t compl;
+-      void *data;
++      __be64 nseqno;
++      u64 seqno;
+       u8 *info;
+       unsigned int ivsize = crypto_aead_ivsize(geniv);
+       int err;
+@@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_
+       aead_request_set_tfm(subreq, ctx->child);
+-      compl = echainiv_encrypt_complete;
+-      data = req;
+       info = req->iv;
+       if (req->src != req->dst) {
+@@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_
+                       return err;
+       }
+-      if (unlikely(!IS_ALIGNED((unsigned long)info,
+-                               crypto_aead_alignmask(geniv) + 1))) {
+-              info = kmalloc(ivsize, req->base.flags &
+-                                     CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+-                                                                GFP_ATOMIC);
+-              if (!info)
+-                      return -ENOMEM;
+-
+-              memcpy(info, req->iv, ivsize);
+-      }
+-
+-      aead_request_set_callback(subreq, req->base.flags, compl, data);
++      aead_request_set_callback(subreq, req->base.flags,
++                                req->base.complete, req->base.data);
+       aead_request_set_crypt(subreq, req->dst, req->dst,
+                              req->cryptlen, info);
+       aead_request_set_ad(subreq, req->assoclen);
+-      crypto_xor(info, ctx->salt, ivsize);
++      memcpy(&nseqno, info + ivsize - 8, 8);
++      seqno = be64_to_cpu(nseqno);
++      memset(info, 0, ivsize);
++
+       scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+-      echainiv_read_iv(info, ivsize);
+-      err = crypto_aead_encrypt(subreq);
+-      echainiv_encrypt_complete2(req, err);
+-      return err;
++      do {
++              u64 a;
++
++              memcpy(&a, ctx->salt + ivsize - 8, 8);
++
++              a |= 1;
++              a *= seqno;
++
++              memcpy(info + ivsize - 8, &a, 8);
++      } while ((ivsize -= 8));
++
++      return crypto_aead_encrypt(subreq);
+ }
+ static int echainiv_decrypt(struct aead_request *req)
+@@ -192,8 +127,7 @@ static int echainiv_aead_create(struct c
+       alg = crypto_spawn_aead_alg(spawn);
+       err = -EINVAL;
+-      if (inst->alg.ivsize & (sizeof(u32) - 1) ||
+-          inst->alg.ivsize > MAX_IV_SIZE)
++      if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
+               goto free_inst;
+       inst->alg.encrypt = echainiv_encrypt;
+@@ -202,7 +136,6 @@ static int echainiv_aead_create(struct c
+       inst->alg.init = aead_init_geniv;
+       inst->alg.exit = aead_exit_geniv;
+-      inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+       inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+       inst->alg.base.cra_ctxsize += inst->alg.ivsize;
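The replacement IV construction in the echainiv hunk above is compact: the 64-bit sequence number is read (big-endian) from the tail of the request IV, the IV is zeroed, and every 8-byte lane is refilled with (salt lane | 1) * seqno. A stand-alone sketch of that derivation follows (the function name and test values are invented; unlike the kernel code, which pulls the sequence number out of req->iv and copies the result into the destination buffer, the sketch takes seqno as a plain parameter).

/*
 * Sketch of echainiv's salt-multiplication IV derivation. Assumes ivsize
 * is a non-zero multiple of 8, which the patch now enforces in
 * echainiv_aead_create().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void echainiv_style_iv(uint8_t *iv, const uint8_t *salt,
                              unsigned int ivsize, uint64_t seqno)
{
        memset(iv, 0, ivsize);

        do {
                uint64_t a;

                memcpy(&a, salt + ivsize - 8, 8);

                a |= 1;         /* an odd multiplier is invertible mod 2^64 */
                a *= seqno;

                memcpy(iv + ivsize - 8, &a, 8);
        } while ((ivsize -= 8));
}

int main(void)
{
        uint8_t salt[16] = { 0xde, 0xad, 0xbe, 0xef }; /* stand-in for the per-key salt */
        uint8_t iv[16];

        echainiv_style_iv(iv, salt, sizeof(iv), 42);

        for (unsigned int i = 0; i < sizeof(iv); i++)
                printf("%02x", iv[i]);
        printf("\n");
        return 0;
}

Because the derived IV depends only on the per-key salt and the request's sequence number, nothing writable is shared between requests, which is what removes the per-cpu state the commit message calls insecure.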
diff --git a/queue-4.4/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch b/queue-4.4/crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
new file mode 100644
index 0000000..31c8700
--- /dev/null
@@ -0,0 +1,47 @@
+From acdb04d0b36769b3e05990c488dc74d8b7ac8060 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 13 Sep 2016 14:43:29 +0800
+Subject: crypto: skcipher - Fix blkcipher walk OOM crash
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit acdb04d0b36769b3e05990c488dc74d8b7ac8060 upstream.
+
+When we need to allocate a temporary blkcipher_walk_next and it
+fails, the code is supposed to take the slow path of processing
+the data block by block.  However, due to an unrelated change
+we instead end up dereferencing the NULL pointer.
+
+This patch fixes it by moving the unrelated bsize setting out
+of the way so that we enter the slow path as intended.
+
+Fixes: 7607bd8ff03b ("[CRYPTO] blkcipher: Added blkcipher_walk_virt_block")
+Reported-by: xiakaixu <xiakaixu@huawei.com>
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/blkcipher.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct bl
+               return blkcipher_walk_done(desc, walk, -EINVAL);
+       }
++      bsize = min(walk->walk_blocksize, n);
++
+       walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+                        BLKCIPHER_WALK_DIFF);
+       if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+@@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct bl
+               }
+       }
+-      bsize = min(walk->walk_blocksize, n);
+       n = scatterwalk_clamp(&walk->in, n);
+       n = scatterwalk_clamp(&walk->out, n);
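The skcipher fix is purely an ordering change, and the hunk does not show the code it interacts with. Assuming, as the commit message implies, that the surrounding blkcipher_walk_next() code zeroes the remaining byte count when the bounce-page allocation fails, the effect of computing bsize before rather than after that point can be sketched as follows (a user-space toy model with invented names, not the kernel function).

/*
 * Toy model of the ordering issue: when the page allocation fails, n is
 * forced to 0; whether the "n < bsize" slow-path test can still fire
 * depends on whether bsize was derived before or after that happens.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *pick_path(unsigned int total, unsigned int walk_blocksize,
                             bool page_alloc_failed, bool bsize_computed_first)
{
        unsigned int n = total;
        unsigned int bsize = 0;

        if (bsize_computed_first)               /* patched order */
                bsize = walk_blocksize < n ? walk_blocksize : n;

        if (page_alloc_failed)                  /* bounce page allocation returned NULL */
                n = 0;

        if (!bsize_computed_first)              /* original order */
                bsize = walk_blocksize < n ? walk_blocksize : n;

        return n < bsize ? "slow path (block by block)"
                         : "fast path (dereferences mapped pages)";
}

int main(void)
{
        printf("old order on OOM: %s\n", pick_path(4096, 16, true, false));
        printf("new order on OOM: %s\n", pick_path(4096, 16, true, true));
        return 0;
}

Under that assumption, the old order collapses bsize to 0 on OOM, the slow-path test never fires, and the fast path runs against unmapped pages; computing bsize first keeps the slow path reachable, which is the behaviour the commit message says was intended.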
diff --git a/queue-4.4/ocfs2-dlm-fix-race-between-convert-and-migration.patch b/queue-4.4/ocfs2-dlm-fix-race-between-convert-and-migration.patch
new file mode 100644
index 0000000..a26ae32
--- /dev/null
@@ -0,0 +1,76 @@
+From e6f0c6e6170fec175fe676495f29029aecdf486c Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@huawei.com>
+Date: Mon, 19 Sep 2016 14:43:55 -0700
+Subject: ocfs2/dlm: fix race between convert and migration
+
+From: Joseph Qi <joseph.qi@huawei.com>
+
+commit e6f0c6e6170fec175fe676495f29029aecdf486c upstream.
+
+Commit ac7cf246dfdb ("ocfs2/dlm: fix race between convert and recovery")
+checks whether the lockres master has changed to identify whether the new
+master has finished recovery.  This introduces a race: right after the
+old master does umount (which means the master will change), a new
+convert request comes in.
+
+In this case, it will reset lockres state to DLM_RECOVERING and then
+retry convert, and then fail with lockres->l_action being set to
+OCFS2_AST_INVALID, which will cause inconsistent lock level between
+ocfs2 and dlm, and then finally BUG.
+
+Since dlm recovery will clear lock->convert_pending in
+dlm_move_lockres_to_recovery_list, we can use it to correctly identify
+the race case between convert and recovery.  So fix it.
+
+Fixes: ac7cf246dfdb ("ocfs2/dlm: fix race between convert and recovery")
+Link: http://lkml.kernel.org/r/57CE1569.8010704@huawei.com
+Signed-off-by: Joseph Qi <joseph.qi@huawei.com>
+Signed-off-by: Jun Piao <piaojun@huawei.com>
+Cc: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlm/dlmconvert.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct
+                                 struct dlm_lock *lock, int flags, int type)
+ {
+       enum dlm_status status;
+-      u8 old_owner = res->owner;
+       mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+            lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct
+       spin_lock(&res->spinlock);
+       res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+-      lock->convert_pending = 0;
+       /* if it failed, move it back to granted queue.
+        * if master returns DLM_NORMAL and then down before sending ast,
+        * it may have already been moved to granted queue, reset to
+@@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct
+               if (status != DLM_NOTQUEUED)
+                       dlm_error(status);
+               dlm_revert_pending_convert(res, lock);
+-      } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+-                      (old_owner != res->owner)) {
+-              mlog(0, "res %.*s is in recovering or has been recovered.\n",
+-                              res->lockname.len, res->lockname.name);
++      } else if (!lock->convert_pending) {
++              mlog(0, "%s: res %.*s, owner died and lock has been moved back "
++                              "to granted list, retry convert.\n",
++                              dlm->name, res->lockname.len, res->lockname.name);
+               status = DLM_RECOVERING;
+       }
++
++      lock->convert_pending = 0;
+ bail:
+       spin_unlock(&res->spinlock);
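As a reading aid for the dlmconvert_remote() change above: the patch stops comparing the old and new owners and instead trusts lock->convert_pending, which dlm recovery clears when it moves the lock back to the granted list. A condensed user-space restatement of just that decision follows (types, values and the main() driver are invented; only the control flow mirrors the hunk).

/* Condensed restatement of the post-convert decision installed by the patch. */
#include <stdio.h>

enum toy_status { TOY_NORMAL, TOY_NOTQUEUED, TOY_RECOVERING };

struct toy_lock {
        int convert_pending;    /* set before sending the convert request;
                                 * cleared if recovery moves the lock back
                                 * to the granted list meanwhile */
};

static enum toy_status after_remote_convert(struct toy_lock *lock, enum toy_status status)
{
        if (status != TOY_NORMAL) {
                /* convert failed: revert the pending convert */
        } else if (!lock->convert_pending) {
                /* recovery ran while we waited, the lock is already back on
                 * the granted list, so ask the caller to retry the convert */
                status = TOY_RECOVERING;
        }

        lock->convert_pending = 0;      /* now always cleared on the way out */
        return status;
}

int main(void)
{
        struct toy_lock untouched = { .convert_pending = 1 };
        struct toy_lock recovered = { .convert_pending = 0 };

        printf("no recovery ran: %d (stays NORMAL)\n",
               after_remote_convert(&untouched, TOY_NORMAL));
        printf("recovery ran:    %d (becomes RECOVERING, caller retries)\n",
               after_remote_convert(&recovered, TOY_NORMAL));
        return 0;
}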
diff --git a/queue-4.4/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch b/queue-4.4/ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch
new file mode 100644
index 0000000..e1e1fea
--- /dev/null
@@ -0,0 +1,109 @@
+From d21c353d5e99c56cdd5b5c1183ffbcaf23b8b960 Mon Sep 17 00:00:00 2001
+From: Ashish Samant <ashish.samant@oracle.com>
+Date: Mon, 19 Sep 2016 14:44:42 -0700
+Subject: ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
+
+From: Ashish Samant <ashish.samant@oracle.com>
+
+commit d21c353d5e99c56cdd5b5c1183ffbcaf23b8b960 upstream.
+
+If we punch a hole on a reflink such that following conditions are met:
+
+1. start offset is on a cluster boundary
+2. end offset is not on a cluster boundary
+3. (end offset is somewhere in another extent) or
+   (hole range > MAX_CONTIG_BYTES(1MB)),
+
+we don't COW the first cluster starting at the start offset.  But in this
+case, we were wrongly passing this cluster to
+ocfs2_zero_range_for_truncate() to zero out.  This will modify the
+cluster in place and zero it in the source too.
+
+Fix this by skipping this cluster in such a scenario.
+
+To reproduce:
+
+1. Create a random file of say 10 MB
+     xfs_io -c 'pwrite -b 4k 0 10M' -f 10MBfile
+2. Reflink  it
+     reflink -f 10MBfile reflnktest
+3. Punch a hole starting at a cluster boundary with a range greater than
+1MB. You can also use a range that will put the end offset in another
+extent.
+     fallocate -p -o 0 -l 1048615 reflnktest
+4. sync
+5. Check the first cluster in the source file. (It will be zeroed out).
+    dd if=10MBfile iflag=direct bs=<cluster size> count=1 | hexdump -C
+
+Link: http://lkml.kernel.org/r/1470957147-14185-1-git-send-email-ashish.samant@oracle.com
+Signed-off-by: Ashish Samant <ashish.samant@oracle.com>
+Reported-by: Saar Maoz <saar.maoz@oracle.com>
+Reviewed-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Cc: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Joseph Qi <joseph.qi@huawei.com>
+Cc: Eric Ren <zren@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/file.c |   38 ++++++++++++++++++++++++++------------
+ 1 file changed, 26 insertions(+), 12 deletions(-)
+
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(s
+                                      u64 start, u64 len)
+ {
+       int ret = 0;
+-      u64 tmpend, end = start + len;
++      u64 tmpend = 0;
++      u64 end = start + len;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       unsigned int csize = osb->s_clustersize;
+       handle_t *handle;
+@@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(s
+       }
+       /*
+-       * We want to get the byte offset of the end of the 1st cluster.
++       * If start is on a cluster boundary and end is somewhere in another
++       * cluster, we have not COWed the cluster starting at start, unless
++       * end is also within the same cluster. So, in this case, we skip this
++       * first call to ocfs2_zero_range_for_truncate() truncate and move on
++       * to the next one.
+        */
+-      tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
+-      if (tmpend > end)
+-              tmpend = end;
+-
+-      trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+-                                               (unsigned long long)tmpend);
+-
+-      ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
+-      if (ret)
+-              mlog_errno(ret);
++      if ((start & (csize - 1)) != 0) {
++              /*
++               * We want to get the byte offset of the end of the 1st
++               * cluster.
++               */
++              tmpend = (u64)osb->s_clustersize +
++                      (start & ~(osb->s_clustersize - 1));
++              if (tmpend > end)
++                      tmpend = end;
++
++              trace_ocfs2_zero_partial_clusters_range1(
++                      (unsigned long long)start,
++                      (unsigned long long)tmpend);
++
++              ret = ocfs2_zero_range_for_truncate(inode, handle, start,
++                                                  tmpend);
++              if (ret)
++                      mlog_errno(ret);
++      }
+       if (tmpend < end) {
+               /*
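The ocfs2 hunk above gates the first ocfs2_zero_range_for_truncate() call on whether the hole start is cluster-aligned, and the tmpend arithmetic is easy to misread. A small user-space check of that arithmetic follows (a sketch only: the helper name is invented and the 1 MiB cluster size is assumed purely for illustration, not taken from the reproducer).

/*
 * Sketch of the "skip the first cluster" decision. Nothing here is ocfs2
 * API; the hole geometry of the first call matches the reproducer above.
 */
#include <stdint.h>
#include <stdio.h>

static void first_cluster_range(uint64_t start, uint64_t len, uint64_t csize)
{
        uint64_t end = start + len;

        if ((start & (csize - 1)) != 0) {
                /* byte offset of the end of the 1st cluster */
                uint64_t tmpend = csize + (start & ~(csize - 1));

                if (tmpend > end)
                        tmpend = end;
                printf("zero [%llu, %llu) in the first (already COWed) cluster\n",
                       (unsigned long long)start, (unsigned long long)tmpend);
        } else {
                printf("start %llu is cluster-aligned: first cluster was never COWed, skip it\n",
                       (unsigned long long)start);
        }
}

int main(void)
{
        uint64_t csize = 1 << 20;                       /* assumed 1 MiB clusters */

        first_cluster_range(0, 1048615, csize);         /* fallocate -p -o 0 -l 1048615 */
        first_cluster_range(4096, 1048615, csize);      /* unaligned start: first call still runs */
        return 0;
}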
diff --git a/queue-4.4/series b/queue-4.4/series
index 47c3d0172f29e5573797333d10105bddc86a10c8..9866c90e74f0740542d6120ae19f4684d7a94502 100644
--- a/queue-4.4/series
@@ -6,3 +6,9 @@ ipv6-release-dst-in-ping_v6_sendmsg.patch
 tcp-cwnd-does-not-increase-in-tcp-yeah.patch
 tcp-fix-use-after-free-in-tcp_xmit_retransmit_queue.patch
 tcp-properly-scale-window-in-tcp_v_reqsk_send_ack.patch
+crypto-arm64-aes-ctr-fix-null-dereference-in-tail-processing.patch
+crypto-arm-aes-ctr-fix-null-dereference-in-tail-processing.patch
+crypto-skcipher-fix-blkcipher-walk-oom-crash.patch
+crypto-echainiv-replace-chaining-with-multiplication.patch
+ocfs2-dlm-fix-race-between-convert-and-migration.patch
+ocfs2-fix-start-offset-to-ocfs2_zero_range_for_truncate.patch