]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 22 Sep 2017 09:37:38 +0000 (11:37 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 22 Sep 2017 09:37:38 +0000 (11:37 +0200)
added patches:
block-relax-a-check-in-blk_start_queue.patch
crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch
powerpc-fix-dar-reporting-when-alignment-handler-faults.patch

queue-4.9/block-relax-a-check-in-blk_start_queue.patch [new file with mode: 0644]
queue-4.9/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch [new file with mode: 0644]
queue-4.9/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch [new file with mode: 0644]
queue-4.9/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch [new file with mode: 0644]
queue-4.9/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch [new file with mode: 0644]
queue-4.9/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch [new file with mode: 0644]
queue-4.9/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/block-relax-a-check-in-blk_start_queue.patch b/queue-4.9/block-relax-a-check-in-blk_start_queue.patch
new file mode 100644 (file)
index 0000000..b1f68a1
--- /dev/null
@@ -0,0 +1,52 @@
+From 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Thu, 17 Aug 2017 13:12:44 -0700
+Subject: block: Relax a check in blk_start_queue()
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a upstream.
+
+Calling blk_start_queue() from interrupt context with the queue
+lock held and without disabling IRQs, as the skd driver does, is
+safe. This patch avoids that loading the skd driver triggers the
+following warning:
+
+WARNING: CPU: 11 PID: 1348 at block/blk-core.c:283 blk_start_queue+0x84/0xa0
+RIP: 0010:blk_start_queue+0x84/0xa0
+Call Trace:
+ skd_unquiesce_dev+0x12a/0x1d0 [skd]
+ skd_complete_internal+0x1e7/0x5a0 [skd]
+ skd_complete_other+0xc2/0xd0 [skd]
+ skd_isr_completion_posted.isra.30+0x2a5/0x470 [skd]
+ skd_isr+0x14f/0x180 [skd]
+ irq_forced_thread_fn+0x2a/0x70
+ irq_thread+0x144/0x1a0
+ kthread+0x125/0x140
+ ret_from_fork+0x2a/0x40
+
+Fixes: commit a038e2536472 ("[PATCH] blk_start_queue() must be called with irq disabled - add warning")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+Cc: Andrew Morton <akpm@osdl.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+  **/
+ void blk_start_queue(struct request_queue *q)
+ {
+-      WARN_ON(!irqs_disabled());
++      WARN_ON(!in_interrupt() && !irqs_disabled());
+       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+       __blk_run_queue(q);
diff --git a/queue-4.9/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch b/queue-4.9/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
new file mode 100644 (file)
index 0000000..88202d8
--- /dev/null
@@ -0,0 +1,51 @@
+From smueller@chronox.de  Fri Sep 22 11:04:43 2017
+From: Stephan Mueller <smueller@chronox.de>
+Date: Thu, 21 Sep 2017 10:16:53 +0200
+Subject: [PATCH - RESEND] crypto: AF_ALG - remove SGL terminator indicator when  chaining
+To: herbert@gondor.apana.org.au, greg@kroah.com
+Cc: linux-crypto@vger.kernel.org
+Message-ID: <5857040.2sfW0oRrdW@tauon.chronox.de>
+
+From: Stephan Mueller <smueller@chronox.de>
+
+Fixed differently upstream as commit 2d97591ef43d ("crypto: af_alg - consolidation of duplicate code")
+
+The SGL is MAX_SGL_ENTS + 1 in size. The last SG entry is used for the
+chaining and is properly updated with the sg_chain invocation. During
+the filling-in of the initial SG entries, sg_mark_end is called for each
+SG entry. This is appropriate as long as no additional SGL is chained
+with the current SGL. However, when a new SGL is chained and the last
+SG entry is updated with sg_chain, the last but one entry still contains
+the end marker from the sg_mark_end. This end marker must be removed as
+otherwise a walk of the chained SGLs will cause a NULL pointer
+dereference at the last but one SG entry, because sg_next will return
+NULL.
+
+The patch only applies to all kernels up to and including 4.13. The
+patch 2d97591ef43d0587be22ad1b0d758d6df4999a0b added to 4.14-rc1
+introduced a complete new code base which addresses this bug in
+a different way. Yet, that patch is too invasive for stable kernels
+and was therefore not marked for stable.
+
+Fixes: 8ff590903d5fc ("crypto: algif_skcipher - User-space interface for skcipher operations")
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_skcipher.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -143,8 +143,10 @@ static int skcipher_alloc_sgl(struct soc
+               sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+               sgl->cur = 0;
+-              if (sg)
++              if (sg) {
+                       sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++                      sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++              }
+               list_add_tail(&sgl->list, &ctx->tsgl);
+       }
diff --git a/queue-4.9/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch b/queue-4.9/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
new file mode 100644 (file)
index 0000000..eacc3a1
--- /dev/null
@@ -0,0 +1,176 @@
+From e652399edba99a5497f0d80f240c9075d3b43493 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Tue, 25 Jul 2017 14:12:11 -0500
+Subject: crypto: ccp - Fix XTS-AES-128 support on v5 CCPs
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit e652399edba99a5497f0d80f240c9075d3b43493 upstream.
+
+Version 5 CCPs have some new requirements for XTS-AES: the type field
+must be specified, and the key requires 512 bits, with each part
+occupying 256 bits and padded with zeroes.
+
+Signed-off-by: Gary R Hook <ghook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-crypto-aes-xts.c |    4 ++
+ drivers/crypto/ccp/ccp-dev-v5.c         |    2 +
+ drivers/crypto/ccp/ccp-dev.h            |    2 +
+ drivers/crypto/ccp/ccp-ops.c            |   43 +++++++++++++++++++++++++-------
+ include/linux/ccp.h                     |    3 +-
+ 5 files changed, 43 insertions(+), 11 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -1,8 +1,9 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+  *
+- * Copyright (C) 2013 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
++ * Author: Gary R Hook <gary.hook@amd.com>
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+@@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablk
+       memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+       INIT_LIST_HEAD(&rctx->cmd.entry);
+       rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
++      rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
+       rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+                                          : CCP_AES_ACTION_DECRYPT;
+       rctx->cmd.u.xts.unit_size = unit_size;
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -131,6 +131,7 @@ union ccp_function {
+ #define       CCP_AES_MODE(p)         ((p)->aes.mode)
+ #define       CCP_AES_TYPE(p)         ((p)->aes.type)
+ #define       CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
++#define       CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
+ #define       CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
+ #define       CCP_SHA_TYPE(p)         ((p)->sha.type)
+ #define       CCP_RSA_SIZE(p)         ((p)->rsa.size)
+@@ -318,6 +319,7 @@ static int ccp5_perform_xts_aes(struct c
+       CCP5_CMD_PROT(&desc) = 0;
+       function.raw = 0;
++      CCP_XTS_TYPE(&function) = op->u.xts.type;
+       CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+       CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+       CCP5_CMD_FUNCTION(&desc) = function.raw;
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -187,6 +187,7 @@
+ #define CCP_AES_CTX_SB_COUNT          1
+ #define CCP_XTS_AES_KEY_SB_COUNT      1
++#define CCP5_XTS_AES_KEY_SB_COUNT     2
+ #define CCP_XTS_AES_CTX_SB_COUNT      1
+ #define CCP_SHA_SB_COUNT              1
+@@ -472,6 +473,7 @@ struct ccp_aes_op {
+ };
+ struct ccp_xts_aes_op {
++      enum ccp_aes_type type;
+       enum ccp_aes_action action;
+       enum ccp_xts_aes_unit_size unit_size;
+ };
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -779,6 +779,8 @@ static int ccp_run_xts_aes_cmd(struct cc
+       struct ccp_op op;
+       unsigned int unit_size, dm_offset;
+       bool in_place = false;
++      unsigned int sb_count;
++      enum ccp_aes_type aestype;
+       int ret;
+       switch (xts->unit_size) {
+@@ -802,7 +804,9 @@ static int ccp_run_xts_aes_cmd(struct cc
+               return -EINVAL;
+       }
+-      if (xts->key_len != AES_KEYSIZE_128)
++      if (xts->key_len == AES_KEYSIZE_128)
++              aestype = CCP_AES_TYPE_128;
++      else
+               return -EINVAL;
+       if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+@@ -824,23 +828,44 @@ static int ccp_run_xts_aes_cmd(struct cc
+       op.sb_key = cmd_q->sb_key;
+       op.sb_ctx = cmd_q->sb_ctx;
+       op.init = 1;
++      op.u.xts.type = aestype;
+       op.u.xts.action = xts->action;
+       op.u.xts.unit_size = xts->unit_size;
+-      /* All supported key sizes fit in a single (32-byte) SB entry
+-       * and must be in little endian format. Use the 256-bit byte
+-       * swap passthru option to convert from big endian to little
+-       * endian.
++      /* A version 3 device only supports 128-bit keys, which fits into a
++       * single SB entry. A version 5 device uses a 512-bit vector, so two
++       * SB entries.
+        */
++      if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
++              sb_count = CCP_XTS_AES_KEY_SB_COUNT;
++      else
++              sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+       ret = ccp_init_dm_workarea(&key, cmd_q,
+-                                 CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
++                                 sb_count * CCP_SB_BYTES,
+                                  DMA_TO_DEVICE);
+       if (ret)
+               return ret;
+-      dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+-      ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+-      ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
++      if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
++              /* All supported key sizes must be in little endian format.
++               * Use the 256-bit byte swap passthru option to convert from
++               * big endian to little endian.
++               */
++              dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
++              ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++              ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++      } else {
++              /* Version 5 CCPs use a 512-bit space for the key: each portion
++               * occupies 256 bits, or one entire slot, and is zero-padded.
++               */
++              unsigned int pad;
++
++              dm_offset = CCP_SB_BYTES;
++              pad = dm_offset - xts->key_len;
++              ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++              ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
++                              xts->key_len);
++      }
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -1,7 +1,7 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) driver
+  *
+- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  * Author: Gary R Hook <gary.hook@amd.com>
+@@ -222,6 +222,7 @@ enum ccp_xts_aes_unit_size {
+  * AES operation the new IV overwrites the old IV.
+  */
+ struct ccp_xts_aes_engine {
++      enum ccp_aes_type type;
+       enum ccp_aes_action action;
+       enum ccp_xts_aes_unit_size unit_size;
diff --git a/queue-4.9/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch b/queue-4.9/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
new file mode 100644 (file)
index 0000000..ace7c81
--- /dev/null
@@ -0,0 +1,70 @@
+From b0a5a9589decd07db755d6a8d9c0910d96ff7992 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 24 Aug 2017 15:19:39 -0400
+Subject: ext4: fix incorrect quotaoff if the quota feature is enabled
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit b0a5a9589decd07db755d6a8d9c0910d96ff7992 upstream.
+
+Current ext4 quota should always be "usage enabled" if the
+quota feature is enabled. But in ext4_orphan_cleanup(), it
+turns quotas off directly (used for the older journaled
+quota), so we cannot turn it on again via "quotaon" unless
+we umount and remount ext4.
+
+Simple reproduce:
+
+  mkfs.ext4 -O project,quota /dev/vdb1
+  mount -o prjquota /dev/vdb1 /mnt
+  chattr -p 123 /mnt
+  chattr +P /mnt
+  touch /mnt/aa /mnt/bb
+  exec 100<>/mnt/aa
+  rm -f /mnt/aa
+  sync
+  echo c > /proc/sysrq-trigger
+
+  #reboot and mount
+  mount -o prjquota /dev/vdb1 /mnt
+  #query status
+  quotaon -Ppv /dev/vdb1
+  #output
+  quotaon: Cannot find mountpoint for device /dev/vdb1
+  quotaon: No correct mountpoint specified.
+
+This patch adds a check for journaled quotas to avoid an incorrect
+quotaoff when ext4 has the quota feature.
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2372,7 +2372,7 @@ static void ext4_orphan_cleanup(struct s
+ #ifdef CONFIG_QUOTA
+       /* Needed for iput() to work correctly and not trash data */
+       sb->s_flags |= MS_ACTIVE;
+-      /* Turn on quotas so that they are updated correctly */
++      /* Turn on journaled quotas so that they are updated correctly */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+               if (EXT4_SB(sb)->s_qf_names[i]) {
+                       int ret = ext4_quota_on_mount(sb, i);
+@@ -2438,9 +2438,9 @@ static void ext4_orphan_cleanup(struct s
+               ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+                      PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-      /* Turn quotas off */
++      /* Turn off journaled quotas if they were enabled for orphan cleanup */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-              if (sb_dqopt(sb)->files[i])
++              if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+                       dquot_quota_off(sb, i);
+       }
+ #endif
diff --git a/queue-4.9/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch b/queue-4.9/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
new file mode 100644 (file)
index 0000000..83a8bcf
--- /dev/null
@@ -0,0 +1,99 @@
+From 95f1fda47c9d8738f858c3861add7bf0a36a7c0b Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 24 Aug 2017 15:21:50 -0400
+Subject: ext4: fix quota inconsistency during orphan cleanup for read-only mounts
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit 95f1fda47c9d8738f858c3861add7bf0a36a7c0b upstream.
+
+Quota does not get enabled for read-only mounts if the filesystem
+has the quota feature, so quotas cannot be updated during orphan
+cleanup, which will lead to quota inconsistency.
+
+This patch turns on quotas during orphan cleanup for this case,
+making sure quotas can be updated correctly.
+
+Reported-by: Jan Kara <jack@suse.cz>
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |   38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2334,6 +2334,7 @@ static void ext4_orphan_cleanup(struct s
+       unsigned int s_flags = sb->s_flags;
+       int nr_orphans = 0, nr_truncates = 0;
+ #ifdef CONFIG_QUOTA
++      int quota_update = 0;
+       int i;
+ #endif
+       if (!es->s_last_orphan) {
+@@ -2372,14 +2373,32 @@ static void ext4_orphan_cleanup(struct s
+ #ifdef CONFIG_QUOTA
+       /* Needed for iput() to work correctly and not trash data */
+       sb->s_flags |= MS_ACTIVE;
+-      /* Turn on journaled quotas so that they are updated correctly */
++
++      /*
++       * Turn on quotas which were not enabled for read-only mounts if
++       * filesystem has quota feature, so that they are updated correctly.
++       */
++      if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
++              int ret = ext4_enable_quotas(sb);
++
++              if (!ret)
++                      quota_update = 1;
++              else
++                      ext4_msg(sb, KERN_ERR,
++                              "Cannot turn on quotas: error %d", ret);
++      }
++
++      /* Turn on journaled quotas used for old sytle */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+               if (EXT4_SB(sb)->s_qf_names[i]) {
+                       int ret = ext4_quota_on_mount(sb, i);
+-                      if (ret < 0)
++
++                      if (!ret)
++                              quota_update = 1;
++                      else
+                               ext4_msg(sb, KERN_ERR,
+                                       "Cannot turn on journaled "
+-                                      "quota: error %d", ret);
++                                      "quota: type %d: error %d", i, ret);
+               }
+       }
+ #endif
+@@ -2438,10 +2457,12 @@ static void ext4_orphan_cleanup(struct s
+               ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+                      PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-      /* Turn off journaled quotas if they were enabled for orphan cleanup */
+-      for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-              if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+-                      dquot_quota_off(sb, i);
++      /* Turn off quotas if they were enabled for orphan cleanup */
++      if (quota_update) {
++              for (i = 0; i < EXT4_MAXQUOTAS; i++) {
++                      if (sb_dqopt(sb)->files[i])
++                              dquot_quota_off(sb, i);
++              }
+       }
+ #endif
+       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+@@ -5365,6 +5386,9 @@ static int ext4_enable_quotas(struct sup
+                               DQUOT_USAGE_ENABLED |
+                               (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+                       if (err) {
++                              for (type--; type >= 0; type--)
++                                      dquot_quota_off(sb, type);
++
+                               ext4_warning(sb,
+                                       "Failed to enable quota tracking "
+                                       "(type=%d, err=%d). Please run "
diff --git a/queue-4.9/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch b/queue-4.9/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch
new file mode 100644 (file)
index 0000000..49982a2
--- /dev/null
@@ -0,0 +1,49 @@
+From e8a27f836f165c26f867ece7f31eb5c811692319 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 31 Aug 2017 10:23:25 +1000
+Subject: md/bitmap: disable bitmap_resize for file-backed bitmaps.
+
+From: NeilBrown <neilb@suse.com>
+
+commit e8a27f836f165c26f867ece7f31eb5c811692319 upstream.
+
+bitmap_resize() does not work for file-backed bitmaps.
+The buffer_heads are allocated and initialized when
+the bitmap is read from the file, but resize doesn't
+read from the file, it loads from the internal bitmap.
+When it comes time to write the new bitmap, the bh is
+non-existent and we crash.
+
+The common case when growing an array involves making the array larger,
+and that normally means making the bitmap larger.  Doing
+that inside the kernel is possible, but would need more code.
+It is probably easier to require people who use file-backed
+bitmaps to remove them and re-add after a reshape.
+
+So this patch disables the resizing of arrays which have
+file-backed bitmaps.  This is better than crashing.
+
+Reported-by: Zhilong Liu <zlliu@suse.com>
+Fixes: d60b479d177a ("md/bitmap: add bitmap_resize function to allow bitmap resizing.")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bitmap.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1992,6 +1992,11 @@ int bitmap_resize(struct bitmap *bitmap,
+       long pages;
+       struct bitmap_page *new_bp;
++      if (bitmap->storage.file && !init) {
++              pr_info("md: cannot resize file-based bitmap\n");
++              return -EINVAL;
++      }
++
+       if (chunksize == 0) {
+               /* If there is enough space, leave the chunk size unchanged,
+                * else increase by factor of two until there is enough space.
diff --git a/queue-4.9/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch b/queue-4.9/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
new file mode 100644 (file)
index 0000000..c7914b8
--- /dev/null
@@ -0,0 +1,265 @@
+From f9effe925039cf54489b5c04e0d40073bb3a123d Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 24 Aug 2017 20:49:57 +1000
+Subject: powerpc: Fix DAR reporting when alignment handler faults
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit f9effe925039cf54489b5c04e0d40073bb3a123d upstream.
+
+Anton noticed that if we fault part way through emulating an unaligned
+instruction, we don't update the DAR to reflect that.
+
+The DAR value is eventually reported back to userspace as the address
+in the SEGV signal, and if userspace is using that value to demand
+fault then it can be confused by us not setting the value correctly.
+
+This patch is ugly as hell, but is intended to be the minimal fix and
+back ports easily.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c |  119 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 74 insertions(+), 45 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *
+ #define SWIZ_PTR(p)           ((unsigned char __user *)((p) ^ swiz))
++#define __get_user_or_set_dar(_regs, _dest, _addr)            \
++      ({                                                      \
++              int rc = 0;                                     \
++              typeof(_addr) __addr = (_addr);                 \
++              if (__get_user_inatomic(_dest, __addr)) {       \
++                      _regs->dar = (unsigned long)__addr;     \
++                      rc = -EFAULT;                           \
++              }                                               \
++              rc;                                             \
++      })
++
++#define __put_user_or_set_dar(_regs, _src, _addr)             \
++      ({                                                      \
++              int rc = 0;                                     \
++              typeof(_addr) __addr = (_addr);                 \
++              if (__put_user_inatomic(_src, __addr)) {        \
++                      _regs->dar = (unsigned long)__addr;     \
++                      rc = -EFAULT;                           \
++              }                                               \
++              rc;                                             \
++      })
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+                           unsigned int reg, unsigned int nb,
+                           unsigned int flags, unsigned int instr,
+@@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_re
+               } else {
+                       unsigned long pc = regs->nip ^ (swiz & 4);
+-                      if (__get_user_inatomic(instr,
+-                                              (unsigned int __user *)pc))
++                      if (__get_user_or_set_dar(regs, instr,
++                                                (unsigned int __user *)pc))
+                               return -EFAULT;
++
+                       if (swiz == 0 && (flags & SW))
+                               instr = cpu_to_le32(instr);
+                       nb = (instr >> 11) & 0x1f;
+@@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_re
+                              ((nb0 + 3) / 4) * sizeof(unsigned long));
+               for (i = 0; i < nb; ++i, ++p)
+-                      if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-                                              SWIZ_PTR(p)))
++                      if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++                                                SWIZ_PTR(p)))
+                               return -EFAULT;
+               if (nb0 > 0) {
+                       rptr = &regs->gpr[0];
+                       addr += nb;
+                       for (i = 0; i < nb0; ++i, ++p)
+-                              if (__get_user_inatomic(REG_BYTE(rptr,
+-                                                               i ^ bswiz),
+-                                                      SWIZ_PTR(p)))
++                              if (__get_user_or_set_dar(regs,
++                                                        REG_BYTE(rptr, i ^ bswiz),
++                                                        SWIZ_PTR(p)))
+                                       return -EFAULT;
+               }
+       } else {
+               for (i = 0; i < nb; ++i, ++p)
+-                      if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-                                              SWIZ_PTR(p)))
++                      if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++                                                SWIZ_PTR(p)))
+                               return -EFAULT;
+               if (nb0 > 0) {
+                       rptr = &regs->gpr[0];
+                       addr += nb;
+                       for (i = 0; i < nb0; ++i, ++p)
+-                              if (__put_user_inatomic(REG_BYTE(rptr,
+-                                                               i ^ bswiz),
+-                                                      SWIZ_PTR(p)))
++                              if (__put_user_or_set_dar(regs,
++                                                        REG_BYTE(rptr, i ^ bswiz),
++                                                        SWIZ_PTR(p)))
+                                       return -EFAULT;
+               }
+       }
+@@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_re
+  * Only POWER6 has these instructions, and it does true little-endian,
+  * so we don't need the address swizzling.
+  */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+-                         unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++                         unsigned int reg, unsigned int flags)
+ {
+       char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+       char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+-      int i, ret, sw = 0;
++      int i, sw = 0;
+       if (reg & 1)
+               return 0;       /* invalid form: FRS/FRT must be even */
+       if (flags & SW)
+               sw = 7;
+-      ret = 0;
++
+       for (i = 0; i < 8; ++i) {
+               if (!(flags & ST)) {
+-                      ret |= __get_user(ptr0[i^sw], addr + i);
+-                      ret |= __get_user(ptr1[i^sw], addr + i + 8);
++                      if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               } else {
+-                      ret |= __put_user(ptr0[i^sw], addr + i);
+-                      ret |= __put_user(ptr1[i^sw], addr + i + 8);
++                      if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               }
+       }
+-      if (ret)
+-              return -EFAULT;
++
+       return 1;       /* exception handled and fixed up */
+ }
+@@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs
+ {
+       char *ptr0 = (char *)&regs->gpr[reg];
+       char *ptr1 = (char *)&regs->gpr[reg+1];
+-      int i, ret, sw = 0;
++      int i, sw = 0;
+       if (reg & 1)
+               return 0;       /* invalid form: GPR must be even */
+       if (flags & SW)
+               sw = 7;
+-      ret = 0;
++
+       for (i = 0; i < 8; ++i) {
+               if (!(flags & ST)) {
+-                      ret |= __get_user(ptr0[i^sw], addr + i);
+-                      ret |= __get_user(ptr1[i^sw], addr + i + 8);
++                      if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               } else {
+-                      ret |= __put_user(ptr0[i^sw], addr + i);
+-                      ret |= __put_user(ptr1[i^sw], addr + i + 8);
++                      if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               }
+       }
+-      if (ret)
+-              return -EFAULT;
++
+       return 1;       /* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __u
+       for (j = 0; j < length; j += elsize) {
+               for (i = 0; i < elsize; ++i) {
+                       if (flags & ST)
+-                              ret |= __put_user(ptr[i^sw], addr + i);
++                              ret = __put_user_or_set_dar(regs, ptr[i^sw],
++                                                          addr + i);
+                       else
+-                              ret |= __get_user(ptr[i^sw], addr + i);
++                              ret = __get_user_or_set_dar(regs, ptr[i^sw],
++                                                          addr + i);
++
++                      if (ret)
++                              return ret;
+               }
+               ptr  += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs)
+       unsigned int dsisr;
+       unsigned char __user *addr;
+       unsigned long p, swiz;
+-      int ret, i;
++      int i;
+       union data {
+               u64 ll;
+               double dd;
+@@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs)
+               if (flags & F) {
+                       /* Special case for 16-byte FP loads and stores */
+                       PPC_WARN_ALIGNMENT(fp_pair, regs);
+-                      return emulate_fp_pair(addr, reg, flags);
++                      return emulate_fp_pair(regs, addr, reg, flags);
+               } else {
+ #ifdef CONFIG_PPC64
+                       /* Special case for 16-byte loads and stores */
+@@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs)
+               }
+               data.ll = 0;
+-              ret = 0;
+               p = (unsigned long)addr;
+               for (i = 0; i < nb; i++)
+-                      ret |= __get_user_inatomic(data.v[start + i],
+-                                                 SWIZ_PTR(p++));
+-
+-              if (unlikely(ret))
+-                      return -EFAULT;
++                      if (__get_user_or_set_dar(regs, data.v[start + i],
++                                                SWIZ_PTR(p++)))
++                              return -EFAULT;
+       } else if (flags & F) {
+               data.ll = current->thread.TS_FPR(reg);
+@@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs)
+                       break;
+               }
+-              ret = 0;
+               p = (unsigned long)addr;
+               for (i = 0; i < nb; i++)
+-                      ret |= __put_user_inatomic(data.v[start + i],
+-                                                 SWIZ_PTR(p++));
++                      if (__put_user_or_set_dar(regs, data.v[start + i],
++                                                SWIZ_PTR(p++)))
++                              return -EFAULT;
+-              if (unlikely(ret))
+-                      return -EFAULT;
+       } else if (flags & F)
+               current->thread.TS_FPR(reg) = data.ll;
+       else
index 5b1881ce4b2d2235ee28ec23f22323da82271476..74c4b340a965f4ed34f0cdffb44f2369d18bc488 100644 (file)
@@ -23,3 +23,10 @@ mips-math-emu-maddf-msubf-.-d-s-fix-some-cases-of-zero-inputs.patch
 mips-math-emu-maddf-msubf-.-d-s-clean-up-maddf_flags-enumeration.patch
 mips-math-emu-maddf-msubf-.s-fix-accuracy-32-bit-case.patch
 mips-math-emu-maddf-msubf-.d-fix-accuracy-64-bit-case.patch
+crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
+crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
+ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
+ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
+powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
+block-relax-a-check-in-blk_start_queue.patch
+md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch