git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.13-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 22 Sep 2017 09:36:51 +0000 (11:36 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 22 Sep 2017 09:36:51 +0000 (11:36 +0200)
added patches:
block-directly-insert-blk-mq-request-from-blk_insert_cloned_request.patch
block-relax-a-check-in-blk_start_queue.patch
crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
crypto-caam-qi-fix-typo-in-authenc-alg-driver-name.patch
crypto-caam-qi-properly-set-iv-after-en-de-crypt.patch
crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
crypto-scompress-don-t-sleep-with-preemption-disabled.patch
cxl-fix-driver-use-count.patch
ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
ext4-in-ext4_seek_-hole-data-return-enxio-for-negative-offsets.patch
iwlwifi-add-workaround-to-disable-wide-channels-in-5ghz.patch
md-bitmap-copy-correct-data-for-bitmap-super.patch
md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch
powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
powerpc-powernv-npu-move-tlb-flush-before-launching-atsd.patch
powerpc-pseries-don-t-attempt-to-acquire-drc-during-memory-hot-add-for-assigned-lmbs.patch
regulator-cpcap-fix-standby-mode.patch
wcn36xx-introduce-mutual-exclusion-of-fw-configuration.patch

20 files changed:
queue-4.13/block-directly-insert-blk-mq-request-from-blk_insert_cloned_request.patch [new file with mode: 0644]
queue-4.13/block-relax-a-check-in-blk_start_queue.patch [new file with mode: 0644]
queue-4.13/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch [new file with mode: 0644]
queue-4.13/crypto-caam-qi-fix-typo-in-authenc-alg-driver-name.patch [new file with mode: 0644]
queue-4.13/crypto-caam-qi-properly-set-iv-after-en-de-crypt.patch [new file with mode: 0644]
queue-4.13/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch [new file with mode: 0644]
queue-4.13/crypto-scompress-don-t-sleep-with-preemption-disabled.patch [new file with mode: 0644]
queue-4.13/cxl-fix-driver-use-count.patch [new file with mode: 0644]
queue-4.13/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch [new file with mode: 0644]
queue-4.13/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch [new file with mode: 0644]
queue-4.13/ext4-in-ext4_seek_-hole-data-return-enxio-for-negative-offsets.patch [new file with mode: 0644]
queue-4.13/iwlwifi-add-workaround-to-disable-wide-channels-in-5ghz.patch [new file with mode: 0644]
queue-4.13/md-bitmap-copy-correct-data-for-bitmap-super.patch [new file with mode: 0644]
queue-4.13/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch [new file with mode: 0644]
queue-4.13/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch [new file with mode: 0644]
queue-4.13/powerpc-powernv-npu-move-tlb-flush-before-launching-atsd.patch [new file with mode: 0644]
queue-4.13/powerpc-pseries-don-t-attempt-to-acquire-drc-during-memory-hot-add-for-assigned-lmbs.patch [new file with mode: 0644]
queue-4.13/regulator-cpcap-fix-standby-mode.patch [new file with mode: 0644]
queue-4.13/series
queue-4.13/wcn36xx-introduce-mutual-exclusion-of-fw-configuration.patch [new file with mode: 0644]

diff --git a/queue-4.13/block-directly-insert-blk-mq-request-from-blk_insert_cloned_request.patch b/queue-4.13/block-directly-insert-blk-mq-request-from-blk_insert_cloned_request.patch
new file mode 100644 (file)
index 0000000..d848eb9
--- /dev/null
@@ -0,0 +1,100 @@
+From 157f377beb710e84bd8bc7a3c4475c0674ebebd7 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 11 Sep 2017 16:43:57 -0600
+Subject: block: directly insert blk-mq request from blk_insert_cloned_request()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 157f377beb710e84bd8bc7a3c4475c0674ebebd7 upstream.
+
+A NULL pointer crash was reported for the case of having the BFQ IO
+scheduler attached to the underlying blk-mq paths of a DM multipath
+device.  The crash occurred in blk_mq_sched_insert_request()'s call to
+e->type->ops.mq.insert_requests().
+
+Paolo Valente correctly summarized why the crash occurred with:
+"the call chain (dm_mq_queue_rq -> map_request -> setup_clone ->
+blk_rq_prep_clone) creates a cloned request without invoking
+e->type->ops.mq.prepare_request for the target elevator e.  The cloned
+request is therefore not initialized for the scheduler, but it is
+however inserted into the scheduler by blk_mq_sched_insert_request."
+
+All said, a request-based DM multipath device's IO scheduler should be
+the only one used -- when the original requests are issued to the
+underlying paths as cloned requests they are inserted directly in the
+underlying dispatch queue(s) rather than through an additional elevator.
+
+But commit bd166ef18 ("blk-mq-sched: add framework for MQ capable IO
+schedulers") switched blk_insert_cloned_request() from using
+blk_mq_insert_request() to blk_mq_sched_insert_request(), which
+incorrectly added elevator machinery into a call chain that isn't
+supposed to have any.
+
+To fix this introduce a blk-mq private blk_mq_request_bypass_insert()
+that blk_insert_cloned_request() calls to insert the request without
+involving any elevator that may be attached to the cloned request's
+request_queue.
+
+Fixes: bd166ef183c2 ("blk-mq-sched: add framework for MQ capable IO schedulers")
+Reported-by: Bart Van Assche <Bart.VanAssche@wdc.com>
+Tested-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c |    7 ++++++-
+ block/blk-mq.c   |   16 ++++++++++++++++
+ block/blk-mq.h   |    1 +
+ 3 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2330,7 +2330,12 @@ blk_status_t blk_insert_cloned_request(s
+       if (q->mq_ops) {
+               if (blk_queue_io_stat(q))
+                       blk_account_io_start(rq, true);
+-              blk_mq_sched_insert_request(rq, false, true, false, false);
++              /*
++               * Since we have a scheduler attached on the top device,
++               * bypass a potential scheduler on the bottom device for
++               * insert.
++               */
++              blk_mq_request_bypass_insert(rq);
+               return BLK_STS_OK;
+       }
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1357,6 +1357,22 @@ void __blk_mq_insert_request(struct blk_
+       blk_mq_hctx_mark_pending(hctx, ctx);
+ }
++/*
++ * Should only be used carefully, when the caller knows we want to
++ * bypass a potential IO scheduler on the target device.
++ */
++void blk_mq_request_bypass_insert(struct request *rq)
++{
++      struct blk_mq_ctx *ctx = rq->mq_ctx;
++      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
++
++      spin_lock(&hctx->lock);
++      list_add_tail(&rq->queuelist, &hctx->dispatch);
++      spin_unlock(&hctx->lock);
++
++      blk_mq_run_hw_queue(hctx, false);
++}
++
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+                           struct list_head *list)
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -54,6 +54,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_s
+  */
+ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                               bool at_head);
++void blk_mq_request_bypass_insert(struct request *rq);
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+                               struct list_head *list);
diff --git a/queue-4.13/block-relax-a-check-in-blk_start_queue.patch b/queue-4.13/block-relax-a-check-in-blk_start_queue.patch
new file mode 100644 (file)
index 0000000..a79d94c
--- /dev/null
@@ -0,0 +1,52 @@
+From 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Thu, 17 Aug 2017 13:12:44 -0700
+Subject: block: Relax a check in blk_start_queue()
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a upstream.
+
+Calling blk_start_queue() from interrupt context with the queue
+lock held and without disabling IRQs, as the skd driver does, is
+safe. This patch prevents the following warning from being triggered
+when the skd driver is loaded:
+
+WARNING: CPU: 11 PID: 1348 at block/blk-core.c:283 blk_start_queue+0x84/0xa0
+RIP: 0010:blk_start_queue+0x84/0xa0
+Call Trace:
+ skd_unquiesce_dev+0x12a/0x1d0 [skd]
+ skd_complete_internal+0x1e7/0x5a0 [skd]
+ skd_complete_other+0xc2/0xd0 [skd]
+ skd_isr_completion_posted.isra.30+0x2a5/0x470 [skd]
+ skd_isr+0x14f/0x180 [skd]
+ irq_forced_thread_fn+0x2a/0x70
+ irq_thread+0x144/0x1a0
+ kthread+0x125/0x140
+ ret_from_fork+0x2a/0x40
+
+Fixes: commit a038e2536472 ("[PATCH] blk_start_queue() must be called with irq disabled - add warning")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+Cc: Andrew Morton <akpm@osdl.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -280,7 +280,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+ void blk_start_queue(struct request_queue *q)
+ {
+       lockdep_assert_held(q->queue_lock);
+-      WARN_ON(!irqs_disabled());
++      WARN_ON(!in_interrupt() && !irqs_disabled());
+       WARN_ON_ONCE(q->mq_ops);
+       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
diff --git a/queue-4.13/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch b/queue-4.13/crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
new file mode 100644 (file)
index 0000000..8fb3da1
--- /dev/null
@@ -0,0 +1,51 @@
+From smueller@chronox.de  Fri Sep 22 11:04:43 2017
+From: Stephan Mueller <smueller@chronox.de>
+Date: Thu, 21 Sep 2017 10:16:53 +0200
+Subject: [PATCH - RESEND] crypto: AF_ALG - remove SGL terminator indicator when  chaining
+To: herbert@gondor.apana.org.au, greg@kroah.com
+Cc: linux-crypto@vger.kernel.org
+Message-ID: <5857040.2sfW0oRrdW@tauon.chronox.de>
+
+From: Stephan Mueller <smueller@chronox.de>
+
+Fixed differently upstream as commit 2d97591ef43d ("crypto: af_alg - consolidation of duplicate code")
+
+The SGL is MAX_SGL_ENTS + 1 in size. The last SG entry is used for the
+chaining and is properly updated with the sg_chain invocation. During
+the filling-in of the initial SG entries, sg_mark_end is called for each
+SG entry. This is appropriate as long as no additional SGL is chained
+with the current SGL. However, when a new SGL is chained and the last
+SG entry is updated with sg_chain, the last but one entry still contains
+the end marker from the sg_mark_end. This end marker must be removed as
+otherwise a walk of the chained SGLs will cause a NULL pointer
+dereference at the last but one SG entry, because sg_next will return
+NULL.
+
+The patch applies only to kernels up to and including 4.13. The
+patch 2d97591ef43d0587be22ad1b0d758d6df4999a0b added to 4.14-rc1
+introduced a complete new code base which addresses this bug in
+a different way. Yet, that patch is too invasive for stable kernels
+and was therefore not marked for stable.
+
+Fixes: 8ff590903d5fc ("crypto: algif_skcipher - User-space interface for skcipher operations")
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
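
For illustration, here is a hedged sketch of the chaining pattern the fix restores; EXAMPLE_ENTS and the function name are invented for the example, and the real change is in skcipher_alloc_sgl() in the diff below.

#include <linux/scatterlist.h>

/* Illustrative only: EXAMPLE_ENTS data slots plus one slot reserved for
 * chaining, mirroring MAX_SGL_ENTS + 1 in algif_skcipher. */
#define EXAMPLE_ENTS 4

static void example_chain_tables(struct scatterlist *first,  /* EXAMPLE_ENTS + 1 slots */
				 struct scatterlist *second) /* table to chain in */
{
	/*
	 * Assume first[0..EXAMPLE_ENTS - 1] were filled one at a time with
	 * sg_mark_end() called on each, so first[EXAMPLE_ENTS - 1] still
	 * carries the end marker.
	 */
	sg_chain(first, EXAMPLE_ENTS + 1, second); /* last slot becomes a chain link */

	/*
	 * Without this, a walk stops at first[EXAMPLE_ENTS - 1]: sg_next()
	 * sees the stale end marker and returns NULL before ever reaching
	 * the chained table.
	 */
	sg_unmark_end(&first[EXAMPLE_ENTS - 1]);
}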
+---
+ crypto/algif_skcipher.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -144,8 +144,10 @@ static int skcipher_alloc_sgl(struct soc
+               sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+               sgl->cur = 0;
+-              if (sg)
++              if (sg) {
+                       sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++                      sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++              }
+               list_add_tail(&sgl->list, &ctx->tsgl);
+       }
diff --git a/queue-4.13/crypto-caam-qi-fix-typo-in-authenc-alg-driver-name.patch b/queue-4.13/crypto-caam-qi-fix-typo-in-authenc-alg-driver-name.patch
new file mode 100644 (file)
index 0000000..0c658e5
--- /dev/null
@@ -0,0 +1,34 @@
+From 84ea95436b83884fa55780618ffaf4bbe3312166 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Mon, 10 Jul 2017 08:40:27 +0300
+Subject: crypto: caam/qi - fix typo in authenc alg driver name
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 84ea95436b83884fa55780618ffaf4bbe3312166 upstream.
+
+s/desi/des for echainiv(authenc(hmac(sha256),cbc(des))) alg.
+
+Fixes: b189817cf7894 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -1968,7 +1968,7 @@ static struct caam_aead_alg driver_aeads
+                               .cra_name = "echainiv(authenc(hmac(sha256),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+-                                                 "hmac-sha256-cbc-desi-"
++                                                 "hmac-sha256-cbc-des-"
+                                                  "caam-qi",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
diff --git a/queue-4.13/crypto-caam-qi-properly-set-iv-after-en-de-crypt.patch b/queue-4.13/crypto-caam-qi-properly-set-iv-after-en-de-crypt.patch
new file mode 100644 (file)
index 0000000..a6cb197
--- /dev/null
@@ -0,0 +1,53 @@
+From a68a193805224d90bedd94e9e8ac287600f07b78 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Mon, 10 Jul 2017 08:40:30 +0300
+Subject: crypto: caam/qi - properly set IV after {en,de}crypt
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit a68a193805224d90bedd94e9e8ac287600f07b78 upstream.
+
+caam/qi needs a fix similar to what was done for caam/jr in
+commit "crypto: caam/qi - properly set IV after {en,de}crypt",
+to allow for ablkcipher/skcipher chunking/streaming.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Suggested-by: David Gstir <david@sigma-star.at>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -776,9 +776,9 @@ static void ablkcipher_done(struct caam_
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *qidev = caam_ctx->qidev;
+-#ifdef DEBUG
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++#ifdef DEBUG
+       dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+ #endif
+@@ -799,6 +799,13 @@ static void ablkcipher_done(struct caam_
+       ablkcipher_unmap(qidev, edesc, req);
+       qi_cache_free(edesc);
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block. This is used e.g. by the CTS mode.
++       */
++      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++                               ivsize, 0);
++
+       ablkcipher_request_complete(req, status);
+ }
diff --git a/queue-4.13/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch b/queue-4.13/crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
new file mode 100644 (file)
index 0000000..6c4d891
--- /dev/null
@@ -0,0 +1,176 @@
+From e652399edba99a5497f0d80f240c9075d3b43493 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Tue, 25 Jul 2017 14:12:11 -0500
+Subject: crypto: ccp - Fix XTS-AES-128 support on v5 CCPs
+
+From: Gary R Hook <gary.hook@amd.com>
+
+commit e652399edba99a5497f0d80f240c9075d3b43493 upstream.
+
+Version 5 CCPs have some new requirements for XTS-AES: the type field
+must be specified, and the key requires 512 bits, with each part
+occupying 256 bits and padded with zeroes.
+
+Signed-off-by: Gary R Hook <ghook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
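As a hedged sketch of the 512-bit key layout described above (the helper name and SLOT_BYTES are illustrative; the driver's real code uses ccp_set_dm_area() on a DMA workarea, shown in the diff below): for XTS-AES-128 the caller supplies a 32-byte key, a 16-byte cipher key followed by a 16-byte tweak key, and each half lands in its own 256-bit slot with the unused bytes left as zeroes.

#include <linux/string.h>
#include <linux/types.h>

#define SLOT_BYTES	32	/* one 256-bit storage-block entry */

/* Hypothetical helper, for illustration only. */
static void layout_v5_xts_key(u8 *dst,               /* 2 * SLOT_BYTES, pre-zeroed */
			      const u8 *key,          /* cipher key || tweak key */
			      unsigned int half_len)  /* 16 for AES-128 */
{
	unsigned int pad = SLOT_BYTES - half_len;

	memcpy(dst + pad, key, half_len);                         /* slot 0 */
	memcpy(dst + SLOT_BYTES + pad, key + half_len, half_len); /* slot 1 */
}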
+---
+ drivers/crypto/ccp/ccp-crypto-aes-xts.c |    4 ++
+ drivers/crypto/ccp/ccp-dev-v5.c         |    2 +
+ drivers/crypto/ccp/ccp-dev.h            |    2 +
+ drivers/crypto/ccp/ccp-ops.c            |   43 +++++++++++++++++++++++++-------
+ include/linux/ccp.h                     |    3 +-
+ 5 files changed, 43 insertions(+), 11 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -1,8 +1,9 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+  *
+- * Copyright (C) 2013 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
++ * Author: Gary R Hook <gary.hook@amd.com>
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+@@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablk
+       memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+       INIT_LIST_HEAD(&rctx->cmd.entry);
+       rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
++      rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
+       rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+                                          : CCP_AES_ACTION_DECRYPT;
+       rctx->cmd.u.xts.unit_size = unit_size;
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -145,6 +145,7 @@ union ccp_function {
+ #define       CCP_AES_MODE(p)         ((p)->aes.mode)
+ #define       CCP_AES_TYPE(p)         ((p)->aes.type)
+ #define       CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
++#define       CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
+ #define       CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
+ #define       CCP_DES3_SIZE(p)        ((p)->des3.size)
+ #define       CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
+@@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct c
+       CCP5_CMD_PROT(&desc) = 0;
+       function.raw = 0;
++      CCP_XTS_TYPE(&function) = op->u.xts.type;
+       CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+       CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+       CCP5_CMD_FUNCTION(&desc) = function.raw;
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -192,6 +192,7 @@
+ #define CCP_AES_CTX_SB_COUNT          1
+ #define CCP_XTS_AES_KEY_SB_COUNT      1
++#define CCP5_XTS_AES_KEY_SB_COUNT     2
+ #define CCP_XTS_AES_CTX_SB_COUNT      1
+ #define CCP_DES3_KEY_SB_COUNT         1
+@@ -497,6 +498,7 @@ struct ccp_aes_op {
+ };
+ struct ccp_xts_aes_op {
++      enum ccp_aes_type type;
+       enum ccp_aes_action action;
+       enum ccp_xts_aes_unit_size unit_size;
+ };
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct cc
+       struct ccp_op op;
+       unsigned int unit_size, dm_offset;
+       bool in_place = false;
++      unsigned int sb_count;
++      enum ccp_aes_type aestype;
+       int ret;
+       switch (xts->unit_size) {
+@@ -1061,7 +1063,9 @@ static int ccp_run_xts_aes_cmd(struct cc
+               return -EINVAL;
+       }
+-      if (xts->key_len != AES_KEYSIZE_128)
++      if (xts->key_len == AES_KEYSIZE_128)
++              aestype = CCP_AES_TYPE_128;
++      else
+               return -EINVAL;
+       if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+@@ -1083,23 +1087,44 @@ static int ccp_run_xts_aes_cmd(struct cc
+       op.sb_key = cmd_q->sb_key;
+       op.sb_ctx = cmd_q->sb_ctx;
+       op.init = 1;
++      op.u.xts.type = aestype;
+       op.u.xts.action = xts->action;
+       op.u.xts.unit_size = xts->unit_size;
+-      /* All supported key sizes fit in a single (32-byte) SB entry
+-       * and must be in little endian format. Use the 256-bit byte
+-       * swap passthru option to convert from big endian to little
+-       * endian.
++      /* A version 3 device only supports 128-bit keys, which fits into a
++       * single SB entry. A version 5 device uses a 512-bit vector, so two
++       * SB entries.
+        */
++      if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
++              sb_count = CCP_XTS_AES_KEY_SB_COUNT;
++      else
++              sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+       ret = ccp_init_dm_workarea(&key, cmd_q,
+-                                 CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
++                                 sb_count * CCP_SB_BYTES,
+                                  DMA_TO_DEVICE);
+       if (ret)
+               return ret;
+-      dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+-      ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+-      ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
++      if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
++              /* All supported key sizes must be in little endian format.
++               * Use the 256-bit byte swap passthru option to convert from
++               * big endian to little endian.
++               */
++              dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
++              ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++              ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++      } else {
++              /* Version 5 CCPs use a 512-bit space for the key: each portion
++               * occupies 256 bits, or one entire slot, and is zero-padded.
++               */
++              unsigned int pad;
++
++              dm_offset = CCP_SB_BYTES;
++              pad = dm_offset - xts->key_len;
++              ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++              ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
++                              xts->key_len);
++      }
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -1,7 +1,7 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) driver
+  *
+- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  * Author: Gary R Hook <gary.hook@amd.com>
+@@ -231,6 +231,7 @@ enum ccp_xts_aes_unit_size {
+  * AES operation the new IV overwrites the old IV.
+  */
+ struct ccp_xts_aes_engine {
++      enum ccp_aes_type type;
+       enum ccp_aes_action action;
+       enum ccp_xts_aes_unit_size unit_size;
diff --git a/queue-4.13/crypto-scompress-don-t-sleep-with-preemption-disabled.patch b/queue-4.13/crypto-scompress-don-t-sleep-with-preemption-disabled.patch
new file mode 100644 (file)
index 0000000..4429758
--- /dev/null
@@ -0,0 +1,35 @@
+From 3c08377262880afc1621ab9cb6dbe7df47a6033d Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Fri, 21 Jul 2017 16:42:36 +0100
+Subject: crypto: scompress - don't sleep with preemption disabled
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 3c08377262880afc1621ab9cb6dbe7df47a6033d upstream.
+
+Due to the use of per-CPU buffers, scomp_acomp_comp_decomp() executes
+with preemption disabled, and so whether the CRYPTO_TFM_REQ_MAY_SLEEP
+flag is set is irrelevant, since we cannot sleep anyway. So disregard
+the flag, and use GFP_ATOMIC unconditionally.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
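A minimal sketch of the rule being applied, assuming a made-up helper name; the driver's actual change is the one-line GFP flag switch in the diff below.

#include <linux/preempt.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate while preemption is disabled, e.g. while a
 * per-CPU scratch buffer is owned.  Sleeping is illegal in that window, so
 * GFP_ATOMIC is the only safe choice regardless of the request flags. */
static void *alloc_while_preempt_off(size_t len)
{
	void *p;

	preempt_disable();              /* per-CPU buffer in use from here on */
	p = kmalloc(len, GFP_ATOMIC);   /* GFP_KERNEL could sleep: not allowed */
	preempt_enable();

	return p;
}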
+---
+ crypto/scompress.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -211,9 +211,7 @@ static int scomp_acomp_comp_decomp(struc
+                                             scratch_dst, &req->dlen, *ctx);
+       if (!ret) {
+               if (!req->dst) {
+-                      req->dst = crypto_scomp_sg_alloc(req->dlen,
+-                                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+-                                 GFP_KERNEL : GFP_ATOMIC);
++                      req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
+                       if (!req->dst)
+                               goto out;
+               }
diff --git a/queue-4.13/cxl-fix-driver-use-count.patch b/queue-4.13/cxl-fix-driver-use-count.patch
new file mode 100644 (file)
index 0000000..87516cb
--- /dev/null
@@ -0,0 +1,81 @@
+From 197267d0356004a31c4d6b6336598f5dff3301e1 Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Date: Wed, 30 Aug 2017 12:15:49 +0200
+Subject: cxl: Fix driver use count
+
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+
+commit 197267d0356004a31c4d6b6336598f5dff3301e1 upstream.
+
+cxl keeps a driver use count, which is used with the hash memory model
+on p8 to know when to upgrade local TLBIs to global and to trigger
+callbacks to manage the MMU for PSL8.
+
+If a process opens a context and closes without attaching or fails the
+attachment, the driver use count is never decremented. As a
+consequence, TLB invalidations remain global, even if there are no
+active cxl contexts.
+
+We should increment the driver use count when the process is attaching
+to the cxl adapter, and not on open. It's not needed before the adapter
+starts using the context, and since the use count is decremented on the
+detach path, incrementing it on attach makes more sense.
+
+It affects only the user api. The kernel api is already doing The
+Right Thing.
+
+Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Fixes: 7bb5d91a4dda ("cxl: Rework context lifetimes")
+Acked-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/cxl/api.c  |    4 ++++
+ drivers/misc/cxl/file.c |    8 +++++++-
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -336,6 +336,10 @@ int cxl_start_context(struct cxl_context
+                       mmput(ctx->mm);
+       }
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
+       cxl_ctx_get();
+       if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -95,7 +95,6 @@ static int __afu_open(struct inode *inod
+       pr_devel("afu_open pe: %i\n", ctx->pe);
+       file->private_data = ctx;
+-      cxl_ctx_get();
+       /* indicate success */
+       rc = 0;
+@@ -225,6 +224,12 @@ static long afu_ioctl_start_work(struct
+       if (ctx->mm)
+               mmput(ctx->mm);
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
++      cxl_ctx_get();
++
+       trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+       if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+@@ -233,6 +238,7 @@ static long afu_ioctl_start_work(struct
+               cxl_adapter_context_put(ctx->afu->adapter);
+               put_pid(ctx->pid);
+               ctx->pid = NULL;
++              cxl_ctx_put();
+               cxl_context_mm_count_put(ctx);
+               goto out;
+       }
diff --git a/queue-4.13/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch b/queue-4.13/ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
new file mode 100644 (file)
index 0000000..3dc561f
--- /dev/null
@@ -0,0 +1,70 @@
+From b0a5a9589decd07db755d6a8d9c0910d96ff7992 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 24 Aug 2017 15:19:39 -0400
+Subject: ext4: fix incorrect quotaoff if the quota feature is enabled
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit b0a5a9589decd07db755d6a8d9c0910d96ff7992 upstream.
+
+Current ext4 quota should always be "usage enabled" if the
+quota feature is enabled. But ext4_orphan_cleanup() turns
+quotas off directly (a path meant for the older journaled
+quota), so we cannot turn them on again via "quotaon" unless
+we umount and remount ext4.
+
+Simple reproduce:
+
+  mkfs.ext4 -O project,quota /dev/vdb1
+  mount -o prjquota /dev/vdb1 /mnt
+  chattr -p 123 /mnt
+  chattr +P /mnt
+  touch /mnt/aa /mnt/bb
+  exec 100<>/mnt/aa
+  rm -f /mnt/aa
+  sync
+  echo c > /proc/sysrq-trigger
+
+  #reboot and mount
+  mount -o prjquota /dev/vdb1 /mnt
+  #query status
+  quotaon -Ppv /dev/vdb1
+  #output
+  quotaon: Cannot find mountpoint for device /dev/vdb1
+  quotaon: No correct mountpoint specified.
+
+This patch adds a check for journaled quotas to avoid an incorrect
+quotaoff when ext4 has the quota feature.
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2442,7 +2442,7 @@ static void ext4_orphan_cleanup(struct s
+ #ifdef CONFIG_QUOTA
+       /* Needed for iput() to work correctly and not trash data */
+       sb->s_flags |= MS_ACTIVE;
+-      /* Turn on quotas so that they are updated correctly */
++      /* Turn on journaled quotas so that they are updated correctly */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+               if (EXT4_SB(sb)->s_qf_names[i]) {
+                       int ret = ext4_quota_on_mount(sb, i);
+@@ -2510,9 +2510,9 @@ static void ext4_orphan_cleanup(struct s
+               ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+                      PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-      /* Turn quotas off */
++      /* Turn off journaled quotas if they were enabled for orphan cleanup */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-              if (sb_dqopt(sb)->files[i])
++              if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+                       dquot_quota_off(sb, i);
+       }
+ #endif
diff --git a/queue-4.13/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch b/queue-4.13/ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
new file mode 100644 (file)
index 0000000..04cce1d
--- /dev/null
@@ -0,0 +1,99 @@
+From 95f1fda47c9d8738f858c3861add7bf0a36a7c0b Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 24 Aug 2017 15:21:50 -0400
+Subject: ext4: fix quota inconsistency during orphan cleanup for read-only mounts
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit 95f1fda47c9d8738f858c3861add7bf0a36a7c0b upstream.
+
+Quota does not get enabled for read-only mounts if the filesystem
+has the quota feature, so quotas cannot be updated during orphan
+cleanup, which will lead to quota inconsistency.
+
+This patch turns on quotas during orphan cleanup for this case,
+making sure quotas can be updated correctly.
+
+Reported-by: Jan Kara <jack@suse.cz>
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |   38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2404,6 +2404,7 @@ static void ext4_orphan_cleanup(struct s
+       unsigned int s_flags = sb->s_flags;
+       int ret, nr_orphans = 0, nr_truncates = 0;
+ #ifdef CONFIG_QUOTA
++      int quota_update = 0;
+       int i;
+ #endif
+       if (!es->s_last_orphan) {
+@@ -2442,14 +2443,32 @@ static void ext4_orphan_cleanup(struct s
+ #ifdef CONFIG_QUOTA
+       /* Needed for iput() to work correctly and not trash data */
+       sb->s_flags |= MS_ACTIVE;
+-      /* Turn on journaled quotas so that they are updated correctly */
++
++      /*
++       * Turn on quotas which were not enabled for read-only mounts if
++       * filesystem has quota feature, so that they are updated correctly.
++       */
++      if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
++              int ret = ext4_enable_quotas(sb);
++
++              if (!ret)
++                      quota_update = 1;
++              else
++                      ext4_msg(sb, KERN_ERR,
++                              "Cannot turn on quotas: error %d", ret);
++      }
++
++      /* Turn on journaled quotas used for old style */
+       for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+               if (EXT4_SB(sb)->s_qf_names[i]) {
+                       int ret = ext4_quota_on_mount(sb, i);
+-                      if (ret < 0)
++
++                      if (!ret)
++                              quota_update = 1;
++                      else
+                               ext4_msg(sb, KERN_ERR,
+                                       "Cannot turn on journaled "
+-                                      "quota: error %d", ret);
++                                      "quota: type %d: error %d", i, ret);
+               }
+       }
+ #endif
+@@ -2510,10 +2529,12 @@ static void ext4_orphan_cleanup(struct s
+               ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+                      PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-      /* Turn off journaled quotas if they were enabled for orphan cleanup */
+-      for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-              if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+-                      dquot_quota_off(sb, i);
++      /* Turn off quotas if they were enabled for orphan cleanup */
++      if (quota_update) {
++              for (i = 0; i < EXT4_MAXQUOTAS; i++) {
++                      if (sb_dqopt(sb)->files[i])
++                              dquot_quota_off(sb, i);
++              }
+       }
+ #endif
+       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+@@ -5512,6 +5533,9 @@ static int ext4_enable_quotas(struct sup
+                               DQUOT_USAGE_ENABLED |
+                               (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+                       if (err) {
++                              for (type--; type >= 0; type--)
++                                      dquot_quota_off(sb, type);
++
+                               ext4_warning(sb,
+                                       "Failed to enable quota tracking "
+                                       "(type=%d, err=%d). Please run "
diff --git a/queue-4.13/ext4-in-ext4_seek_-hole-data-return-enxio-for-negative-offsets.patch b/queue-4.13/ext4-in-ext4_seek_-hole-data-return-enxio-for-negative-offsets.patch
new file mode 100644 (file)
index 0000000..6b44d8f
--- /dev/null
@@ -0,0 +1,42 @@
+From 1bd8d6cd3e413d64e543ec3e69ff43e75a1cf1ea Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Thu, 24 Aug 2017 13:22:06 -0400
+Subject: ext4: in ext4_seek_{hole,data}, return -ENXIO for negative offsets
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+commit 1bd8d6cd3e413d64e543ec3e69ff43e75a1cf1ea upstream.
+
+In the ext4 implementations of SEEK_HOLE and SEEK_DATA, make sure we
+return -ENXIO for negative offsets instead of banging around inside
+the extent code and returning -EFSCORRUPTED.
+
+Reported-by: Mateusz S <muttdini@gmail.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
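A hedged user-space illustration of the behaviour this enforces (the path is hypothetical): after the fix, a negative offset with SEEK_HOLE or SEEK_DATA fails with ENXIO instead of wandering into the extent code.

#define _GNU_SOURCE             /* for SEEK_HOLE/SEEK_DATA */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/somefile", O_RDONLY);  /* hypothetical path */

	if (fd < 0)
		return 1;

	if (lseek(fd, -1, SEEK_HOLE) == (off_t)-1 && errno == ENXIO)
		printf("negative offset: ENXIO, as expected\n");

	close(fd);
	return 0;
}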
+---
+ fs/ext4/file.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -595,7 +595,7 @@ static loff_t ext4_seek_data(struct file
+       inode_lock(inode);
+       isize = i_size_read(inode);
+-      if (offset >= isize) {
++      if (offset < 0 || offset >= isize) {
+               inode_unlock(inode);
+               return -ENXIO;
+       }
+@@ -658,7 +658,7 @@ static loff_t ext4_seek_hole(struct file
+       inode_lock(inode);
+       isize = i_size_read(inode);
+-      if (offset >= isize) {
++      if (offset < 0 || offset >= isize) {
+               inode_unlock(inode);
+               return -ENXIO;
+       }
diff --git a/queue-4.13/iwlwifi-add-workaround-to-disable-wide-channels-in-5ghz.patch b/queue-4.13/iwlwifi-add-workaround-to-disable-wide-channels-in-5ghz.patch
new file mode 100644 (file)
index 0000000..c12b26c
--- /dev/null
@@ -0,0 +1,189 @@
+From 01a9c948a09348950515bf2abb6113ed83e696d8 Mon Sep 17 00:00:00 2001
+From: Luca Coelho <luciano.coelho@intel.com>
+Date: Tue, 15 Aug 2017 20:48:41 +0300
+Subject: iwlwifi: add workaround to disable wide channels in 5GHz
+
+From: Luca Coelho <luciano.coelho@intel.com>
+
+commit 01a9c948a09348950515bf2abb6113ed83e696d8 upstream.
+
+The OTP in some SKUs has erroneously allowed 40MHz and 80MHz channels
+in the 5.2GHz band.  The firmware has been modified to not allow this
+in those SKUs, so the driver needs to do the same, otherwise the
+firmware will assert when we try to use those channels.
+
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
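A small worked example of the SKU check added below; the subsystem-ID value is invented purely to show the bit arithmetic (bits 1-4 of the subsystem ID select the SKU, and values 5 and 9 identify the affected SKUs).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t subsystem_id = 0x5012;            /* hypothetical OTP value */
	uint8_t sku = (subsystem_id & 0x1e) >> 1;  /* extract bits 1-4 */

	/* sku == 9 here, so wide 5GHz channels would be force-disabled */
	printf("sku = %u\n", sku);
	return 0;
}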
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c
+index ae03d0f5564f..e81f6dd3744e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c
+@@ -148,7 +148,8 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt)
+                       rsp->regulatory.channel_profile,
+                       nvm->valid_tx_ant & fwrt->fw->valid_tx_ant,
+                       nvm->valid_rx_ant & fwrt->fw->valid_rx_ant,
+-                      rsp->regulatory.lar_enabled && lar_fw_supported);
++                      rsp->regulatory.lar_enabled && lar_fw_supported,
++                      false);
+       iwl_free_resp(&hcmd);
+       return nvm;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index 1172e4572a82..ea165b3e6dd3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -79,6 +79,7 @@
+ /* NVM offsets (in words) definitions */
+ enum wkp_nvm_offsets {
+       /* NVM HW-Section offset (in words) definitions */
++      SUBSYSTEM_ID = 0x0A,
+       HW_ADDR = 0x15,
+       /* NVM SW-Section offset (in words) definitions */
+@@ -258,13 +259,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+                               struct iwl_nvm_data *data,
+                               const __le16 * const nvm_ch_flags,
+-                              bool lar_supported)
++                              bool lar_supported, bool no_wide_in_5ghz)
+ {
+       int ch_idx;
+       int n_channels = 0;
+       struct ieee80211_channel *channel;
+       u16 ch_flags;
+-      bool is_5ghz;
+       int num_of_ch, num_2ghz_channels;
+       const u8 *nvm_chan;
+@@ -279,12 +279,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+       }
+       for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
++              bool is_5ghz = (ch_idx >= num_2ghz_channels);
++
+               ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+-              if (ch_idx >= num_2ghz_channels &&
+-                  !data->sku_cap_band_52GHz_enable)
++              if (is_5ghz && !data->sku_cap_band_52GHz_enable)
+                       continue;
++              /* workaround to disable wide channels in 5GHz */
++              if (no_wide_in_5ghz && is_5ghz) {
++                      ch_flags &= ~(NVM_CHANNEL_40MHZ |
++                                   NVM_CHANNEL_80MHZ |
++                                   NVM_CHANNEL_160MHZ);
++              }
++
+               if (ch_flags & NVM_CHANNEL_160MHZ)
+                       data->vht160_supported = true;
+@@ -307,8 +315,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+               n_channels++;
+               channel->hw_value = nvm_chan[ch_idx];
+-              channel->band = (ch_idx < num_2ghz_channels) ?
+-                              NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
++              channel->band = is_5ghz ?
++                              NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+               channel->center_freq =
+                       ieee80211_channel_to_frequency(
+                               channel->hw_value, channel->band);
+@@ -320,7 +328,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+                * is not used in mvm, and is used for backwards compatibility
+                */
+               channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
+-              is_5ghz = channel->band == NL80211_BAND_5GHZ;
+               /* don't put limitations in case we're using LAR */
+               if (!lar_supported)
+@@ -438,14 +445,15 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
+ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+                    struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+-                   u8 tx_chains, u8 rx_chains, bool lar_supported)
++                   u8 tx_chains, u8 rx_chains, bool lar_supported,
++                   bool no_wide_in_5ghz)
+ {
+       int n_channels;
+       int n_used = 0;
+       struct ieee80211_supported_band *sband;
+       n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+-                                        lar_supported);
++                                        lar_supported, no_wide_in_5ghz);
+       sband = &data->bands[NL80211_BAND_2GHZ];
+       sband->band = NL80211_BAND_2GHZ;
+       sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+@@ -651,6 +659,39 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
+       return 0;
+ }
++static bool
++iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
++                      const __le16 *nvm_hw)
++{
++      /*
++       * Workaround a bug in Indonesia SKUs where the regulatory in
++       * some 7000-family OTPs erroneously allow wide channels in
++       * 5GHz.  To check for Indonesia, we take the SKU value from
++       * bits 1-4 in the subsystem ID and check if it is either 5 or
++       * 9.  In those cases, we need to force-disable wide channels
++       * in 5GHz otherwise the FW will throw a sysassert when we try
++       * to use them.
++       */
++      if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
++              /*
++               * Unlike the other sections in the NVM, the hw
++               * section uses big-endian.
++               */
++              u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
++                                              + SUBSYSTEM_ID);
++              u8 sku = (subsystem_id & 0x1e) >> 1;
++
++              if (sku == 5 || sku == 9) {
++                      IWL_DEBUG_EEPROM(dev,
++                                       "disabling wide channels in 5GHz (0x%0x %d)\n",
++                                       subsystem_id, sku);
++                      return true;
++              }
++      }
++
++      return false;
++}
++
+ struct iwl_nvm_data *
+ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const __le16 *nvm_hw, const __le16 *nvm_sw,
+@@ -661,6 +702,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+       struct device *dev = trans->dev;
+       struct iwl_nvm_data *data;
+       bool lar_enabled;
++      bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
+       u32 sku, radio_cfg;
+       u16 lar_config;
+       const __le16 *ch_section;
+@@ -731,7 +773,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+       }
+       iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+-                      lar_fw_supported && lar_enabled);
++                      lar_fw_supported && lar_enabled, no_wide_in_5ghz);
+       data->calib_version = 255;
+       return data;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+index 3fd6506a02ab..50d9b3eaa4f8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+@@ -93,7 +93,8 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+  */
+ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+                    struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+-                   u8 tx_chains, u8 rx_chains, bool lar_supported);
++                   u8 tx_chains, u8 rx_chains, bool lar_supported,
++                   bool no_wide_in_5ghz);
+ /**
+  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
diff --git a/queue-4.13/md-bitmap-copy-correct-data-for-bitmap-super.patch b/queue-4.13/md-bitmap-copy-correct-data-for-bitmap-super.patch
new file mode 100644 (file)
index 0000000..99a9efd
--- /dev/null
@@ -0,0 +1,46 @@
+From 8031c3ddc70ab93099e7d1814382dba39f57b43e Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shli@fb.com>
+Date: Thu, 17 Aug 2017 10:35:11 -0700
+Subject: md/bitmap: copy correct data for bitmap super
+
+From: Shaohua Li <shli@fb.com>
+
+commit 8031c3ddc70ab93099e7d1814382dba39f57b43e upstream.
+
+The raid5 cache could write the bitmap superblock before the bitmap
+superblock is initialized. The bitmap superblock is less than 512B. The
+current code only copies the superblock to a new page and writes the whole
+512B, which zeroes the data after the superblock. Unfortunately that data
+could include the bitmap, which we should preserve. This patch makes the
+superblock read use a 4k chunk and always copies the full 4k to the new
+page, so the superblock write puts the old data back to disk and the
+bitmap is not changed.
+
+Reported-by: Song Liu <songliubraving@fb.com>
+Reviewed-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
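A hedged sketch of the copy-size problem (the helper name is made up; the real change simply switches sizeof(bitmap_super_t) to PAGE_SIZE in the diff below): copying only the superblock into a freshly allocated, zero-filled page and then writing that sector back pushes zeroes over whatever followed the superblock on disk.

#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only. */
static void clone_sb_page(void *dst_page, const void *src_page)
{
	/*
	 * BAD: memcpy(dst_page, src_page, sizeof(bitmap_super_t));
	 * leaves the bytes after the superblock zeroed, so a later write of
	 * a full sector pushes zeroes over bitmap data that should have
	 * been preserved.
	 */
	memcpy(dst_page, src_page, PAGE_SIZE);  /* keep the trailing data too */
}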
+---
+ drivers/md/bitmap.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -625,7 +625,7 @@ re_read:
+               err = read_sb_page(bitmap->mddev,
+                                  offset,
+                                  sb_page,
+-                                 0, sizeof(bitmap_super_t));
++                                 0, PAGE_SIZE);
+       }
+       if (err)
+               return err;
+@@ -2118,7 +2118,7 @@ int bitmap_resize(struct bitmap *bitmap,
+       if (store.sb_page && bitmap->storage.sb_page)
+               memcpy(page_address(store.sb_page),
+                      page_address(bitmap->storage.sb_page),
+-                     sizeof(bitmap_super_t));
++                     PAGE_SIZE);
+       bitmap_file_unmap(&bitmap->storage);
+       bitmap->storage = store;
diff --git a/queue-4.13/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch b/queue-4.13/md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch
new file mode 100644 (file)
index 0000000..f15b8b3
--- /dev/null
@@ -0,0 +1,49 @@
+From e8a27f836f165c26f867ece7f31eb5c811692319 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 31 Aug 2017 10:23:25 +1000
+Subject: md/bitmap: disable bitmap_resize for file-backed bitmaps.
+
+From: NeilBrown <neilb@suse.com>
+
+commit e8a27f836f165c26f867ece7f31eb5c811692319 upstream.
+
+bitmap_resize() does not work for file-backed bitmaps.
+The buffer_heads are allocated and initialized when
+the bitmap is read from the file, but resize doesn't
+read from the file, it loads from the internal bitmap.
+When it comes time to write the new bitmap, the bh is
+non-existent and we crash.
+
+The common case when growing an array involves making the array larger,
+and that normally means making the bitmap larger.  Doing
+that inside the kernel is possible, but would need more code.
+It is probably easier to require people who use file-backed
+bitmaps to remove them and re-add after a reshape.
+
+So this patch disables the resizing of arrays which have
+file-backed bitmaps.  This is better than crashing.
+
+Reported-by: Zhilong Liu <zlliu@suse.com>
+Fixes: d60b479d177a ("md/bitmap: add bitmap_resize function to allow bitmap resizing.")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bitmap.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -2058,6 +2058,11 @@ int bitmap_resize(struct bitmap *bitmap,
+       long pages;
+       struct bitmap_page *new_bp;
++      if (bitmap->storage.file && !init) {
++              pr_info("md: cannot resize file-based bitmap\n");
++              return -EINVAL;
++      }
++
+       if (chunksize == 0) {
+               /* If there is enough space, leave the chunk size unchanged,
+                * else increase by factor of two until there is enough space.
diff --git a/queue-4.13/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch b/queue-4.13/powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
new file mode 100644 (file)
index 0000000..c7914b8
--- /dev/null
@@ -0,0 +1,265 @@
+From f9effe925039cf54489b5c04e0d40073bb3a123d Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 24 Aug 2017 20:49:57 +1000
+Subject: powerpc: Fix DAR reporting when alignment handler faults
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit f9effe925039cf54489b5c04e0d40073bb3a123d upstream.
+
+Anton noticed that if we fault part way through emulating an unaligned
+instruction, we don't update the DAR to reflect that.
+
+The DAR value is eventually reported back to userspace as the address
+in the SEGV signal, and if userspace is using that value to demand
+fault then it can be confused by us not setting the value correctly.
+
+This patch is ugly as hell, but is intended to be the minimal fix and
+backports easily.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c |  119 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 74 insertions(+), 45 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *
+ #define SWIZ_PTR(p)           ((unsigned char __user *)((p) ^ swiz))
++#define __get_user_or_set_dar(_regs, _dest, _addr)            \
++      ({                                                      \
++              int rc = 0;                                     \
++              typeof(_addr) __addr = (_addr);                 \
++              if (__get_user_inatomic(_dest, __addr)) {       \
++                      _regs->dar = (unsigned long)__addr;     \
++                      rc = -EFAULT;                           \
++              }                                               \
++              rc;                                             \
++      })
++
++#define __put_user_or_set_dar(_regs, _src, _addr)             \
++      ({                                                      \
++              int rc = 0;                                     \
++              typeof(_addr) __addr = (_addr);                 \
++              if (__put_user_inatomic(_src, __addr)) {        \
++                      _regs->dar = (unsigned long)__addr;     \
++                      rc = -EFAULT;                           \
++              }                                               \
++              rc;                                             \
++      })
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+                           unsigned int reg, unsigned int nb,
+                           unsigned int flags, unsigned int instr,
+@@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_re
+               } else {
+                       unsigned long pc = regs->nip ^ (swiz & 4);
+-                      if (__get_user_inatomic(instr,
+-                                              (unsigned int __user *)pc))
++                      if (__get_user_or_set_dar(regs, instr,
++                                                (unsigned int __user *)pc))
+                               return -EFAULT;
++
+                       if (swiz == 0 && (flags & SW))
+                               instr = cpu_to_le32(instr);
+                       nb = (instr >> 11) & 0x1f;
+@@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_re
+                              ((nb0 + 3) / 4) * sizeof(unsigned long));
+               for (i = 0; i < nb; ++i, ++p)
+-                      if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-                                              SWIZ_PTR(p)))
++                      if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++                                                SWIZ_PTR(p)))
+                               return -EFAULT;
+               if (nb0 > 0) {
+                       rptr = &regs->gpr[0];
+                       addr += nb;
+                       for (i = 0; i < nb0; ++i, ++p)
+-                              if (__get_user_inatomic(REG_BYTE(rptr,
+-                                                               i ^ bswiz),
+-                                                      SWIZ_PTR(p)))
++                              if (__get_user_or_set_dar(regs,
++                                                        REG_BYTE(rptr, i ^ bswiz),
++                                                        SWIZ_PTR(p)))
+                                       return -EFAULT;
+               }
+       } else {
+               for (i = 0; i < nb; ++i, ++p)
+-                      if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-                                              SWIZ_PTR(p)))
++                      if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++                                                SWIZ_PTR(p)))
+                               return -EFAULT;
+               if (nb0 > 0) {
+                       rptr = &regs->gpr[0];
+                       addr += nb;
+                       for (i = 0; i < nb0; ++i, ++p)
+-                              if (__put_user_inatomic(REG_BYTE(rptr,
+-                                                               i ^ bswiz),
+-                                                      SWIZ_PTR(p)))
++                              if (__put_user_or_set_dar(regs,
++                                                        REG_BYTE(rptr, i ^ bswiz),
++                                                        SWIZ_PTR(p)))
+                                       return -EFAULT;
+               }
+       }
+@@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_re
+  * Only POWER6 has these instructions, and it does true little-endian,
+  * so we don't need the address swizzling.
+  */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+-                         unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++                         unsigned int reg, unsigned int flags)
+ {
+       char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+       char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+-      int i, ret, sw = 0;
++      int i, sw = 0;
+       if (reg & 1)
+               return 0;       /* invalid form: FRS/FRT must be even */
+       if (flags & SW)
+               sw = 7;
+-      ret = 0;
++
+       for (i = 0; i < 8; ++i) {
+               if (!(flags & ST)) {
+-                      ret |= __get_user(ptr0[i^sw], addr + i);
+-                      ret |= __get_user(ptr1[i^sw], addr + i + 8);
++                      if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               } else {
+-                      ret |= __put_user(ptr0[i^sw], addr + i);
+-                      ret |= __put_user(ptr1[i^sw], addr + i + 8);
++                      if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               }
+       }
+-      if (ret)
+-              return -EFAULT;
++
+       return 1;       /* exception handled and fixed up */
+ }
+@@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs
+ {
+       char *ptr0 = (char *)&regs->gpr[reg];
+       char *ptr1 = (char *)&regs->gpr[reg+1];
+-      int i, ret, sw = 0;
++      int i, sw = 0;
+       if (reg & 1)
+               return 0;       /* invalid form: GPR must be even */
+       if (flags & SW)
+               sw = 7;
+-      ret = 0;
++
+       for (i = 0; i < 8; ++i) {
+               if (!(flags & ST)) {
+-                      ret |= __get_user(ptr0[i^sw], addr + i);
+-                      ret |= __get_user(ptr1[i^sw], addr + i + 8);
++                      if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               } else {
+-                      ret |= __put_user(ptr0[i^sw], addr + i);
+-                      ret |= __put_user(ptr1[i^sw], addr + i + 8);
++                      if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++                              return -EFAULT;
++                      if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++                              return -EFAULT;
+               }
+       }
+-      if (ret)
+-              return -EFAULT;
++
+       return 1;       /* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __u
+       for (j = 0; j < length; j += elsize) {
+               for (i = 0; i < elsize; ++i) {
+                       if (flags & ST)
+-                              ret |= __put_user(ptr[i^sw], addr + i);
++                              ret = __put_user_or_set_dar(regs, ptr[i^sw],
++                                                          addr + i);
+                       else
+-                              ret |= __get_user(ptr[i^sw], addr + i);
++                              ret = __get_user_or_set_dar(regs, ptr[i^sw],
++                                                          addr + i);
++
++                      if (ret)
++                              return ret;
+               }
+               ptr  += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs)
+       unsigned int dsisr;
+       unsigned char __user *addr;
+       unsigned long p, swiz;
+-      int ret, i;
++      int i;
+       union data {
+               u64 ll;
+               double dd;
+@@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs)
+               if (flags & F) {
+                       /* Special case for 16-byte FP loads and stores */
+                       PPC_WARN_ALIGNMENT(fp_pair, regs);
+-                      return emulate_fp_pair(addr, reg, flags);
++                      return emulate_fp_pair(regs, addr, reg, flags);
+               } else {
+ #ifdef CONFIG_PPC64
+                       /* Special case for 16-byte loads and stores */
+@@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs)
+               }
+               data.ll = 0;
+-              ret = 0;
+               p = (unsigned long)addr;
+               for (i = 0; i < nb; i++)
+-                      ret |= __get_user_inatomic(data.v[start + i],
+-                                                 SWIZ_PTR(p++));
+-
+-              if (unlikely(ret))
+-                      return -EFAULT;
++                      if (__get_user_or_set_dar(regs, data.v[start + i],
++                                                SWIZ_PTR(p++)))
++                              return -EFAULT;
+       } else if (flags & F) {
+               data.ll = current->thread.TS_FPR(reg);
+@@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs)
+                       break;
+               }
+-              ret = 0;
+               p = (unsigned long)addr;
+               for (i = 0; i < nb; i++)
+-                      ret |= __put_user_inatomic(data.v[start + i],
+-                                                 SWIZ_PTR(p++));
++                      if (__put_user_or_set_dar(regs, data.v[start + i],
++                                                SWIZ_PTR(p++)))
++                              return -EFAULT;
+-              if (unlikely(ret))
+-                      return -EFAULT;
+       } else if (flags & F)
+               current->thread.TS_FPR(reg) = data.ll;
+       else
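Taken together, the hunks above all apply the same transformation: user accesses that used to OR their results into a single `ret` (checked only at the end) now go through __get_user_or_set_dar()/__put_user_or_set_dar(), which appear to bail out on the first failing access and record the address that faulted so the DAR can be reported correctly. A rough, self-contained sketch of that before/after shape follows; it is plain userspace C with made-up names (copy_byte, fault_addr), not the kernel helpers themselves.

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for one user access: 0 on success, non-zero on
 * fault. Not a kernel API. */
static int copy_byte(uint8_t *dst, const uint8_t *src)
{
	*dst = *src;
	return 0;
}

static uintptr_t fault_addr;	/* stands in for regs->dar */

/* Old shape: accumulate failures, check once at the end. Nothing records
 * which address actually faulted. */
static int copy_accumulate(uint8_t *dst, const uint8_t *src, size_t n)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < n; i++)
		ret |= copy_byte(&dst[i], &src[i]);
	return ret ? -1 : 0;
}

/* New shape: fail fast and remember the faulting address, mirroring the
 * __get_user_or_set_dar() call sites in the hunks above. */
static int copy_or_record_fault(uint8_t *dst, const uint8_t *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (copy_byte(&dst[i], &src[i])) {
			fault_addr = (uintptr_t)&src[i];
			return -1;
		}
	}
	return 0;
}
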
diff --git a/queue-4.13/powerpc-powernv-npu-move-tlb-flush-before-launching-atsd.patch b/queue-4.13/powerpc-powernv-npu-move-tlb-flush-before-launching-atsd.patch
new file mode 100644 (file)
index 0000000..e4eeaef
--- /dev/null
@@ -0,0 +1,50 @@
+From bab9f954aaf352127725a9b7920226abdb65b604 Mon Sep 17 00:00:00 2001
+From: Alistair Popple <alistair@popple.id.au>
+Date: Fri, 11 Aug 2017 16:22:56 +1000
+Subject: powerpc/powernv/npu: Move tlb flush before launching ATSD
+
+From: Alistair Popple <alistair@popple.id.au>
+
+commit bab9f954aaf352127725a9b7920226abdb65b604 upstream.
+
+The nest MMU tlb flush needs to happen before the GPU translation
+shootdown is launched to avoid the GPU refilling its tlb with stale
+nmmu translations prior to the nmmu flush completing.
+
+Fixes: 1ab66d1fbada ("powerpc/powernv: Introduce address translation services for Nvlink2")
+Signed-off-by: Alistair Popple <alistair@popple.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/npu-dma.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/npu-dma.c
++++ b/arch/powerpc/platforms/powernv/npu-dma.c
+@@ -546,6 +546,12 @@ static void mmio_invalidate(struct npu_c
+       unsigned long pid = npu_context->mm->context.id;
+       /*
++       * Unfortunately the nest mmu does not support flushing specific
++       * addresses so we have to flush the whole mm.
++       */
++      flush_tlb_mm(npu_context->mm);
++
++      /*
+        * Loop over all the NPUs this process is active on and launch
+        * an invalidate.
+        */
+@@ -576,12 +582,6 @@ static void mmio_invalidate(struct npu_c
+               }
+       }
+-      /*
+-       * Unfortunately the nest mmu does not support flushing specific
+-       * addresses so we have to flush the whole mm.
+-       */
+-      flush_tlb_mm(npu_context->mm);
+-
+       mmio_invalidate_wait(mmio_atsd_reg, flush);
+       if (flush)
+               /* Wait for the flush to complete */
diff --git a/queue-4.13/powerpc-pseries-don-t-attempt-to-acquire-drc-during-memory-hot-add-for-assigned-lmbs.patch b/queue-4.13/powerpc-pseries-don-t-attempt-to-acquire-drc-during-memory-hot-add-for-assigned-lmbs.patch
new file mode 100644 (file)
index 0000000..2fc5d1d
--- /dev/null
@@ -0,0 +1,47 @@
+From afb5519fdb346201728040cab4e08ce53e7ff4fd Mon Sep 17 00:00:00 2001
+From: John Allen <jallen@linux.vnet.ibm.com>
+Date: Wed, 23 Aug 2017 12:18:43 -0500
+Subject: powerpc/pseries: Don't attempt to acquire drc during memory hot add for assigned lmbs
+
+From: John Allen <jallen@linux.vnet.ibm.com>
+
+commit afb5519fdb346201728040cab4e08ce53e7ff4fd upstream.
+
+Check if an LMB is assigned before attempting to call dlpar_acquire_drc
+in order to avoid any unnecessary rtas calls. This substantially
+reduces the running time of memory hot add on lpars with large amounts
+of memory.
+
+[mpe: We need to explicitly set rc to 0 in the success case, otherwise
+ the compiler might think we use rc without initialising it.]
+
+Fixes: c21f515c7436 ("powerpc/pseries: Make the acquire/release of the drc for memory a seperate step")
+Signed-off-by: John Allen <jallen@linux.vnet.ibm.com>
+Reviewed-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -817,6 +817,9 @@ static int dlpar_memory_add_by_count(u32
+               return -EINVAL;
+       for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
++              if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
++                      continue;
++
+               rc = dlpar_acquire_drc(lmbs[i].drc_index);
+               if (rc)
+                       continue;
+@@ -859,6 +862,7 @@ static int dlpar_memory_add_by_count(u32
+                               lmbs[i].base_addr, lmbs[i].drc_index);
+                       lmbs[i].reserved = 0;
+               }
++              rc = 0;
+       }
+       return rc;
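The bracketed [mpe: ...] note above refers to an interaction between the new early `continue` and the final `rc = 0`: once already-assigned LMBs are skipped before dlpar_acquire_drc(), there are loop paths that never write `rc`, so the success case has to set it explicitly. A minimal, self-contained sketch of that hazard (all names are illustrative placeholders, not the pseries hotplug code):

#include <stdbool.h>

/* Placeholders only; not the kernel helpers. */
static bool lmb_assigned(int i) { (void)i; return true; }
static int acquire_drc(int i)   { (void)i; return 0; }

static int add_by_count(int count)
{
	int rc;		/* deliberately left uninitialised, as in the kernel */
	int i;

	for (i = 0; i < count; i++) {
		if (lmb_assigned(i))
			continue;	/* early skip: rc may never be written */
		rc = acquire_drc(i);
		if (rc)
			continue;
		/* ... add the memory block ... */
	}

	/*
	 * Success path: with the early skip above the compiler can no
	 * longer prove that rc was written, so it is set explicitly
	 * before being returned.
	 */
	rc = 0;
	return rc;
}
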
diff --git a/queue-4.13/regulator-cpcap-fix-standby-mode.patch b/queue-4.13/regulator-cpcap-fix-standby-mode.patch
new file mode 100644 (file)
index 0000000..3d57604
--- /dev/null
@@ -0,0 +1,57 @@
+From 91a024e80336528d12b67b5a2e636b9e4467d3ec Mon Sep 17 00:00:00 2001
+From: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
+Date: Mon, 10 Jul 2017 16:33:39 +0200
+Subject: regulator: cpcap: Fix standby mode
+
+From: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
+
+commit 91a024e80336528d12b67b5a2e636b9e4467d3ec upstream.
+
+The original patch from Tony used the standby mode bit inverted, which is
+not correct. This fixes all instances in the driver code for get & set
+mode. This has not caused problems yet, since the mode has not been
+changed by any mainline driver so far.
+
+Fixes: 0ad4c07edd41 ("regulator: cpcap: Add basic regulator support")
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/cpcap-regulator.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/regulator/cpcap-regulator.c
++++ b/drivers/regulator/cpcap-regulator.c
+@@ -77,6 +77,8 @@
+ #define CPCAP_BIT_VAUDIO_MODE0                BIT(1)
+ #define CPCAP_BIT_V_AUDIO_EN          BIT(0)
++#define CPCAP_BIT_AUDIO_NORMAL_MODE   0x00
++
+ /*
+  * Off mode configuration bit. Used currently only by SW5 on omap4. There's
+  * the following comment in Motorola Linux kernel tree for it:
+@@ -217,7 +219,7 @@ static unsigned int cpcap_regulator_get_
+       regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+-      if (!(value & CPCAP_BIT_AUDIO_LOW_PWR))
++      if (value & CPCAP_BIT_AUDIO_LOW_PWR)
+               return REGULATOR_MODE_STANDBY;
+       return REGULATOR_MODE_NORMAL;
+@@ -230,10 +232,10 @@ static int cpcap_regulator_set_mode(stru
+       switch (mode) {
+       case REGULATOR_MODE_NORMAL:
+-              value = CPCAP_BIT_AUDIO_LOW_PWR;
++              value = CPCAP_BIT_AUDIO_NORMAL_MODE;
+               break;
+       case REGULATOR_MODE_STANDBY:
+-              value = 0;
++              value = CPCAP_BIT_AUDIO_LOW_PWR;
+               break;
+       default:
+               return -EINVAL;
index 1970516e6230b843d5551e4ae87f4c7d0f5c5fcf..4026f53ccd44b850f67c595f2aacfdd3779711ab 100644 (file)
@@ -27,3 +27,22 @@ mips-math-emu-maddf-msubf-.-d-s-clean-up-maddf_flags-enumeration.patch
 mips-math-emu-maddf-msubf-.s-fix-accuracy-32-bit-case.patch
 mips-math-emu-maddf-msubf-.d-fix-accuracy-64-bit-case.patch
 docs-disable-kaslr-when-debugging-kernel.patch
+crypto-ccp-fix-xts-aes-128-support-on-v5-ccps.patch
+crypto-scompress-don-t-sleep-with-preemption-disabled.patch
+crypto-caam-qi-fix-typo-in-authenc-alg-driver-name.patch
+crypto-caam-qi-properly-set-iv-after-en-de-crypt.patch
+crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
+regulator-cpcap-fix-standby-mode.patch
+wcn36xx-introduce-mutual-exclusion-of-fw-configuration.patch
+ext4-in-ext4_seek_-hole-data-return-enxio-for-negative-offsets.patch
+ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
+ext4-fix-quota-inconsistency-during-orphan-cleanup-for-read-only-mounts.patch
+cxl-fix-driver-use-count.patch
+powerpc-powernv-npu-move-tlb-flush-before-launching-atsd.patch
+powerpc-pseries-don-t-attempt-to-acquire-drc-during-memory-hot-add-for-assigned-lmbs.patch
+powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
+block-relax-a-check-in-blk_start_queue.patch
+block-directly-insert-blk-mq-request-from-blk_insert_cloned_request.patch
+md-bitmap-copy-correct-data-for-bitmap-super.patch
+md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch
+iwlwifi-add-workaround-to-disable-wide-channels-in-5ghz.patch
diff --git a/queue-4.13/wcn36xx-introduce-mutual-exclusion-of-fw-configuration.patch b/queue-4.13/wcn36xx-introduce-mutual-exclusion-of-fw-configuration.patch
new file mode 100644 (file)
index 0000000..29d4b9f
--- /dev/null
@@ -0,0 +1,274 @@
+From 39efc7cc7ccf82d1cd946580cdb70760f347305a Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Wed, 2 Aug 2017 18:28:00 -0700
+Subject: wcn36xx: Introduce mutual exclusion of fw configuration
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 39efc7cc7ccf82d1cd946580cdb70760f347305a upstream.
+
+As the association status changes, the driver needs to configure the
+hardware. This is done based on information in the "sta" acquired by
+ieee80211_find_sta(), which requires the caller to ensure that the "sta"
+is valid while it is being used; generally by entering an rcu read
+section.
+
+But the operations acting on the "sta" have to communicate with the
+firmware and may therefore sleep, resulting in the following report:
+
+[   31.418190] BUG: sleeping function called from invalid context at
+kernel/locking/mutex.c:238
+[   31.425919] in_atomic(): 0, irqs_disabled(): 0, pid: 34, name:
+kworker/u8:1
+[   31.434609] CPU: 0 PID: 34 Comm: kworker/u8:1 Tainted: G        W
+4.12.0-rc4-next-20170607+ #993
+[   31.441002] Hardware name: Qualcomm Technologies, Inc. APQ 8016 SBC
+(DT)
+[   31.450380] Workqueue: phy0 ieee80211_iface_work
+[   31.457226] Call trace:
+[   31.461830] [<ffffff8008088c58>] dump_backtrace+0x0/0x260
+[   31.464004] [<ffffff8008088f7c>] show_stack+0x14/0x20
+[   31.469557] [<ffffff8008392e70>] dump_stack+0x98/0xb8
+[   31.474592] [<ffffff80080e4330>] ___might_sleep+0xf0/0x118
+[   31.479626] [<ffffff80080e43a8>] __might_sleep+0x50/0x88
+[   31.485010] [<ffffff80088ff9a4>] mutex_lock+0x24/0x60
+[   31.490479] [<ffffff8008595c38>] wcn36xx_smd_set_link_st+0x30/0x130
+[   31.495428] [<ffffff8008591ed8>] wcn36xx_bss_info_changed+0x148/0x448
+[   31.501504] [<ffffff80088ab3c4>]
+ieee80211_bss_info_change_notify+0xbc/0x118
+[   31.508102] [<ffffff80088f841c>] ieee80211_assoc_success+0x664/0x7f8
+[   31.515220] [<ffffff80088e13d4>]
+ieee80211_rx_mgmt_assoc_resp+0x144/0x2d8
+[   31.521555] [<ffffff80088e1e20>]
+ieee80211_sta_rx_queued_mgmt+0x190/0x698
+[   31.528239] [<ffffff80088bc44c>] ieee80211_iface_work+0x234/0x368
+[   31.535011] [<ffffff80080d81ac>] process_one_work+0x1cc/0x340
+[   31.541086] [<ffffff80080d8368>] worker_thread+0x48/0x430
+[   31.546814] [<ffffff80080de448>] kthread+0x108/0x138
+[   31.552195] [<ffffff8008082ec0>] ret_from_fork+0x10/0x50
+
+In order to ensure that the "sta" remains alive (and consistent) for the
+duration of bss_info_changed(), mutual exclusion has to be ensured with
+sta_remove().
+
+This is done by introducing a mutex covering firmware configuration
+changes, which also ensures mutual exclusion between other operations
+changing the state or configuration of the firmware. With this we can
+drop the rcu read lock.
+
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/wcn36xx/main.c    |   52 +++++++++++++++++++++++++++--
+ drivers/net/wireless/ath/wcn36xx/wcn36xx.h |    3 +
+ 2 files changed, 53 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -372,6 +372,8 @@ static int wcn36xx_config(struct ieee802
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
++      mutex_lock(&wcn->conf_mutex);
++
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               int ch = WCN36XX_HW_CHANNEL(wcn);
+               wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+@@ -382,6 +384,8 @@ static int wcn36xx_config(struct ieee802
+               }
+       }
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -396,6 +400,8 @@ static void wcn36xx_configure_filter(str
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
++      mutex_lock(&wcn->conf_mutex);
++
+       *total &= FIF_ALLMULTI;
+       fp = (void *)(unsigned long)multicast;
+@@ -408,6 +414,8 @@ static void wcn36xx_configure_filter(str
+               else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+                       wcn36xx_smd_set_mc_list(wcn, vif, fp);
+       }
++
++      mutex_unlock(&wcn->conf_mutex);
+       kfree(fp);
+ }
+@@ -471,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80
+                        key_conf->key,
+                        key_conf->keylen);
++      mutex_lock(&wcn->conf_mutex);
++
+       switch (key_conf->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+@@ -565,6 +575,8 @@ static int wcn36xx_set_key(struct ieee80
+       }
+ out:
++      mutex_unlock(&wcn->conf_mutex);
++
+       return ret;
+ }
+@@ -725,6 +737,8 @@ static void wcn36xx_bss_info_changed(str
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+                   vif, changed);
++      mutex_lock(&wcn->conf_mutex);
++
+       if (changed & BSS_CHANGED_BEACON_INFO) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC,
+                           "mac bss changed dtim period %d\n",
+@@ -787,7 +801,13 @@ static void wcn36xx_bss_info_changed(str
+                                    bss_conf->aid);
+                       vif_priv->sta_assoc = true;
+-                      rcu_read_lock();
++
++                      /*
++                       * Holding conf_mutex ensures mutal exclusion with
++                       * wcn36xx_sta_remove() and as such ensures that sta
++                       * won't be freed while we're operating on it. As such
++                       * we do not need to hold the rcu_read_lock().
++                       */
+                       sta = ieee80211_find_sta(vif, bss_conf->bssid);
+                       if (!sta) {
+                               wcn36xx_err("sta %pM is not found\n",
+@@ -811,7 +831,6 @@ static void wcn36xx_bss_info_changed(str
+                        * place where AID is available.
+                        */
+                       wcn36xx_smd_config_sta(wcn, vif, sta);
+-                      rcu_read_unlock();
+               } else {
+                       wcn36xx_dbg(WCN36XX_DBG_MAC,
+                                   "disassociated bss %pM vif %pM AID=%d\n",
+@@ -873,6 +892,9 @@ static void wcn36xx_bss_info_changed(str
+               }
+       }
+ out:
++
++      mutex_unlock(&wcn->conf_mutex);
++
+       return;
+ }
+@@ -882,7 +904,10 @@ static int wcn36xx_set_rts_threshold(str
+       struct wcn36xx *wcn = hw->priv;
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
++      mutex_lock(&wcn->conf_mutex);
+       wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -893,8 +918,12 @@ static void wcn36xx_remove_interface(str
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
++      mutex_lock(&wcn->conf_mutex);
++
+       list_del(&vif_priv->list);
+       wcn36xx_smd_delete_sta_self(wcn, vif->addr);
++
++      mutex_unlock(&wcn->conf_mutex);
+ }
+ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+@@ -915,9 +944,13 @@ static int wcn36xx_add_interface(struct
+               return -EOPNOTSUPP;
+       }
++      mutex_lock(&wcn->conf_mutex);
++
+       list_add(&vif_priv->list, &wcn->vif_list);
+       wcn36xx_smd_add_sta_self(wcn, vif);
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -930,6 +963,8 @@ static int wcn36xx_sta_add(struct ieee80
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+                   vif, sta->addr);
++      mutex_lock(&wcn->conf_mutex);
++
+       spin_lock_init(&sta_priv->ampdu_lock);
+       sta_priv->vif = vif_priv;
+       /*
+@@ -941,6 +976,9 @@ static int wcn36xx_sta_add(struct ieee80
+               sta_priv->aid = sta->aid;
+               wcn36xx_smd_config_sta(wcn, vif, sta);
+       }
++
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -954,8 +992,13 @@ static int wcn36xx_sta_remove(struct iee
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+                   vif, sta->addr, sta_priv->sta_index);
++      mutex_lock(&wcn->conf_mutex);
++
+       wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+       sta_priv->vif = NULL;
++
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -999,6 +1042,8 @@ static int wcn36xx_ampdu_action(struct i
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+                   action, tid);
++      mutex_lock(&wcn->conf_mutex);
++
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               sta_priv->tid = tid;
+@@ -1038,6 +1083,8 @@ static int wcn36xx_ampdu_action(struct i
+               wcn36xx_err("Unknown AMPDU action\n");
+       }
++      mutex_unlock(&wcn->conf_mutex);
++
+       return 0;
+ }
+@@ -1216,6 +1263,7 @@ static int wcn36xx_probe(struct platform
+       wcn = hw->priv;
+       wcn->hw = hw;
+       wcn->dev = &pdev->dev;
++      mutex_init(&wcn->conf_mutex);
+       mutex_init(&wcn->hal_mutex);
+       mutex_init(&wcn->scan_lock);
+--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
++++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+@@ -202,6 +202,9 @@ struct wcn36xx {
+       struct qcom_smem_state  *tx_rings_empty_state;
+       unsigned                tx_rings_empty_state_bit;
++      /* prevents concurrent FW reconfiguration */
++      struct mutex            conf_mutex;
++
+       /*
+        * smd_buf must be protected with smd_mutex to garantee
+        * that all messages are sent one after another