--- /dev/null
+From smueller@chronox.de Mon Oct 2 11:36:13 2017
+From: Stephan Mueller <smueller@chronox.de>
+Date: Thu, 21 Sep 2017 10:16:53 +0200
+Subject: [PATCH - RESEND] crypto: AF_ALG - remove SGL terminator indicator when chaining
+To: herbert@gondor.apana.org.au, greg@kroah.com
+Cc: linux-crypto@vger.kernel.org
+Message-ID: <5857040.2sfW0oRrdW@tauon.chronox.de>
+
+From: Stephan Mueller <smueller@chronox.de>
+
+Not upstream as-is due to massive rewrite in commit 2d97591ef43d ("crypto:
+af_alg - consolidation of duplicate code")
+
+The SGL is MAX_SGL_ENTS + 1 in size. The last SG entry is used for the
+chaining and is properly updated with the sg_chain invocation. During
+the filling-in of the initial SG entries, sg_mark_end is called for each
+SG entry. This is appropriate as long as no additional SGL is chained
+with the current SGL. However, when a new SGL is chained and the last
+SG entry is updated with sg_chain, the last but one entry still contains
+the end marker from the sg_mark_end. This end marker must be removed as
+otherwise a walk of the chained SGLs will cause a NULL pointer
+dereference at the last but one SG entry, because sg_next will return
+NULL.
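+
+To illustrate the failure mode, here is a minimal user-space model of
+the walk (simplified flag-based entries, not the kernel's struct
+scatterlist API); the commented-out line plays the role of the
+sg_unmark_end() call added by this patch:
+
+  #include <stdio.h>
+
+  #define SG_END   0x1U           /* stand-in for the SGL end marker  */
+  #define SG_CHAIN 0x2U           /* stand-in for the chain indicator */
+
+  struct sg {
+          unsigned int flags;
+          struct sg *chain;       /* used only when SG_CHAIN is set   */
+          int data;
+  };
+
+  /* Walk: stop at an entry carrying SG_END, follow chain entries. */
+  static struct sg *sg_next_model(struct sg *s)
+  {
+          if (s->flags & SG_END)
+                  return NULL;
+          s++;
+          if (s->flags & SG_CHAIN)
+                  s = s->chain;
+          return s;
+  }
+
+  int main(void)
+  {
+          struct sg second[2] = {
+                  { 0,        NULL,   3 },
+                  { SG_END,   NULL,   4 },
+          };
+          struct sg first[3] = {
+                  { 0,        NULL,   1 },
+                  { SG_END,   NULL,   2 },  /* stale end marker */
+                  { SG_CHAIN, second, 0 },  /* chaining slot    */
+          };
+
+          /* first[1].flags &= ~SG_END;         <-- the fix */
+
+          for (struct sg *s = first; s; s = sg_next_model(s))
+                  printf("data %d\n", s->data);
+          return 0;
+  }
+
+Without clearing the stale marker the walk stops after "data 2" and
+never reaches the chained SGL; that premature end-of-list result from
+sg_next is what leads to the NULL pointer dereference described above.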
+
+The patch applies only to kernels up to and including 4.13. The
+patch 2d97591ef43d0587be22ad1b0d758d6df4999a0b added to 4.14-rc1
+introduced a complete new code base which addresses this bug in
+a different way. Yet, that patch is too invasive for stable kernels
+and was therefore not marked for stable.
+
+Fixes: 8ff590903d5fc ("crypto: algif_skcipher - User-space interface for skcipher operations")
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -139,8 +139,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+- if (sg)
++ if (sg) {
+ sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++ }
+
+ list_add_tail(&sgl->list, &ctx->tsgl);
+ }
+--
+2.13.5
+
--- /dev/null
+From bd6227a150fdb56e7bb734976ef6e53a2c1cb334 Mon Sep 17 00:00:00 2001
+From: Stephan Mueller <smueller@chronox.de>
+Date: Thu, 14 Sep 2017 17:10:28 +0200
+Subject: crypto: drbg - fix freeing of resources
+
+From: Stephan Mueller <smueller@chronox.de>
+
+commit bd6227a150fdb56e7bb734976ef6e53a2c1cb334 upstream.
+
+During the change to use aligned buffers, the deallocation code path was
+not updated correctly. The current code tries to free the aligned buffer
+pointer and not the original buffer pointer as it is supposed to.
+
+Thus, the code is updated to free the original buffer pointer and set
+the aligned buffer pointer that is used throughout the code to NULL.
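+
+The pattern being fixed can be sketched in plain user-space C (malloc
+plus a hand-rolled PTR_ALIGN, not the drbg code itself): the aligned
+pointer is derived from an over-sized allocation, so only the original
+allocation pointer may ever be freed:
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define ALIGNMENT 64    /* assumed alignment, for illustration only */
+
+  int main(void)
+  {
+          /* Over-allocate so an aligned pointer fits inside the
+           * buffer (this plays the role of drbg->Vbuf). */
+          unsigned char *buf = malloc(128 + ALIGNMENT - 1);
+          if (!buf)
+                  return 1;
+
+          /* Round up to the next boundary (this plays the role of
+           * drbg->V, the pointer used throughout the code). */
+          unsigned char *aligned = (unsigned char *)
+                  (((uintptr_t)buf + ALIGNMENT - 1) &
+                   ~(uintptr_t)(ALIGNMENT - 1));
+          printf("buf=%p aligned=%p\n", (void *)buf, (void *)aligned);
+
+          /* Teardown: free the original pointer and forget the
+           * aligned one; free(aligned) would be the bug removed by
+           * this patch. */
+          free(buf);
+          aligned = NULL;
+          return 0;
+  }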
+
+Fixes: 3cfc3b9721123 ("crypto: drbg - use aligned buffers")
+CC: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/drbg.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(st
+ {
+ if (!drbg)
+ return;
+- kzfree(drbg->V);
+- drbg->Vbuf = NULL;
+- kzfree(drbg->C);
+- drbg->Cbuf = NULL;
++ kzfree(drbg->Vbuf);
++ drbg->V = NULL;
++ kzfree(drbg->Cbuf);
++ drbg->C = NULL;
+ kzfree(drbg->scratchpadbuf);
+ drbg->scratchpadbuf = NULL;
+ drbg->reseed_ctr = 0;
--- /dev/null
+From 56136631573baa537a15e0012055ffe8cfec1a33 Mon Sep 17 00:00:00 2001
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+Date: Tue, 12 Sep 2017 11:03:39 +0200
+Subject: crypto: talitos - Don't provide setkey for non hmac hashing algs.
+
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+
+commit 56136631573baa537a15e0012055ffe8cfec1a33 upstream.
+
+Today, md5sum fails with error -ENOKEY because a setkey
+function is set for non-hmac hashing algs; see the strace output below:
+
+mmap(NULL, 378880, PROT_READ, MAP_SHARED, 6, 0) = 0x77f50000
+accept(3, 0, NULL) = 7
+vmsplice(5, [{"bin/\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 378880}], 1, SPLICE_F_MORE|SPLICE_F_GIFT) = 262144
+splice(4, NULL, 7, NULL, 262144, SPLICE_F_MORE) = -1 ENOKEY (Required key not available)
+write(2, "Generation of hash for file kcap"..., 50) = 50
+munmap(0x77f50000, 378880) = 0
+
+This patch ensures that the setkey() function is set only
+for hmac hashing.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talito
+ t_alg->algt.alg.hash.final = ahash_final;
+ t_alg->algt.alg.hash.finup = ahash_finup;
+ t_alg->algt.alg.hash.digest = ahash_digest;
+- t_alg->algt.alg.hash.setkey = ahash_setkey;
++ if (!strncmp(alg->cra_name, "hmac", 4))
++ t_alg->algt.alg.hash.setkey = ahash_setkey;
+ t_alg->algt.alg.hash.import = ahash_import;
+ t_alg->algt.alg.hash.export = ahash_export;
+
--- /dev/null
+From 886a27c0fc8a34633aadb0986dba11d8c150ae2e Mon Sep 17 00:00:00 2001
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+Date: Wed, 13 Sep 2017 12:44:57 +0200
+Subject: crypto: talitos - fix hashing
+
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+
+commit 886a27c0fc8a34633aadb0986dba11d8c150ae2e upstream.
+
+md5sum on some files gives a wrong result.
+
+Example:
+
+With the md5sum from libkcapi:
+c15115c05bad51113f81bdaee735dd09 test
+
+With the original md5sum:
+bbdf41d80ba7e8b2b7be3a0772be76cb test
+
+This patch fixes this issue
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct t
+
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+- sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
++ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
+ else
+ sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ DMA_TO_DEVICE);
--- /dev/null
+From afd62fa26343be6445479e75de9f07092a061459 Mon Sep 17 00:00:00 2001
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+Date: Wed, 13 Sep 2017 12:44:51 +0200
+Subject: crypto: talitos - fix sha224
+
+From: LEROY Christophe <christophe.leroy@c-s.fr>
+
+commit afd62fa26343be6445479e75de9f07092a061459 upstream.
+
+Kernel crypto tests report the following error at startup
+
+[ 2.752626] alg: hash: Test 4 failed for sha224-talitos
+[ 2.757907] 00000000: 30 e2 86 e2 e7 8a dd 0d d7 eb 9f d5 83 fe f1 b0
+00000010: 2d 5a 6c a5 f9 55 ea fd 0e 72 05 22
+
+This patch fixes it
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct t
+ req_ctx->swinit = 0;
+ } else {
+ desc->ptr[1] = zero_entry;
+- /* Indicate next op is not the first. */
+- req_ctx->first = 0;
+ }
++ /* Indicate next op is not the first. */
++ req_ctx->first = 0;
+
+ /* HMAC key */
+ if (ctx->keylen)
--- /dev/null
+From c4d6a1b8e8ea79c439a4871cba540443c9eb13b9 Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shli@fb.com>
+Date: Thu, 21 Sep 2017 10:29:22 -0700
+Subject: dm-raid: fix a race condition in request handling
+
+From: Shaohua Li <shli@fb.com>
+
+commit c4d6a1b8e8ea79c439a4871cba540443c9eb13b9 upstream.
+
+raid_map calls pers->make_request, which missed the suspend check. Fix it with
+the new md_handle_request API.
+
+Fixes: cc27b0c78c79 ("md: fix deadlock between mddev_suspend() and md_write_start()")
+Cc: Heinz Mauelshagen <heinzm@redhat.com>
+Cc: Mike Snitzer <snitzer@redhat.com>
+Reviewed-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-raid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti
+ if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ return DM_MAPIO_REQUEUE;
+
+- mddev->pers->make_request(mddev, bio);
++ md_handle_request(mddev, bio);
+
+ return DM_MAPIO_SUBMITTED;
+ }
--- /dev/null
+From 4cf97582b46f123a4b7cd88d999f1806c2eb4093 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 11 Sep 2017 17:43:56 +0200
+Subject: drm/amdgpu: revert tile table update for oland
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jean Delvare <jdelvare@suse.de>
+
+commit 4cf97582b46f123a4b7cd88d999f1806c2eb4093 upstream.
+
+Several users have complained that the tile table update broke Oland
+support. Despite several attempts to fix it, the root cause is still
+unknown at this point and no solution is available. As it is not
+acceptable to leave a known regression breaking major functionality
+in the kernel for several releases, let's just reverse this
+optimization for now. It can be implemented again later if and only
+if the breakage is understood and fixed.
+
+As there have been no complaints about Hainan so far, only the Oland part of
+the offending commit is reverted. The optimization is preserved on
+Hainan, so this commit isn't an actual revert of the original.
+
+This fixes bug #194761:
+https://bugzilla.kernel.org/show_bug.cgi?id=194761
+
+Reviewed-by: Marek Olšák <marek.olsak@amd.com>
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Fixes: f8d9422ef80c ("drm/amdgpu: update tile table for oland/hainan")
+Cc: Flora Cui <Flora.Cui@amd.com>
+Cc: Junwei Zhang <Jerry.Zhang@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Marek Olšák <maraeo@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 189 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 188 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_i
+ NUM_BANKS(ADDR_SURF_2_BANK);
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
+- } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
++ } else if (adev->asic_type == CHIP_OLAND) {
++ tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(split_equal_to_row_size) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(split_equal_to_row_size) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(split_equal_to_row_size) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++ TILE_SPLIT(split_equal_to_row_size) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++ tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++ NUM_BANKS(ADDR_SURF_16_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++ tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
++ NUM_BANKS(ADDR_SURF_8_BANK) |
++ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
++ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
++ WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
++ } else if (adev->asic_type == CHIP_HAINAN) {
+ tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
--- /dev/null
+From 5baf6bb0fd2388742a0846cc7bcacee6dec78235 Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Thu, 14 Sep 2017 14:01:00 +0200
+Subject: drm/exynos: Fix locking in the suspend/resume paths
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit 5baf6bb0fd2388742a0846cc7bcacee6dec78235 upstream.
+
+Commit 48a92916729b ("drm/exynos: use drm_for_each_connector_iter()")
+replaced unsafe drm_for_each_connector() with drm_for_each_connector_iter()
+and removed surrounding drm_modeset_lock calls. However, that lock was
+there not only to protect unsafe drm_for_each_connector(), but it was also
+required to be held by the dpms code which was called from the loop body.
+This patch restores those drm_modeset_lock calls to fix broken suspend
+and resume of Exynos DRM subsystem in v4.13 kernel.
+
+Fixes: 48a92916729b ("drm/exynos: use drm_for_each_connector_iter()")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/exynos/exynos_drm_drv.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -176,6 +176,7 @@ static int exynos_drm_suspend(struct dev
+ if (pm_runtime_suspended(dev) || !drm_dev)
+ return 0;
+
++ drm_modeset_lock_all(drm_dev);
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ int old_dpms = connector->dpms;
+@@ -187,6 +188,7 @@ static int exynos_drm_suspend(struct dev
+ connector->dpms = old_dpms;
+ }
+ drm_connector_list_iter_end(&conn_iter);
++ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
+ }
+@@ -200,6 +202,7 @@ static int exynos_drm_resume(struct devi
+ if (pm_runtime_suspended(dev) || !drm_dev)
+ return 0;
+
++ drm_modeset_lock_all(drm_dev);
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->funcs->dpms) {
+@@ -210,6 +213,7 @@ static int exynos_drm_resume(struct devi
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
++ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
+ }
--- /dev/null
+From 7b4dc3c0da0d66e7b20a826c537d41bb73e4df54 Mon Sep 17 00:00:00 2001
+From: Changbin Du <changbin.du@intel.com>
+Date: Fri, 18 Aug 2017 17:49:58 +0800
+Subject: drm/i915/gvt: Fix incorrect PCI BARs reporting
+
+From: Changbin Du <changbin.du@intel.com>
+
+commit 7b4dc3c0da0d66e7b20a826c537d41bb73e4df54 upstream.
+
+Looking at our virtual PCI device, we can see surprising Region 4 and Region 5.
+00:10.0 VGA compatible controller: Intel Corporation Sky Lake Integrated Graphics (rev 06) (prog-if 00 [VGA controller])
+ ....
+ Region 0: Memory at 140000000 (64-bit, non-prefetchable) [size=16M]
+ Region 2: Memory at 180000000 (64-bit, prefetchable) [size=1G]
+ Region 4: Memory at <ignored> (32-bit, non-prefetchable)
+ Region 5: Memory at <ignored> (32-bit, non-prefetchable)
+ Expansion ROM at febd6000 [disabled] [size=2K]
+
+The fact is that we only implemented BAR0 and BAR2. The surprising Region 4
+and Region 5 are shown because we report their size as 0xffffffff; they
+should report size 0 instead.
+
+By the way, the physical GPU has a PIO BAR. GVT-g hasn't implemented PIO
+access, so we ignore this BAR for the vGPU device.
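+
+For reference, the sizing handshake quoted in the code comment below can
+be sketched in a few lines of stand-alone C (generic PCI BAR arithmetic,
+not the GVT-g emulation code): software writes all 1's, reads the value
+back, masks off the flag bits and takes the two's complement; an
+unimplemented BAR must read back as 0 so the computed size is 0:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Decode the readback of a 32-bit memory BAR size probe. */
+  static uint32_t bar_size(uint32_t readback)
+  {
+          if (readback == 0)
+                  return 0;     /* unimplemented: no address bits stick */
+          readback &= ~0xfU;    /* drop the memory BAR flag bits        */
+          return ~readback + 1; /* two's complement of the address mask */
+  }
+
+  int main(void)
+  {
+          /* Assumed example readbacks after writing 0xffffffff. */
+          printf("16M BAR:     %lu bytes\n",
+                 (unsigned long)bar_size(0xff000004));
+          printf("missing BAR: %lu bytes\n",
+                 (unsigned long)bar_size(0x00000000));
+          return 0;
+  }
+
+Reporting all 1's for the unimplemented BARs makes this probe come up
+with a bogus non-zero size, which is why lspci showed the extra regions.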
+
+v2: fix BAR size value calculation.
+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=1458032
+Signed-off-by: Changbin Du <changbin.du@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+(cherry picked from commit f1751362d6357a90bc6e53176cec715ff2dbed74)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/cfg_space.c | 113 ++++++++++++++---------------------
+ 1 file changed, 48 insertions(+), 65 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
++++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
+@@ -197,78 +197,65 @@ static int emulate_pci_command_write(str
+ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+ {
+- unsigned int bar_index =
+- (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ u32 new = *(u32 *)(p_data);
+ bool lo = IS_ALIGNED(offset, 8);
+ u64 size;
+ int ret = 0;
+ bool mmio_enabled =
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
++ struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
+
+- if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+- return -EINVAL;
+-
++ /*
++ * Power-up software can determine how much address
++ * space the device requires by writing a value of
++ * all 1's to the register and then reading the value
++ * back. The device will return 0's in all don't-care
++ * address bits.
++ */
+ if (new == 0xffffffff) {
+- /*
+- * Power-up software can determine how much address
+- * space the device requires by writing a value of
+- * all 1's to the register and then reading the value
+- * back. The device will return 0's in all don't-care
+- * address bits.
+- */
+- size = vgpu->cfg_space.bar[bar_index].size;
+- if (lo) {
+- new = rounddown(new, size);
+- } else {
+- u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+- /* for 32bit mode bar it returns all-0 in upper 32
+- * bit, for 64bit mode bar it will calculate the
+- * size with lower 32bit and return the corresponding
+- * value
++ switch (offset) {
++ case PCI_BASE_ADDRESS_0:
++ case PCI_BASE_ADDRESS_1:
++ size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
++ intel_vgpu_write_pci_bar(vgpu, offset,
++ size >> (lo ? 0 : 32), lo);
++ /*
++ * Untrap the BAR, since guest hasn't configured a
++ * valid GPA
+ */
+- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+- new &= (~(size-1)) >> 32;
+- else
+- new = 0;
+- }
+- /*
+- * Unmapp & untrap the BAR, since guest hasn't configured a
+- * valid GPA
+- */
+- switch (bar_index) {
+- case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+- case INTEL_GVT_PCI_BAR_APERTURE:
++ case PCI_BASE_ADDRESS_2:
++ case PCI_BASE_ADDRESS_3:
++ size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
++ intel_vgpu_write_pci_bar(vgpu, offset,
++ size >> (lo ? 0 : 32), lo);
+ ret = map_aperture(vgpu, false);
+ break;
++ default:
++ /* Unimplemented BARs */
++ intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
+ }
+- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ } else {
+- /*
+- * Unmapp & untrap the old BAR first, since guest has
+- * re-configured the BAR
+- */
+- switch (bar_index) {
+- case INTEL_GVT_PCI_BAR_GTTMMIO:
+- ret = trap_gttmmio(vgpu, false);
++ switch (offset) {
++ case PCI_BASE_ADDRESS_0:
++ case PCI_BASE_ADDRESS_1:
++ /*
++ * Untrap the old BAR first, since guest has
++ * re-configured the BAR
++ */
++ trap_gttmmio(vgpu, false);
++ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
++ ret = trap_gttmmio(vgpu, mmio_enabled);
+ break;
+- case INTEL_GVT_PCI_BAR_APERTURE:
+- ret = map_aperture(vgpu, false);
++ case PCI_BASE_ADDRESS_2:
++ case PCI_BASE_ADDRESS_3:
++ map_aperture(vgpu, false);
++ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
++ ret = map_aperture(vgpu, mmio_enabled);
+ break;
+- }
+- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+- /* Track the new BAR */
+- if (mmio_enabled) {
+- switch (bar_index) {
+- case INTEL_GVT_PCI_BAR_GTTMMIO:
+- ret = trap_gttmmio(vgpu, true);
+- break;
+- case INTEL_GVT_PCI_BAR_APERTURE:
+- ret = map_aperture(vgpu, true);
+- break;
+- }
++ default:
++ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ }
+ }
+ return ret;
+@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct
+ }
+
+ switch (rounddown(offset, 4)) {
+- case PCI_BASE_ADDRESS_0:
+- case PCI_BASE_ADDRESS_1:
+- case PCI_BASE_ADDRESS_2:
+- case PCI_BASE_ADDRESS_3:
++ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct in
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+- int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct in
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
++ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+- vgpu->cfg_space.bar[i].size = pci_resource_len(
+- gvt->dev_priv->drm.pdev, i * 2);
+- vgpu->cfg_space.bar[i].tracked = false;
+- }
++ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
++ pci_resource_len(gvt->dev_priv->drm.pdev, 0);
++ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
++ pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ }
+
+ /**
--- /dev/null
+From 820608548737e315c6f93e3099b4e65bde062334 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 15 Sep 2017 11:55:27 -0400
+Subject: drm/radeon: disable hard reset in hibernate for APUs
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 820608548737e315c6f93e3099b4e65bde062334 upstream.
+
+Fixes a hibernation regression on APUs.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=191571
+Fixes: 274ad65c9d02bdc (drm/radeon: hard reset r600 and newer GPU when hibernating.)
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device
+ radeon_agp_suspend(rdev);
+
+ pci_save_state(dev->pdev);
+- if (freeze && rdev->family >= CHIP_CEDAR) {
++ if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
+ rdev->asic->asic_reset(rdev, true);
+ pci_restore_state(dev->pdev);
+ } else if (suspend) {
--- /dev/null
+From d222af072380c4470295c07d84ecb15f4937e365 Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Wed, 6 Sep 2017 15:20:55 +1000
+Subject: KVM: PPC: Book3S HV: Don't access XIVE PIPR register using byte accesses
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit d222af072380c4470295c07d84ecb15f4937e365 upstream.
+
+The XIVE interrupt controller on POWER9 machines doesn't support byte
+accesses to any register in the thread management area other than the
+CPPR (current processor priority register). In particular, when
+reading the PIPR (pending interrupt priority register), we need to
+do a 32-bit or 64-bit load.
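+
+As a stand-alone illustration of the access pattern the fix switches to
+(plain C rather than the kernel's __raw_readq()/be64_to_cpu() helpers):
+load the whole big-endian TM_QW1_OS doubleword and pick the PIPR byte
+out of it, instead of issuing a one-byte load:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Convert a big-endian 64-bit register image to host order. */
+  static uint64_t be64_to_host(const uint8_t raw[8])
+  {
+          uint64_t v = 0;
+
+          for (int i = 0; i < 8; i++)
+                  v = (v << 8) | raw[i];
+          return v;
+  }
+
+  int main(void)
+  {
+          /* Assumed register image; PIPR sits in the last byte of the
+           * doubleword, which is why the patch masks with 0xff. */
+          uint8_t qw1_os[8] = { 0x00, 0x11, 0x22, 0x33,
+                                0x44, 0x55, 0x66, 0x07 };
+
+          uint64_t qw1 = be64_to_host(qw1_os); /* one wide load */
+          uint8_t pipr = qw1 & 0xff;           /* extract PIPR  */
+
+          printf("pipr = 0x%02x\n", pipr);
+          return 0;
+  }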
+
+Fixes: 2c4fb78f78b6 ("KVM: PPC: Book3S HV: Workaround POWER9 DD1.0 bug causing IPB bit loss")
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rm_xive.c | 1 -
+ arch/powerpc/kvm/book3s_xive.c | 1 -
+ arch/powerpc/kvm/book3s_xive_template.c | 7 ++++---
+ 3 files changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
+@@ -38,7 +38,6 @@ static inline void __iomem *get_tima_phy
+ #define __x_tima get_tima_phys()
+ #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
+ #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
+-#define __x_readb __raw_rm_readb
+ #define __x_writeb __raw_rm_writeb
+ #define __x_readw __raw_rm_readw
+ #define __x_readq __raw_rm_readq
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -48,7 +48,6 @@
+ #define __x_tima xive_tima
+ #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
+ #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
+-#define __x_readb __raw_readb
+ #define __x_writeb __raw_writeb
+ #define __x_readw __raw_readw
+ #define __x_readq __raw_readq
+--- a/arch/powerpc/kvm/book3s_xive_template.c
++++ b/arch/powerpc/kvm/book3s_xive_template.c
+@@ -28,7 +28,8 @@ static void GLUE(X_PFX,ack_pending)(stru
+ * bit.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+- u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
++ __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
++ u8 pipr = be64_to_cpu(qw1) & 0xff;
+ if (pipr >= xc->hw_cppr)
+ return;
+ }
+@@ -336,7 +337,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipol
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+- u8 pipr;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+@@ -353,7 +353,8 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipol
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+- pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
++ __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
++ u8 pipr = be64_to_cpu(qw1) & 0xff;
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
--- /dev/null
+From 67f8a8c1151c9ef3d1285905d1e66ebb769ecdf7 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 12 Sep 2017 13:47:23 +1000
+Subject: KVM: PPC: Book3S HV: Fix bug causing host SLB to be restored incorrectly
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 67f8a8c1151c9ef3d1285905d1e66ebb769ecdf7 upstream.
+
+Aneesh Kumar reported seeing host crashes when running recent kernels
+on POWER8. The symptom was an oops like this:
+
+Unable to handle kernel paging request for data at address 0xf00000000786c620
+Faulting instruction address: 0xc00000000030e1e4
+Oops: Kernel access of bad area, sig: 11 [#1]
+LE SMP NR_CPUS=2048 NUMA PowerNV
+Modules linked in: powernv_op_panel
+CPU: 24 PID: 6663 Comm: qemu-system-ppc Tainted: G W 4.13.0-rc7-43932-gfc36c59 #2
+task: c000000fdeadfe80 task.stack: c000000fdeb68000
+NIP: c00000000030e1e4 LR: c00000000030de6c CTR: c000000000103620
+REGS: c000000fdeb6b450 TRAP: 0300 Tainted: G W (4.13.0-rc7-43932-gfc36c59)
+MSR: 9000000000009033 <SF,HV,EE,ME,IR,DR,RI,LE> CR: 24044428 XER: 20000000
+CFAR: c00000000030e134 DAR: f00000000786c620 DSISR: 40000000 SOFTE: 0
+GPR00: 0000000000000000 c000000fdeb6b6d0 c0000000010bd000 000000000000e1b0
+GPR04: c00000000115e168 c000001fffa6e4b0 c00000000115d000 c000001e1b180386
+GPR08: f000000000000000 c000000f9a8913e0 f00000000786c600 00007fff587d0000
+GPR12: c000000fdeb68000 c00000000fb0f000 0000000000000001 00007fff587cffff
+GPR16: 0000000000000000 c000000000000000 00000000003fffff c000000fdebfe1f8
+GPR20: 0000000000000004 c000000fdeb6b8a8 0000000000000001 0008000000000040
+GPR24: 07000000000000c0 00007fff587cffff c000000fdec20bf8 00007fff587d0000
+GPR28: c000000fdeca9ac0 00007fff587d0000 00007fff587c0000 00007fff587d0000
+NIP [c00000000030e1e4] __get_user_pages_fast+0x434/0x1070
+LR [c00000000030de6c] __get_user_pages_fast+0xbc/0x1070
+Call Trace:
+[c000000fdeb6b6d0] [c00000000139dab8] lock_classes+0x0/0x35fe50 (unreliable)
+[c000000fdeb6b7e0] [c00000000030ef38] get_user_pages_fast+0xf8/0x120
+[c000000fdeb6b830] [c000000000112318] kvmppc_book3s_hv_page_fault+0x308/0xf30
+[c000000fdeb6b960] [c00000000010e10c] kvmppc_vcpu_run_hv+0xfdc/0x1f00
+[c000000fdeb6bb20] [c0000000000e915c] kvmppc_vcpu_run+0x2c/0x40
+[c000000fdeb6bb40] [c0000000000e5650] kvm_arch_vcpu_ioctl_run+0x110/0x300
+[c000000fdeb6bbe0] [c0000000000d6468] kvm_vcpu_ioctl+0x528/0x900
+[c000000fdeb6bd40] [c0000000003bc04c] do_vfs_ioctl+0xcc/0x950
+[c000000fdeb6bde0] [c0000000003bc930] SyS_ioctl+0x60/0x100
+[c000000fdeb6be30] [c00000000000b96c] system_call+0x58/0x6c
+Instruction dump:
+7ca81a14 2fa50000 41de0010 7cc8182a 68c60002 78c6ffe2 0b060000 3cc2000a
+794a3664 390610d8 e9080000 7d485214 <e90a0020> 7d435378 790507e1 408202f0
+---[ end trace fad4a342d0414aa2 ]---
+
+It turns out that what has happened is that the SLB entry for the
+vmemmap region hasn't been reloaded on exit from a guest, and it has
+the wrong page size. Then, when the host next accesses the vmemmap
+region, it gets a page fault.
+
+Commit a25bd72badfa ("powerpc/mm/radix: Workaround prefetch issue with
+KVM", 2017-07-24) modified the guest exit code so that it now only clears
+out the SLB for hash guests. The code tests the radix flag and puts the
+result in a non-volatile CR field, CR2, and later branches based on CR2.
+
+Unfortunately, the kvmppc_save_tm function, which gets called between
+those two points, modifies all the user-visible registers in the case
+where the guest was in transactional or suspended state, except for a
+few which it restores (namely r1, r2, r9 and r13). Thus the hash/radix
+indication in CR2 gets corrupted.
+
+This fixes the problem by re-doing the comparison just before the
+result is needed. For good measure, this also adds comments next to
+the call sites of kvmppc_save_tm and kvmppc_restore_tm pointing out
+that non-volatile register state will be lost.
+
+Fixes: a25bd72badfa ("powerpc/mm/radix: Workaround prefetch issue with KVM")
+Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -765,6 +765,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++ /*
++ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++ */
+ bl kvmppc_restore_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+@@ -1623,6 +1626,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++ /*
++ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++ */
+ bl kvmppc_save_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+@@ -1742,7 +1748,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ /*
+ * Are we running hash or radix ?
+ */
+- beq cr2,3f
++ ld r5, VCPU_KVM(r9)
++ lbz r0, KVM_RADIX(r5)
++ cmpwi cr2, r0, 0
++ beq cr2, 3f
+
+ /* Radix: Handle the case where the guest used an illegal PID */
+ LOAD_REG_ADDR(r4, mmu_base_pid)
+@@ -2459,6 +2468,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu poi
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++ /*
++ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++ */
+ ld r9, HSTATE_KVM_VCPU(r13)
+ bl kvmppc_save_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+@@ -2569,6 +2581,9 @@ kvm_end_cede:
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++ /*
++ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++ */
+ bl kvmppc_restore_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
--- /dev/null
+From cf5f6f3125241853462334b1bc696f3c3c492178 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Mon, 11 Sep 2017 16:05:30 +1000
+Subject: KVM: PPC: Book3S HV: Hold kvm->lock around call to kvmppc_update_lpcr
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit cf5f6f3125241853462334b1bc696f3c3c492178 upstream.
+
+Commit 468808bd35c4 ("KVM: PPC: Book3S HV: Set process table for HPT
+guests on POWER9", 2017-01-30) added a call to kvmppc_update_lpcr()
+which doesn't hold the kvm->lock mutex around the call, as required.
+This adds the lock/unlock pair, and for good measure, includes
+the kvmppc_setup_partition_table() call in the locked region, since
+it is altering global state of the VM.
+
+This error appears not to have any fatal consequences for the host;
+the consequences would be that the VCPUs could end up running with
+different LPCR values, or an update to the LPCR value by userspace
+using the one_reg interface could get overwritten, or the update
+done by kvmhv_configure_mmu() could get overwritten.
+
+Fixes: 468808bd35c4 ("KVM: PPC: Book3S HV: Set process table for HPT guests on POWER9")
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4187,11 +4187,13 @@ static int kvmhv_configure_mmu(struct kv
+ if ((cfg->process_table & PRTS_MASK) > 24)
+ return -EINVAL;
+
++ mutex_lock(&kvm->lock);
+ kvm->arch.process_table = cfg->process_table;
+ kvmppc_setup_partition_table(kvm);
+
+ lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
++ mutex_unlock(&kvm->lock);
+
+ return 0;
+ }
--- /dev/null
+From 3664847d95e60a9a943858b7800f8484669740fc Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shli@fb.com>
+Date: Fri, 25 Aug 2017 10:40:02 -0700
+Subject: md/raid5: fix a race condition in stripe batch
+
+From: Shaohua Li <shli@fb.com>
+
+commit 3664847d95e60a9a943858b7800f8484669740fc upstream.
+
+We have a race condition in the scenario below: say we have 3 continuous
+stripes, sh1, sh2 and sh3, where sh1 is the stripe_head of sh2 and sh3:
+
+CPU1 CPU2 CPU3
+handle_stripe(sh3)
+ stripe_add_to_batch_list(sh3)
+ -> lock(sh2, sh3)
+ -> lock batch_lock(sh1)
+ -> add sh3 to batch_list of sh1
+ -> unlock batch_lock(sh1)
+ clear_batch_ready(sh1)
+ -> lock(sh1) and batch_lock(sh1)
+ -> clear STRIPE_BATCH_READY for all stripes in batch_list
+ -> unlock(sh1) and batch_lock(sh1)
+->clear_batch_ready(sh3)
+-->test_and_clear_bit(STRIPE_BATCH_READY, sh3)
+--->return 0 as sh->batch == NULL
+ -> sh3->batch_head = sh1
+ -> unlock (sh2, sh3)
+
+In CPU1, handle_stripe will continue to handle sh3 even though it's in the
+batch stripe list of sh1. By moving the sh3->batch_head assignment into the
+batch_lock critical section, we make it impossible to clear
+STRIPE_BATCH_READY before batch_head is set.
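+
+In generic terms this is the usual publish-under-the-lock pattern; a
+small user-space sketch (pthread mutex and made-up fields, not the
+raid5 code) of why the assignment has to sit inside the same critical
+section that the clearing path takes:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  struct stripe {
+          pthread_mutex_t batch_lock;
+          struct stripe *batch_head; /* non-NULL once batched            */
+          int batch_ready;           /* stands in for STRIPE_BATCH_READY */
+  };
+
+  static void add_to_batch(struct stripe *head, struct stripe *sh)
+  {
+          pthread_mutex_lock(&head->batch_lock);
+          /* Published inside the lock: anyone holding batch_lock either
+           * sees the stripe fully batched (batch_head set) or not
+           * batched at all, never a half-way state. */
+          sh->batch_head = head;
+          pthread_mutex_unlock(&head->batch_lock);
+  }
+
+  static void clear_batch_ready(struct stripe *head, struct stripe *sh)
+  {
+          pthread_mutex_lock(&head->batch_lock);
+          if (sh->batch_head == head)
+                  sh->batch_ready = 0;
+          pthread_mutex_unlock(&head->batch_lock);
+  }
+
+  int main(void)
+  {
+          struct stripe head = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
+          struct stripe sh   = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
+
+          add_to_batch(&head, &sh);
+          clear_batch_ready(&head, &sh);
+          printf("batched=%d ready=%d\n",
+                 sh.batch_head == &head, sh.batch_ready);
+          return 0;
+  }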
+
+Thanks Stephane for helping debug this tricky issue.
+
+Reported-and-tested-by: Stephane Thiell <sthiell@stanford.edu>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -812,6 +812,14 @@ static void stripe_add_to_batch_list(str
+ spin_unlock(&head->batch_head->batch_lock);
+ goto unlock_out;
+ }
++ /*
++ * We must assign batch_head of this stripe within the
++ * batch_lock, otherwise clear_batch_ready of batch head
++ * stripe could clear BATCH_READY bit of this stripe and
++ * this stripe->batch_head doesn't get assigned, which
++ * could confuse clear_batch_ready for this stripe
++ */
++ sh->batch_head = head->batch_head;
+
+ /*
+ * at this point, head's BATCH_READY could be cleared, but we
+@@ -819,8 +827,6 @@ static void stripe_add_to_batch_list(str
+ */
+ list_add(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_head->batch_lock);
+-
+- sh->batch_head = head->batch_head;
+ } else {
+ head->batch_head = head;
+ sh->batch_head = head->batch_head;
--- /dev/null
+From 184a09eb9a2fe425e49c9538f1604b05ed33cfef Mon Sep 17 00:00:00 2001
+From: Dennis Yang <dennisyang@qnap.com>
+Date: Wed, 6 Sep 2017 11:02:35 +0800
+Subject: md/raid5: preserve STRIPE_ON_UNPLUG_LIST in break_stripe_batch_list
+
+From: Dennis Yang <dennisyang@qnap.com>
+
+commit 184a09eb9a2fe425e49c9538f1604b05ed33cfef upstream.
+
+In release_stripe_plug(), if a stripe_head has its STRIPE_ON_UNPLUG_LIST
+set, it indicates that this stripe_head is already in the raid5_plug_cb
+list and release_stripe() would be called instead to drop a reference
+count. Otherwise, the STRIPE_ON_UNPLUG_LIST bit would be set for this
+stripe_head and it will get queued into the raid5_plug_cb list.
+
+Since break_stripe_batch_list() did not preserve STRIPE_ON_UNPLUG_LIST,
+a stripe could be re-added to the plug list while it is still on that
+list, in the following situation. If stripe_head A is added to another
+stripe_head B's batch list, A will have its batch_head != NULL and be
+added to the plug list. After that, stripe_head B gets handled and calls
+break_stripe_batch_list() to reset the state of all the batched
+stripe_heads (including A, which is still on the plug list) and reset
+their batch_head to NULL. Before the plug list gets processed, if
+another write request comes in and gets stripe_head A, A will have its
+batch_head == NULL (cleared by calling break_stripe_batch_list() on B)
+and be added to the plug list once again.
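+
+The state reset itself is a clear-and-set on a bit mask; a tiny
+stand-alone sketch (illustrative bit values, not the kernel's
+set_mask_bits() helper or the real STRIPE_* numbers) of how adding the
+bit to the preserved set keeps it across break_stripe_batch_list():
+
+  #include <stdio.h>
+
+  #define PREREAD_ACTIVE (1u << 0) /* illustrative values only */
+  #define DEGRADED       (1u << 1)
+  #define ON_UNPLUG_LIST (1u << 2)
+  #define BATCH_READY    (1u << 3)
+
+  /* set_mask_bits()-style update: clear clear_mask, then OR in bits. */
+  static unsigned int update(unsigned int state, unsigned int clear_mask,
+                             unsigned int bits)
+  {
+          return (state & ~clear_mask) | bits;
+  }
+
+  int main(void)
+  {
+          unsigned int state = BATCH_READY | DEGRADED | ON_UNPLUG_LIST;
+          /* With this patch, ON_UNPLUG_LIST is part of the kept set. */
+          unsigned int keep = PREREAD_ACTIVE | DEGRADED | ON_UNPLUG_LIST;
+
+          state = update(state, ~keep, 0);
+          printf("still on unplug list: %d\n", !!(state & ON_UNPLUG_LIST));
+          return 0;
+  }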
+
+Signed-off-by: Dennis Yang <dennisyang@qnap.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -4614,7 +4614,8 @@ static void break_stripe_batch_list(stru
+
+ set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+- (1 << STRIPE_DEGRADED)),
++ (1 << STRIPE_DEGRADED) |
++ (1 << STRIPE_ON_UNPLUG_LIST)),
+ head_sh->state & (1 << STRIPE_INSYNC));
+
+ sh->check_state = head_sh->check_state;
--- /dev/null
+From abeae421b03d800d33894df7fbca6d00c70c358e Mon Sep 17 00:00:00 2001
+From: Uma Shankar <uma.shankar@intel.com>
+Date: Tue, 5 Sep 2017 15:14:31 +0530
+Subject: Revert "drm/i915/bxt: Disable device ready before shutdown command"
+
+From: Uma Shankar <uma.shankar@intel.com>
+
+commit abeae421b03d800d33894df7fbca6d00c70c358e upstream.
+
+This reverts commit bbdf0b2ff32a ("drm/i915/bxt: Disable device ready
+before shutdown command").
+
+Disabling device ready before the shutdown command was added previously
+to avoid a split-screen issue seen on dual link DSI panels. As of now,
+dual link is not supported and will need some rework in the upstream
+code. For single link DSI panels the change is not required, and it
+causes a failure to send the SHUTDOWN packet during disable. Hence
+revert the change. It will be handled again as part of dual link
+enabling upstream.
+
+Fixes: bbdf0b2ff32a ("drm/i915/bxt: Disable device ready before shutdown command")
+Signed-off-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/1504604671-17237-1-git-send-email-vidya.srinivas@intel.com
+(cherry picked from commit 33c8d8870c67faf3161898a56af98ac3c1c71450)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_dsi.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_dsi.c
++++ b/drivers/gpu/drm/i915/intel_dsi.c
+@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct int
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
+ {
+- struct drm_device *dev = encoder->base.dev;
+- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+
+@@ -903,15 +901,6 @@ static void intel_dsi_disable(struct int
+ intel_panel_disable_backlight(old_conn_state);
+
+ /*
+- * Disable Device ready before the port shutdown in order
+- * to avoid split screen
+- */
+- if (IS_BROXTON(dev_priv)) {
+- for_each_dsi_port(port, intel_dsi->ports)
+- I915_WRITE(MIPI_DEVICE_READY(port), 0);
+- }
+-
+- /*
+ * According to the spec we should send SHUTDOWN before
+ * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
+ * has shown that the v3 sequence works for v2 VBTs too
--- /dev/null
+From d1b490939d8c117a06dfc562c41d933f71d30289 Mon Sep 17 00:00:00 2001
+From: "Guilherme G. Piccoli" <gpiccoli@linux.vnet.ibm.com>
+Date: Tue, 19 Sep 2017 12:11:55 -0300
+Subject: scsi: aacraid: Add a small delay after IOP reset
+
+From: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+
+commit d1b490939d8c117a06dfc562c41d933f71d30289 upstream.
+
+Commit 0e9973ed3382 ("scsi: aacraid: Add periodic checks to see IOP reset
+status") changed the way driver checks if a reset succeeded. Now, after an
+IOP reset, aacraid immediately start polling a register to verify the reset
+is complete.
+
+This behavior causes regressions on the reset path on PowerPC (at least).
+Since the delay after the IOP reset was removed by the aforementioned patch,
+the driver now starts reading a register immediately after the reset is
+issued (by writing to another register), which "corrupts" the reset
+procedure, and it ends up failing all the time.
+
+The issue heavily impacted kdump on PowerPC, since on the kdump path we
+proactively issue a reset to the adapter (through the reset_devices kernel
+parameter).
+
+This patch (re-)adds a delay right after the IOP reset is issued. Empirically
+we measured that 3 seconds is enough, but for safety reasons we delay
+for 5s (and since it was 30s before, 5s is still a small amount).
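+
+In outline, the sequencing restored here is "issue the reset, give the
+hardware time to settle, then poll with a bound"; a rough user-space
+sketch with made-up register helpers (not the aacraid src_writel()/
+src_readl() code):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+  #include <time.h>
+
+  /* Hypothetical stand-ins for the controller register accessors. */
+  static void write_reset_reg(unsigned int val) { (void)val; }
+  static unsigned int read_reset_status(void)   { return 1; } /* done */
+
+  static void sleep_ms(long ms)
+  {
+          struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
+
+          nanosleep(&ts, NULL);
+  }
+
+  int main(void)
+  {
+          write_reset_reg(0x1); /* issue the IOP reset */
+
+          /* Let the controller actually enter reset before touching
+           * its registers again; polling immediately is what broke. */
+          sleep_ms(5000);
+
+          bool done = false;
+          for (int i = 0; i < 60 && !done; i++) { /* bounded polling */
+                  done = (read_reset_status() != 0);
+                  if (!done)
+                          sleep_ms(1000);
+          }
+          printf("reset %s\n", done ? "completed" : "timed out");
+          return 0;
+  }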
+
+For reference, without this patch we observe the following messages
+on kdump kernel boot process:
+
+ [ 76.294] aacraid 0003:01:00.0: IOP reset failed
+ [ 76.294] aacraid 0003:01:00.0: ARC Reset attempt failed
+ [ 86.524] aacraid 0003:01:00.0: adapter kernel panic'd ff.
+ [ 86.524] aacraid 0003:01:00.0: Controller reset type is 3
+ [ 86.524] aacraid 0003:01:00.0: Issuing IOP reset
+ [146.534] aacraid 0003:01:00.0: IOP reset failed
+ [146.534] aacraid 0003:01:00.0: ARC Reset attempt failed
+
+Fixes: 0e9973ed3382 ("scsi: aacraid: Add periodic checks to see IOP reset status")
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+Acked-by: Dave Carroll <david.carroll@microsemi.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/aacraid/src.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aa
+ aac_set_intx_mode(dev);
+
+ src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
++
++ msleep(5000);
+ }
+
+ static void aac_send_hardware_soft_reset(struct aac_dev *dev)
--- /dev/null
+From 6c92f7dbf25c36f35320e4ae0b508676410bac04 Mon Sep 17 00:00:00 2001
+From: Dave Carroll <david.carroll@microsemi.com>
+Date: Fri, 15 Sep 2017 11:04:28 -0600
+Subject: scsi: aacraid: Fix 2T+ drives on SmartIOC-2000
+
+From: Dave Carroll <david.carroll@microsemi.com>
+
+commit 6c92f7dbf25c36f35320e4ae0b508676410bac04 upstream.
+
+The logic for supporting large drives was previously tied to 4Kn support
+for SmartIOC-2000. As SmartIOC-2000 does not support volumes using 4Kn
+drives, use the intended option flag AAC_OPT_NEW_COMM_64 to determine
+support for volumes greater than 2T.
+
+Signed-off-by: Dave Carroll <david.carroll@microsemi.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Raghava Aditya Renukunta <RaghavaAditya.Renukunta@microsemi.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/aacraid/aachba.c | 12 ++++++------
+ drivers/scsi/aacraid/aacraid.h | 5 +++++
+ 2 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -699,13 +699,13 @@ static void _aac_probe_container1(void *
+ int status;
+
+ dresp = (struct aac_mount *) fib_data(fibptr);
+- if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
+- AAC_OPTION_VARIABLE_BLOCK_SIZE))
++ if (!aac_supports_2T(fibptr->dev)) {
+ dresp->mnt[0].capacityhigh = 0;
+- if ((le32_to_cpu(dresp->status) != ST_OK) ||
+- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+- _aac_probe_container2(context, fibptr);
+- return;
++ if ((le32_to_cpu(dresp->status) == ST_OK) &&
++ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
++ _aac_probe_container2(context, fibptr);
++ return;
++ }
+ }
+ scsicmd = (struct scsi_cmnd *) context;
+
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -2700,6 +2700,11 @@ static inline int aac_is_src(struct aac_
+ return 0;
+ }
+
++static inline int aac_supports_2T(struct aac_dev *dev)
++{
++ return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
++}
++
+ char * get_container_type(unsigned type);
+ extern int numacb;
+ extern char aac_driver_version[];
--- /dev/null
+From c88f0e6b06f4092995688211a631bb436125d77b Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sun, 27 Aug 2017 20:25:26 +0800
+Subject: scsi: scsi_transport_iscsi: fix the issue that iscsi_if_rx doesn't parse nlmsg properly
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit c88f0e6b06f4092995688211a631bb436125d77b upstream.
+
+ChunYu found a kernel crash by syzkaller:
+
+[ 651.617875] kasan: CONFIG_KASAN_INLINE enabled
+[ 651.618217] kasan: GPF could be caused by NULL-ptr deref or user memory access
+[ 651.618731] general protection fault: 0000 [#1] SMP KASAN
+[ 651.621543] CPU: 1 PID: 9539 Comm: scsi Not tainted 4.11.0.cov #32
+[ 651.621938] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+[ 651.622309] task: ffff880117780000 task.stack: ffff8800a3188000
+[ 651.622762] RIP: 0010:skb_release_data+0x26c/0x590
+[...]
+[ 651.627260] Call Trace:
+[ 651.629156] skb_release_all+0x4f/0x60
+[ 651.629450] consume_skb+0x1a5/0x600
+[ 651.630705] netlink_unicast+0x505/0x720
+[ 651.632345] netlink_sendmsg+0xab2/0xe70
+[ 651.633704] sock_sendmsg+0xcf/0x110
+[ 651.633942] ___sys_sendmsg+0x833/0x980
+[ 651.637117] __sys_sendmsg+0xf3/0x240
+[ 651.638820] SyS_sendmsg+0x32/0x50
+[ 651.639048] entry_SYSCALL_64_fastpath+0x1f/0xc2
+
+It's caused by the skb_shared_info at the end of the sk_buff being
+overwritten by ISCSI_KEVENT_IF_ERROR when parsing nlmsg info from the skb
+in iscsi_if_rx.
+
+During the loop, if skb->len == nlh->nlmsg_len and both are sizeof(*nlh),
+ev = nlmsg_data(nlh) will actually point at skb_shinfo(SKB) instead and
+set a new value into skb_shinfo(SKB)->nr_frags via ev->type.
+
+This patch fixes it by checking nlh->nlmsg_len properly there to
+avoid over-accessing the sk_buff.
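+
+The added check is plain netlink length validation; a stand-alone
+sketch with simplified structs (not the kernel's skb/nlmsg helpers)
+shows why both bounds have to hold before the payload is touched:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  struct nlmsghdr_s {          /* simplified netlink header */
+          uint32_t nlmsg_len;  /* header + payload length   */
+          uint16_t nlmsg_type;
+  };
+
+  struct iscsi_uevent_s {      /* simplified event payload  */
+          uint32_t type;
+  };
+
+  /* Parse only if the claimed length covers header + event and the
+   * buffer really holds nlmsg_len bytes. */
+  static bool event_msg_ok(const struct nlmsghdr_s *nlh, size_t buf_len)
+  {
+          return nlh->nlmsg_len >= sizeof(*nlh) +
+                                   sizeof(struct iscsi_uevent_s) &&
+                 buf_len >= nlh->nlmsg_len;
+  }
+
+  int main(void)
+  {
+          struct nlmsghdr_s nlh = { 0, 0 };
+
+          /* Length covers only the header: reading an event out of it
+           * would access whatever memory follows the message, which is
+           * exactly the overrun reported above. */
+          nlh.nlmsg_len = sizeof(nlh);
+          printf("header-only ok?  %d\n", event_msg_ok(&nlh, sizeof(nlh)));
+
+          nlh.nlmsg_len = sizeof(nlh) + sizeof(struct iscsi_uevent_s);
+          printf("full message ok? %d\n", event_msg_ok(&nlh, 64));
+          return 0;
+  }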
+
+Reported-by: ChunYu Wang <chunwang@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_transport_iscsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3689,7 +3689,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ uint32_t group;
+
+ nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
++ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+ skb->len < nlh->nlmsg_len) {
+ break;
+ }
genirq-make-sparse_irq_lock-protect-what-it-should-protect.patch
genirq-msi-fix-populating-multiple-interrupts.patch
genirq-fix-cpumask-check-in-__irq_startup_managed.patch
+kvm-ppc-book3s-hv-hold-kvm-lock-around-call-to-kvmppc_update_lpcr.patch
+kvm-ppc-book3s-hv-fix-bug-causing-host-slb-to-be-restored-incorrectly.patch
+kvm-ppc-book3s-hv-don-t-access-xive-pipr-register-using-byte-accesses.patch
+tracing-fix-trace_pipe-behavior-for-instance-traces.patch
+tracing-erase-irqsoff-trace-with-empty-write.patch
+tracing-remove-rcu-work-arounds-from-stack-tracer.patch
+md-raid5-fix-a-race-condition-in-stripe-batch.patch
+md-raid5-preserve-stripe_on_unplug_list-in-break_stripe_batch_list.patch
+dm-raid-fix-a-race-condition-in-request-handling.patch
+scsi-scsi_transport_iscsi-fix-the-issue-that-iscsi_if_rx-doesn-t-parse-nlmsg-properly.patch
+scsi-aacraid-fix-2t-drives-on-smartioc-2000.patch
+scsi-aacraid-add-a-small-delay-after-iop-reset.patch
+drm-exynos-fix-locking-in-the-suspend-resume-paths.patch
+drm-i915-gvt-fix-incorrect-pci-bars-reporting.patch
+revert-drm-i915-bxt-disable-device-ready-before-shutdown-command.patch
+drm-amdgpu-revert-tile-table-update-for-oland.patch
+drm-radeon-disable-hard-reset-in-hibernate-for-apus.patch
+crypto-drbg-fix-freeing-of-resources.patch
+crypto-talitos-don-t-provide-setkey-for-non-hmac-hashing-algs.patch
+crypto-talitos-fix-sha224.patch
+crypto-talitos-fix-hashing.patch
+crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
--- /dev/null
+From 8dd33bcb7050dd6f8c1432732f930932c9d3a33e Mon Sep 17 00:00:00 2001
+From: Bo Yan <byan@nvidia.com>
+Date: Mon, 18 Sep 2017 10:03:35 -0700
+Subject: tracing: Erase irqsoff trace with empty write
+
+From: Bo Yan <byan@nvidia.com>
+
+commit 8dd33bcb7050dd6f8c1432732f930932c9d3a33e upstream.
+
+One convenient way to erase a trace is "echo > trace". However, this
+is currently broken if the current tracer is the irqsoff tracer, because
+the irqsoff tracer uses max_buffer as the default trace buffer.
+
+Set max_buffer as the one to be cleared when it is the trace
+buffer currently in use.
+
+Link: http://lkml.kernel.org/r/1505754215-29411-1-git-send-email-byan@nvidia.com
+
+Cc: <mingo@redhat.com>
+Fixes: 4acd4d00f ("tracing: give easy way to clear trace buffer")
+Signed-off-by: Bo Yan <byan@nvidia.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4017,11 +4017,17 @@ static int tracing_open(struct inode *in
+ /* If this file was open for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ int cpu = tracing_get_cpu(inode);
++ struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++ if (tr->current_trace->print_max)
++ trace_buf = &tr->max_buffer;
++#endif
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+- tracing_reset_online_cpus(&tr->trace_buffer);
++ tracing_reset_online_cpus(trace_buf);
+ else
+- tracing_reset(&tr->trace_buffer, cpu);
++ tracing_reset(trace_buf, cpu);
+ }
+
+ if (file->f_mode & FMODE_READ) {
--- /dev/null
+From 75df6e688ccd517e339a7c422ef7ad73045b18a2 Mon Sep 17 00:00:00 2001
+From: Tahsin Erdogan <tahsin@google.com>
+Date: Sun, 17 Sep 2017 03:23:48 -0700
+Subject: tracing: Fix trace_pipe behavior for instance traces
+
+From: Tahsin Erdogan <tahsin@google.com>
+
+commit 75df6e688ccd517e339a7c422ef7ad73045b18a2 upstream.
+
+When reading data from trace_pipe, tracing_wait_pipe() performs a
+check to see if tracing has been turned off after some data was read.
+Currently, this check always looks at the global trace state, but it
+should be checking the trace instance that the trace_pipe file belongs to.
+
+Because of this bug, cat instances/i1/trace_pipe in the following
+script will immediately exit instead of waiting for data:
+
+cd /sys/kernel/debug/tracing
+echo 0 > tracing_on
+mkdir -p instances/i1
+echo 1 > instances/i1/tracing_on
+echo 1 > instances/i1/events/sched/sched_process_exec/enable
+cat instances/i1/trace_pipe
+
+Link: http://lkml.kernel.org/r/20170917102348.1615-1-tahsin@google.com
+
+Fixes: 10246fa35d4f ("tracing: give easy way to clear trace buffer")
+Signed-off-by: Tahsin Erdogan <tahsin@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5664,7 +5664,7 @@ static int tracing_wait_pipe(struct file
+ *
+ * iter->pos will be 0 if we haven't read anything.
+ */
+- if (!tracing_is_on() && iter->pos)
++ if (!tracer_tracing_is_on(iter->tr) && iter->pos)
+ break;
+
+ mutex_unlock(&iter->mutex);
--- /dev/null
+From 15516c89acce948debc4c598e03c3fee53045797 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Thu, 21 Sep 2017 13:00:21 -0400
+Subject: tracing: Remove RCU work arounds from stack tracer
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 15516c89acce948debc4c598e03c3fee53045797 upstream.
+
+Currently the stack tracer calls rcu_irq_enter() to make sure RCU
+is watching when it records a stack trace. But if the stack tracer
+is triggered while tracing inside of a rcu_irq_enter(), calling
+rcu_irq_enter() unconditionally can be problematic.
+
+The reason for having rcu_irq_enter() in the first place has been
+addressed within the stack-trace saving code itself, so there's no
+reason for doing it in the stack tracer. Just remove it.
+
+Fixes: 0be964be0 ("module: Sanitize RCU usage and locking")
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Suggested-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_stack.c | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned l
+ if (in_nmi())
+ return;
+
+- /*
+- * There's a slight chance that we are tracing inside the
+- * RCU infrastructure, and rcu_irq_enter() will not work
+- * as expected.
+- */
+- if (unlikely(rcu_irq_enter_disabled()))
+- return;
+-
+ local_irq_save(flags);
+ arch_spin_lock(&stack_trace_max_lock);
+
+- /*
+- * RCU may not be watching, make it see us.
+- * The stack trace code uses rcu_sched.
+- */
+- rcu_irq_enter();
+-
+ /* In case another CPU set the tracer_frame on us */
+ if (unlikely(!frame_size))
+ this_size -= tracer_frame;
+@@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned l
+ }
+
+ out:
+- rcu_irq_exit();
+ arch_spin_unlock(&stack_trace_max_lock);
+ local_irq_restore(flags);
+ }