--- /dev/null
+From d6040764adcb5cb6de1489422411d701c158bb69 Mon Sep 17 00:00:00 2001
+From: Salvatore Benedetto <salvatore.benedetto@intel.com>
+Date: Fri, 13 Jan 2017 11:54:08 +0000
+Subject: crypto: api - Clear CRYPTO_ALG_DEAD bit before registering an alg
+
+From: Salvatore Benedetto <salvatore.benedetto@intel.com>
+
+commit d6040764adcb5cb6de1489422411d701c158bb69 upstream.
+
+Make sure the CRYPTO_ALG_DEAD bit is cleared before proceeding with
+the algorithm registration. This fixes qat-dh registration when the
+driver is restarted.
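+
+As an illustration of the failure mode (a sketch that assumes the driver
+re-registers the same crypto_alg object it unregistered earlier; the
+structure name is hypothetical, this is not the actual qat code):
+
+    static struct kpp_alg qat_dh_alg = { /* ... */ };
+
+    /* unload: the unregister path marks the alg CRYPTO_ALG_DEAD */
+    crypto_unregister_kpp(&qat_dh_alg);
+
+    /* reload: the stale DEAD flag used to survive in cra_flags and break
+     * registration; crypto_register_alg() now clears it up front */
+    crypto_register_kpp(&qat_dh_alg);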
+
+Signed-off-by: Salvatore Benedetto <salvatore.benedetto@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algapi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_al
+ struct crypto_larval *larval;
+ int err;
+
++ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ err = crypto_check_alg(alg);
+ if (err)
+ return err;
--- /dev/null
+From 11e3b725cfc282efe9d4a354153e99d86a16af08 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 17 Jan 2017 13:46:29 +0000
+Subject: crypto: arm64/aes-blk - honour iv_out requirement in CBC and CTR modes
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 11e3b725cfc282efe9d4a354153e99d86a16af08 upstream.
+
+Update the ARMv8 Crypto Extensions and the plain NEON AES implementations
+in CBC and CTR modes to return the next IV back to the skcipher API client.
+This is necessary for chaining to work correctly.
+
+Note that for CTR, this is only done if the request is a round multiple of
+the block size, since otherwise, chaining is impossible anyway.
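+
+A sketch of the chaining pattern that relies on this behaviour (an assumed
+caller, not code from this patch): the same IV buffer is handed to
+consecutive requests, so each call must write the next IV back into it.
+
+    u8 iv[AES_BLOCK_SIZE];                  /* seeded with the initial IV */
+
+    /* sg_a/sg_b: scatterlists covering two consecutive chunks */
+    skcipher_request_set_crypt(req, sg_a, sg_a, len_a, iv);
+    crypto_skcipher_encrypt(req);           /* must leave the next IV in iv[] */
+
+    skcipher_request_set_crypt(req, sg_b, sg_b, len_b, iv);
+    crypto_skcipher_encrypt(req);           /* continues the same CBC/CTR stream */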
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-modes.S | 88 ++++++++++++++++++++----------------------
+ 1 file changed, 42 insertions(+), 46 deletions(-)
+
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
+ cbz w6, .Lcbcencloop
+
+ ld1 {v0.16b}, [x5] /* get iv */
+- enc_prepare w3, x2, x5
++ enc_prepare w3, x2, x6
+
+ .Lcbcencloop:
+ ld1 {v1.16b}, [x1], #16 /* get next pt block */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+- encrypt_block v0, w3, x2, x5, w6
++ encrypt_block v0, w3, x2, x6, w7
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcencloop
++ st1 {v0.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_encrypt)
+
+@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ cbz w6, .LcbcdecloopNx
+
+ ld1 {v7.16b}, [x5] /* get iv */
+- dec_prepare w3, x2, x5
++ dec_prepare w3, x2, x6
+
+ .LcbcdecloopNx:
+ #if INTERLEAVE >= 2
+@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ .Lcbcdecloop:
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
+ mov v0.16b, v1.16b /* ...and copy to v0 */
+- decrypt_block v0, w3, x2, x5, w6
++ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
+ mov v7.16b, v1.16b /* ct is next iv */
+ st1 {v0.16b}, [x0], #16
+@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ bne .Lcbcdecloop
+ .Lcbcdecout:
+ FRAME_POP
++ st1 {v7.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_decrypt)
+
+@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
+
+ AES_ENTRY(aes_ctr_encrypt)
+ FRAME_PUSH
+- cbnz w6, .Lctrfirst /* 1st time around? */
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
+-#if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
+- bcs .Lctrinc
+- add x5, x5, #1 /* increment BE ctr */
+- b .LctrincNx
+-#else
+- b .Lctrinc
+-#endif
+-.Lctrfirst:
++ cbz w6, .Lctrnotfirst /* 1st time around? */
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
++
++.Lctrnotfirst:
++ umov x8, v4.d[1] /* keep swabbed ctr in reg */
++ rev x8, x8
+ #if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
++ cmn w8, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
+ .LctrloopNx:
+ subs w4, w4, #INTERLEAVE
+@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
+ #if INTERLEAVE == 2
+ mov v0.8b, v4.8b
+ mov v1.8b, v4.8b
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v0.d[1], x7
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v1.d[1], x7
+ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
+ do_encrypt_block2x
+@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
+ st1 {v0.16b-v1.16b}, [x0], #32
+ #else
+ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
+- dup v7.4s, w5
++ dup v7.4s, w8
+ mov v0.16b, v4.16b
+ add v7.4s, v7.4s, v8.4s
+ mov v1.16b, v4.16b
+@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
+ eor v2.16b, v7.16b, v2.16b
+ eor v3.16b, v5.16b, v3.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+- add x5, x5, #INTERLEAVE
++ add x8, x8, #INTERLEAVE
+ #endif
+- cbz w4, .LctroutNx
+-.LctrincNx:
+- rev x7, x5
++ rev x7, x8
+ ins v4.d[1], x7
++ cbz w4, .Lctrout
+ b .LctrloopNx
+-.LctroutNx:
+- sub x5, x5, #1
+- rev x7, x5
+- ins v4.d[1], x7
+- b .Lctrout
+ .Lctr1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lctrout
+@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
+ .Lctrloop:
+ mov v0.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
++
++ adds x8, x8, #1 /* increment BE ctr */
++ rev x7, x8
++ ins v4.d[1], x7
++ bcs .Lctrcarry /* overflow? */
++
++.Lctrcarrydone:
+ subs w4, w4, #1
+ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
+ ld1 {v3.16b}, [x1], #16
+ eor v3.16b, v0.16b, v3.16b
+ st1 {v3.16b}, [x0], #16
+- beq .Lctrout
+-.Lctrinc:
+- adds x5, x5, #1 /* increment BE ctr */
+- rev x7, x5
+- ins v4.d[1], x7
+- bcc .Lctrloop /* no overflow? */
+- umov x7, v4.d[0] /* load upper word of ctr */
+- rev x7, x7 /* ... to handle the carry */
+- add x7, x7, #1
+- rev x7, x7
+- ins v4.d[0], x7
+- b .Lctrloop
++ bne .Lctrloop
++
++.Lctrout:
++ st1 {v4.16b}, [x5] /* return next CTR value */
++ FRAME_POP
++ ret
++
+ .Lctrhalfblock:
+ ld1 {v3.8b}, [x1]
+ eor v3.8b, v0.8b, v3.8b
+ st1 {v3.8b}, [x0]
+-.Lctrout:
+ FRAME_POP
+ ret
++
++.Lctrcarry:
++ umov x7, v4.d[0] /* load upper word of ctr */
++ rev x7, x7 /* ... to handle the carry */
++ add x7, x7, #1
++ rev x7, x7
++ ins v4.d[0], x7
++ b .Lctrcarrydone
+ AES_ENDPROC(aes_ctr_encrypt)
+ .ltorg
+
--- /dev/null
+From 57bcd0a6364cd4eaa362d7ff1777e88ddf501602 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 27 Jan 2017 10:31:52 -0500
+Subject: drm/amdgpu/si: fix crash on headless asics
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 57bcd0a6364cd4eaa362d7ff1777e88ddf501602 upstream.
+
+Add a missing check for whether any CRTCs are present.
+
+Fixes:
+https://bugzilla.kernel.org/show_bug.cgi?id=193341
+https://bugs.freedesktop.org/show_bug.cgi?id=99387
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -227,6 +227,9 @@ static void gmc_v6_0_mc_program(struct a
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
++ if (adev->mode_info.num_crtc)
++ amdgpu_display_set_vga_render_state(adev, false);
++
+ gmc_v6_0_mc_stop(adev, &save);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+@@ -256,7 +259,6 @@ static void gmc_v6_0_mc_program(struct a
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ gmc_v6_0_mc_resume(adev, &save);
+- amdgpu_display_set_vga_render_state(adev, false);
+ }
+
+ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
--- /dev/null
+From d347583a39e2df609a9e40c835f72d3614665b53 Mon Sep 17 00:00:00 2001
+From: Alastair Bridgewater <alastair.bridgewater@gmail.com>
+Date: Wed, 11 Jan 2017 15:47:18 -0500
+Subject: drm/nouveau/disp/gt215: Fix HDA ELD handling (thus, HDMI audio) on gt215
+
+From: Alastair Bridgewater <alastair.bridgewater@gmail.com>
+
+commit d347583a39e2df609a9e40c835f72d3614665b53 upstream.
+
+Store the ELD correctly, not just enough copies of the first byte
+to pad out the given ELD size.
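+
+Each write packs the byte offset into bits 15:8 and the payload byte into
+bits 7:0 of the SOR_HDA_ELD register (field layout as implied by the hunk
+below), so iteration i must carry data[i] rather than data[0]:
+
+    nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);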
+
+Signed-off-by: Alastair Bridgewater <alastair.bridgewater@gmail.com>
+Fixes: 120b0c39c756 ("drm/nv50-/disp: audit and version SOR_HDA_ELD method")
+Reviewed-by: Ilia Mirkin <imirkin@alum.mit.edu>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
+ );
+ }
+ for (i = 0; i < size; i++)
+- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
++ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
+ for (; i < 0x60; i++)
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+ nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
--- /dev/null
+From 24bf7ae359b8cca165bb30742d2b1c03a1eb23af Mon Sep 17 00:00:00 2001
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+Date: Thu, 19 Jan 2017 22:56:30 -0500
+Subject: drm/nouveau/nv1a,nv1f/disp: fix memory clock rate retrieval
+
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+
+commit 24bf7ae359b8cca165bb30742d2b1c03a1eb23af upstream.
+
+Based on the xf86-video-nv code, NFORCE (NV1A) and NFORCE2 (NV1F) have a
+different way of retrieving clocks. See the
+nv_hw.c:nForceUpdateArbitrationSettings function in the original code
+for how these clocks were accessed.
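+
+A condensed sketch of the two accesses (register offsets and field
+positions as used in the hunk below): on NV1A the MPLL P divider sits in
+bits 11:8 of config dword 0x6c on device 0:3.0, and on NV1F the memory
+clock read from config dword 0x4c on device 0:5.0 has to be scaled down
+by 1000 before being returned.
+
+    pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+    mpllP = (mpllP >> 8) & 0xf;        /* extract the P divider */
+
+    pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+    return clock / 1000;               /* scale to the unit the caller expects */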
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54587
+Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv04/hw.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++ mpllP = (mpllP >> 8) & 0xf;
+ if (!mpllP)
+ mpllP = 4;
+
+@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+- return clock;
++ return clock / 1000;
+ }
+
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
--- /dev/null
+From c8f325a59cfc718d13a50fbc746ed9b415c25e92 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 1 Feb 2017 17:45:02 +0000
+Subject: efi/fdt: Avoid FDT manipulation after ExitBootServices()
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit c8f325a59cfc718d13a50fbc746ed9b415c25e92 upstream.
+
+Some AArch64 UEFI implementations disable the MMU in ExitBootServices(),
+after which unaligned accesses to RAM are no longer supported.
+
+Commit:
+
+ abfb7b686a3e ("efi/libstub/arm*: Pass latest memory map to the kernel")
+
+fixed an issue in the memory map handling of the stub FDT code, but
+inadvertently created an issue with such firmware, by moving some
+of the FDT manipulation to after the invocation of ExitBootServices().
+
+Given that the stub's libfdt implementation uses the ordinary, accelerated
+string functions, which rely on hardware handling of unaligned accesses,
+manipulating the FDT with the MMU off may result in alignment faults.
+
+So fix the situation by moving the update_fdt_memmap() call into the
+callback function invoked by efi_exit_boot_services() right before it
+calls the ExitBootServices() UEFI service (which is arguably a better
+place for it anyway).
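+
+A condensed view of the resulting ordering (a sketch of the paths touched
+by the hunks below, not a complete listing):
+
+    priv.new_fdt_addr = (void *)*new_fdt_addr;
+    /* efi_exit_boot_services() fetches the final memory map, invokes
+     * exit_boot_func() while boot services and the MMU are still up,
+     * and only then calls the ExitBootServices() UEFI service */
+    status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+                                    exit_boot_func);
+
+    /* inside exit_boot_func(): the FDT is still safe to modify here */
+    return update_fdt_memmap(p->new_fdt_addr, map);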
+
+Note that disabling the MMU in ExitBootServices() is not compliant with
+the UEFI spec, and carries great risk due to the fact that switching from
+cached to uncached memory accesses halfway through compiler generated code
+(i.e., involving a stack) can never be done in a way that is architecturally
+safe.
+
+Fixes: abfb7b686a3e ("efi/libstub/arm*: Pass latest memory map to the kernel")
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Riku Voipio <riku.voipio@linaro.org>
+Cc: mark.rutland@arm.com
+Cc: linux-efi@vger.kernel.org
+Cc: matt@codeblueprint.co.uk
+Cc: leif.lindholm@linaro.org
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/1485971102-23330-2-git-send-email-ard.biesheuvel@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/efi/libstub/fdt.c | 14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(vo
+ struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
++ void *new_fdt_addr;
+ };
+
+ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_s
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+- return EFI_SUCCESS;
++ return update_fdt_memmap(p->new_fdt_addr, map);
+ }
+
+ /*
+@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_b
+
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
++ priv.new_fdt_addr = (void *)*new_fdt_addr;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+
+- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+- if (status != EFI_SUCCESS) {
+- /*
+- * The kernel won't get far without the memory map, but
+- * may still be able to print something meaningful so
+- * return success here.
+- */
+- return EFI_SUCCESS;
+- }
+-
+ /* Install the new virtual address map */
+ svam = sys_table->runtime->set_virtual_address_map;
+ status = svam(runtime_entry_count * desc_size, desc_size,
--- /dev/null
+From 3a4b77cd47bb837b8557595ec7425f281f2ca1fe Mon Sep 17 00:00:00 2001
+From: Eryu Guan <guaneryu@gmail.com>
+Date: Thu, 1 Dec 2016 15:08:37 -0500
+Subject: ext4: validate s_first_meta_bg at mount time
+
+From: Eryu Guan <guaneryu@gmail.com>
+
+commit 3a4b77cd47bb837b8557595ec7425f281f2ca1fe upstream.
+
+Ralf Spenneberg reported that he hit a kernel crash when mounting a
+modified ext4 image. It turns out that the kernel crashed while
+calculating the fs overhead (ext4_calculate_overhead()) because the
+image has a very large s_first_meta_bg (debug code shows it is
+842150400), so ext4 overruns the bitmap buffer, which is only
+PAGE_SIZE bytes, in count_overhead().
+
+ext4_calculate_overhead():
+ buf = get_zeroed_page(GFP_NOFS); <=== PAGE_SIZE buffer
+ blks = count_overhead(sb, i, buf);
+
+count_overhead():
+ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { <=== j = 842150400
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf); <=== buffer overrun
+ count++;
+ }
+
+This can be reproduced easily with the following script:
+
+ #!/bin/bash
+ rm -f fs.img
+ mkdir -p /mnt/ext4
+ fallocate -l 16M fs.img
+ mke2fs -t ext4 -O bigalloc,meta_bg,^resize_inode -F fs.img
+ debugfs -w -R "ssv first_meta_bg 842150400" fs.img
+ mount -o loop fs.img /mnt/ext4
+
+Fix it by validating s_first_meta_bg first at mount time, and
+refusing to mount if its value exceeds the largest possible meta_bg
+number.
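+
+For scale (rough arithmetic, assuming 4 KiB pages): the zeroed page used
+as the bitmap holds PAGE_SIZE * 8 = 32768 bits, while an s_first_meta_bg
+of 842150400 makes count_overhead() try to set on the order of 842150400
+bits, i.e. roughly 100 MiB of bitmap, far beyond the single page.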
+
+Reported-by: Ralf Spenneberg <ralf@os-t.de>
+Signed-off-by: Eryu Guan <guaneryu@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3827,6 +3827,15 @@ static int ext4_fill_super(struct super_
+ (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ EXT4_DESC_PER_BLOCK(sb);
++ if (ext4_has_feature_meta_bg(sb)) {
++ if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
++ ext4_msg(sb, KERN_WARNING,
++ "first meta block group too large: %u "
++ "(group descriptor block count %u)",
++ le32_to_cpu(es->s_first_meta_bg), db_count);
++ goto failed_mount;
++ }
++ }
+ sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
--- /dev/null
+From 8e9faa15469ed7c7467423db4c62aeed3ff4cae3 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 30 Jan 2017 11:26:39 +0100
+Subject: HID: cp2112: fix gpio-callback error handling
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 8e9faa15469ed7c7467423db4c62aeed3ff4cae3 upstream.
+
+In case of a zero-length report, the gpio direction_input callback would
+currently return success instead of an errno.
+
+Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-cp2112.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -206,7 +206,7 @@ static int cp2112_gpio_direction_input(s
+
+ exit:
+ mutex_unlock(&dev->lock);
+- return ret <= 0 ? ret : -EIO;
++ return ret < 0 ? ret : -EIO;
+ }
+
+ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
--- /dev/null
+From 7a7b5df84b6b4e5d599c7289526eed96541a0654 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 30 Jan 2017 11:26:38 +0100
+Subject: HID: cp2112: fix sleep-while-atomic
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 7a7b5df84b6b4e5d599c7289526eed96541a0654 upstream.
+
+A recent commit fixing DMA buffers on the stack added a shared transfer
+buffer protected by a spinlock. This is broken, as the USB HID request
+callbacks can sleep. Fix this up by replacing the spinlock with a mutex.
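+
+A minimal sketch of the resulting pattern (shape assumed from the hunks
+below): the shared DMA-safe buffer is serialized with a mutex, which is
+legal around hid_hw_raw_request() because that call may sleep.
+
+    mutex_lock(&dev->lock);
+    ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, dev->in_out_buffer,
+                             CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+                             HID_REQ_GET_REPORT);
+    /* ... parse dev->in_out_buffer ... */
+    mutex_unlock(&dev->lock);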
+
+Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-cp2112.c | 26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -167,7 +167,7 @@ struct cp2112_device {
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
+ u8 *in_out_buffer;
+- spinlock_t lock;
++ struct mutex lock;
+ };
+
+ static int gpio_push_pull = 0xFF;
+@@ -179,10 +179,9 @@ static int cp2112_gpio_direction_input(s
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -206,7 +205,7 @@ static int cp2112_gpio_direction_input(s
+ ret = 0;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ return ret <= 0 ? ret : -EIO;
+ }
+
+@@ -215,10 +214,9 @@ static void cp2112_gpio_set(struct gpio_
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ buf[0] = CP2112_GPIO_SET;
+ buf[1] = value ? 0xff : 0;
+@@ -230,7 +228,7 @@ static void cp2112_gpio_set(struct gpio_
+ if (ret < 0)
+ hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ }
+
+ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+@@ -238,10 +236,9 @@ static int cp2112_gpio_get(struct gpio_c
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+@@ -255,7 +252,7 @@ static int cp2112_gpio_get(struct gpio_c
+ ret = (buf[1] >> offset) & 1;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ return ret;
+ }
+@@ -266,10 +263,9 @@ static int cp2112_gpio_direction_output(
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -290,7 +286,7 @@ static int cp2112_gpio_direction_output(
+ goto fail;
+ }
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ /*
+ * Set gpio value when output direction is already set,
+@@ -301,7 +297,7 @@ static int cp2112_gpio_direction_output(
+ return 0;
+
+ fail:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ return ret < 0 ? ret : -EIO;
+ }
+
+@@ -1057,7 +1053,7 @@ static int cp2112_probe(struct hid_devic
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+- spin_lock_init(&dev->lock);
++ mutex_init(&dev->lock);
+
+ ret = hid_parse(hdev);
+ if (ret) {
--- /dev/null
+From 030305d69fc6963c16003f50d7e8d74b02d0a143 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Fri, 27 Jan 2017 15:00:45 -0600
+Subject: PCI/ASPM: Handle PCI-to-PCIe bridges as roots of PCIe hierarchies
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+commit 030305d69fc6963c16003f50d7e8d74b02d0a143 upstream.
+
+In a struct pcie_link_state, link->root points to the pcie_link_state of
+the root of the PCIe hierarchy. For the topmost link, this points to
+itself (link->root = link). For others, we copy the pointer from the
+parent (link->root = link->parent->root).
+
+Previously we recognized that Root Ports originated PCIe hierarchies, but
+we treated PCI/PCI-X to PCIe Bridges as being in the middle of the
+hierarchy, and when we tried to copy the pointer from link->parent->root,
+there was no parent, and we dereferenced a NULL pointer:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000090
+ IP: [<ffffffff9e424350>] pcie_aspm_init_link_state+0x170/0x820
+
+Recognize that PCI/PCI-X to PCIe Bridges originate PCIe hierarchies just
+like Root Ports do, so link->root for these devices should also point to
+itself.
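+
+For a small topology the pointer setup then looks like this (illustrative
+sketch, not output from the code):
+
+    Root Port A                     link_A->root = link_A
+      Switch below A (link B)       link_B->root = link_B->parent->root = link_A
+
+    PCI-to-PCIe Bridge X            link_X->root = link_X
+      Switch below X (link Y)       link_Y->root = link_Y->parent->root = link_X
+
+Previously the bridge was treated as a mid-hierarchy device without a
+parent link, which led to the NULL pointer dereference quoted above.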
+
+Fixes: 51ebfc92b72b ("PCI: Enumerate switches below PCI-to-PCIe bridges")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=193411
+Link: https://bugzilla.opensuse.org/show_bug.cgi?id=1022181
+Tested-by: lists@ssl-mail.com
+Tested-by: Jayachandran C. <jnair@caviumnetworks.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pcie/aspm.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -518,25 +518,32 @@ static struct pcie_link_state *alloc_pci
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
++
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
++
++ /*
++ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
++ * hierarchies.
++ */
++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
++ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
++ link->root = link;
++ } else {
+ struct pcie_link_state *parent;
++
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
++
+ link->parent = parent;
++ link->root = link->parent->root;
+ list_add(&link->link, &parent->children);
+ }
+- /* Setup a pointer to the root port link */
+- if (!link->parent)
+- link->root = link;
+- else
+- link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+ pdev->link_state = link;
--- /dev/null
+From cdca06e4e85974d8a3503ab15709dbbaf90d3dd1 Mon Sep 17 00:00:00 2001
+From: Alexander Stein <alexander.stein@systec-electronic.com>
+Date: Mon, 30 Jan 2017 12:35:28 +0100
+Subject: pinctrl: baytrail: Add missing spinlock usage in byt_gpio_irq_handler
+
+From: Alexander Stein <alexander.stein@systec-electronic.com>
+
+commit cdca06e4e85974d8a3503ab15709dbbaf90d3dd1 upstream.
+
+According to the VLI64 Intel Atom E3800 Specification Update (#329901),
+concurrent read accesses may return 0xffffffff and write accesses may be
+dropped silently.
+To work around this, all accesses must be protected by locks.
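+
+In the interrupt handler this means taking the driver's existing lock
+around the status register read as well (a sketch of the pattern added
+below):
+
+    raw_spin_lock(&vg->lock);
+    pending = readl(reg);          /* serialized against concurrent accesses */
+    raw_spin_unlock(&vg->lock);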
+
+Signed-off-by: Alexander Stein <alexander.stein@systec-electronic.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -1612,7 +1612,9 @@ static void byt_gpio_irq_handler(struct
+ continue;
+ }
+
++ raw_spin_lock(&vg->lock);
+ pending = readl(reg);
++ raw_spin_unlock(&vg->lock);
+ for_each_set_bit(pin, &pending, 32) {
+ virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ generic_handle_irq(virq);
--- /dev/null
+From bf29bddf0417a4783da3b24e8c9e017ac649326f Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 27 Jan 2017 22:25:52 +0000
+Subject: x86/efi: Always map the first physical page into the EFI pagetables
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit bf29bddf0417a4783da3b24e8c9e017ac649326f upstream.
+
+Commit:
+
+ 129766708 ("x86/efi: Only map RAM into EFI page tables if in mixed-mode")
+
+stopped creating 1:1 mappings for all RAM when running in native 64-bit mode.
+
+It turns out, though, that there are 64-bit EFI implementations in the
+wild (this particular problem has been reported on a Lenovo Yoga
+710-11IKB) which still use the first physical page for their own
+private purposes, even though they explicitly mark it
+EFI_CONVENTIONAL_MEMORY in the memory map.
+
+If there is no mapping for this particular frame in the EFI pagetables,
+a triple fault occurs as soon as the firmware touches it and the system
+reboots (on the Yoga 710-11IKB this happens very early during bootup).
+
+Fix that by always mapping the first page of physical memory into the EFI
+pagetables. We're free to hand this page to the BIOS, as trim_bios_range()
+will reserve the first page and isolate it away from memory allocators anyway.
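+
+Concretely, the call added below maps physical frame 0 at virtual address
+0 for a single page with read/write permissions (argument meaning as used
+in the hunk; identical pfn and address give the 1:1 mapping):
+
+    if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+            pr_err("Failed to create 1:1 mapping for the first page!\n");
+            return 1;
+    }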
+
+Note that just reverting 129766708 alone is not enough on v4.9-rc1+ to fix the
+regression on affected hardware, as this commit:
+
+ ab72a27da ("x86/efi: Consolidate region mapping logic")
+
+later made the first physical frame not to be mapped anyway.
+
+Reported-by: Hanka Pavlikova <hanka@ucw.cz>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vojtech Pavlik <vojtech@ucw.cz>
+Cc: Waiman Long <waiman.long@hpe.com>
+Cc: linux-efi@vger.kernel.org
+Fixes: 129766708 ("x86/efi: Only map RAM into EFI page tables if in mixed-mode")
+Link: http://lkml.kernel.org/r/20170127222552.22336-1-matt@codeblueprint.co.uk
+[ Tidied up the changelog and the comment. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/platform/efi/efi_64.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigne
+ efi_scratch.use_pgd = true;
+
+ /*
++ * Certain firmware versions are way too sentimential and still believe
++ * they are exclusive and unquestionable owners of the first physical page,
++ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
++ * (but then write-access it later during SetVirtualAddressMap()).
++ *
++ * Create a 1:1 mapping for this page, to avoid triple faults during early
++ * boot with such firmware. We are free to hand this page to the BIOS,
++ * as trim_bios_range() will reserve the first page and isolate it away
++ * from memory allocators anyway.
++ */
++ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
++ pr_err("Failed to create 1:1 mapping for the first page!\n");
++ return 1;
++ }
++
++ /*
+ * When making calls to the firmware everything needs to be 1:1
+ * mapped and addressable with 32-bit pointers. Map the kernel
+ * text and allocate a new stack because we can't rely on the
--- /dev/null
+From 4b3e6f2ef3722f1a6a97b6034ed492c1a21fd4ae Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Tue, 31 Jan 2017 18:35:37 -0800
+Subject: xtensa: fix noMMU build on cores with MMU
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 4b3e6f2ef3722f1a6a97b6034ed492c1a21fd4ae upstream.
+
+Commit bf15f86b343ed8 ("xtensa: initialize MMU before jumping to reset
+vector") calls MMU management functions even when CONFIG_MMU is not
+selected. That breaks the noMMU build on cores with an MMU.
+
+Don't manage MMU when CONFIG_MMU is not selected.
+
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -540,7 +540,7 @@ subsys_initcall(topology_init);
+
+ void cpu_reset(void)
+ {
+-#if XCHAL_HAVE_PTP_MMU
++#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must