--- /dev/null
+From 1f20f9ff57ca23b9f5502fca85ce3977e8496cb1 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 29 Oct 2017 11:10:43 +0100
+Subject: ALSA: seq: Fix nested rwsem annotation for lockdep splat
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 1f20f9ff57ca23b9f5502fca85ce3977e8496cb1 upstream.
+
+syzkaller reported a lockdep splat due to a possible deadlock on the
+grp->list_mutex of each sequencer client object. This is actually a
+false positive caused by missing nested-lock annotations: a sequencer
+client may deliver an event directly to another client, which then
+takes its own lock.
+
+To address this, the patch replaces the plain down_read() with
+down_read_nested(). The existing "hop" value, which indicates the
+call depth, can be reused as the lock subclass.
+
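+A minimal sketch of the annotation pattern, under the assumption that
+the destination client's lock is taken while the source client's lock
+is already held (deliver_nested() is a hypothetical helper, not code
+from this patch):
+
+  #include <linux/rwsem.h>
+
+  /* Both list_mutex locks share one lock class, so lockdep needs a
+   * subclass (here the hop depth) to tell the nesting levels apart
+   * instead of flagging the recursion as a self-deadlock. */
+  static void deliver_nested(struct rw_semaphore *dst_mutex, int hop)
+  {
+          down_read_nested(dst_mutex, hop);
+          /* ... deliver to the destination's subscribers ... */
+          up_read(dst_mutex);
+  }
+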
+Reference: http://lkml.kernel.org/r/089e082686ac9b482e055c832617@google.com
+Reported-by: syzbot <bot+7feb8de6b4d6bf810cf098bef942cc387e79d0ad@syzkaller.appspotmail.com>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_clientmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct
+ if (atomic)
+ read_lock(&grp->list_lock);
+ else
+- down_read(&grp->list_mutex);
++ down_read_nested(&grp->list_mutex, hop);
+ list_for_each_entry(subs, &grp->list_head, src_list) {
+ /* both ports ready? */
+ if (atomic_read(&subs->ref_count) != 2)
--- /dev/null
+From 79fb0518fec8c8b4ea7f1729f54f293724b3dbb0 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 29 Oct 2017 11:02:04 +0100
+Subject: ALSA: timer: Add missing mutex lock for compat ioctls
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 79fb0518fec8c8b4ea7f1729f54f293724b3dbb0 upstream.
+
+The races between ioctls and other operations were protected against
+by commit af368027a49a ("ALSA: timer: Fix race among timer ioctls")
+and later fixes, but one code path was forgotten: the 32-bit compat
+ioctl. As syzkaller recently spotted, a very similar use-after-free
+may happen through a combination of compat ioctls.
+
+The fix is simply to apply the same ioctl_lock to the compat_ioctl
+callback, too.
+
+Fixes: af368027a49a ("ALSA: timer: Fix race among timer ioctls")
+Reference: http://lkml.kernel.org/r/089e082686ac9b482e055c832617@google.com
+Reported-by: syzbot <bot+e5f3c9783e7048a74233054febbe9f1bdf54b6da@syzkaller.appspotmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/timer_compat.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -133,7 +133,8 @@ enum {
+ #endif /* CONFIG_X86_X32 */
+ };
+
+-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
++static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
+ {
+ void __user *argp = compat_ptr(arg);
+
+@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(
+ case SNDRV_TIMER_IOCTL_PAUSE:
+ case SNDRV_TIMER_IOCTL_PAUSE_OLD:
+ case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
+- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
++ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ case SNDRV_TIMER_IOCTL_GPARAMS32:
+ return snd_timer_user_gparams_compat(file, argp);
+ case SNDRV_TIMER_IOCTL_INFO32:
+@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(
+ }
+ return -ENOIOCTLCMD;
+ }
++
++static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct snd_timer_user *tu = file->private_data;
++ long ret;
++
++ mutex_lock(&tu->ioctl_lock);
++ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
++ mutex_unlock(&tu->ioctl_lock);
++ return ret;
++}
--- /dev/null
+From 1cce91dfc8f7990ca3aea896bfb148f240b12860 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 20 Oct 2017 21:17:05 +0100
+Subject: ARM: 8715/1: add a private asm/unaligned.h
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 1cce91dfc8f7990ca3aea896bfb148f240b12860 upstream.
+
+The asm-generic/unaligned.h header provides two different implementations
+for accessing unaligned variables: the access_ok.h version used when
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set pretends that all pointers
+are in fact aligned, while the le_struct.h version convinces gcc that the
+alignment of a pointer is '1', to make it issue the correct load/store
+instructions depending on the architecture flags.
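+
+As an illustration, a hedged userspace sketch of the le_struct.h
+technique (una_u32 and get_unaligned_u32 are hypothetical names):
+
+  #include <stdint.h>
+
+  /* The packed struct tells the compiler the pointer may have an
+   * alignment of 1, so it emits accesses that are legal at any
+   * address: byte loads on older ARM, plain ldr/str rather than
+   * ldm/ldrd on ARMv6+. */
+  struct una_u32 { uint32_t x; } __attribute__((packed));
+
+  static inline uint32_t get_unaligned_u32(const void *p)
+  {
+          return ((const struct una_u32 *)p)->x;
+  }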
+
+On ARMv5 and older, we always use the second version, to let the compiler
+use byte accesses. On ARMv6 and newer, we currently use the access_ok.h
+version, so the compiler can use any instruction including stm/ldm and
+ldrd/strd that will cause an alignment trap. This trap can significantly
+impact performance when we have to do a lot of fixups and, worse, has
+led to crashes in the LZ4 decompressor code that does not have a trap
+handler.
+
+This adds an ARM-specific version of asm/unaligned.h that uses the
+le_struct.h/be_struct.h implementation unconditionally. This should
+lead to essentially the same code on ARMv6+ as before, except that it
+uses regular load/store instructions instead of the trapping
+multi-register variants.
+
+The crash in the LZ4 decompressor code was probably introduced by the
+patch replacing the LZ4 implementation, commit 4e1a33b105dd ("lib: update
+LZ4 compressor module"), so linux-4.11 and higher would be affected most.
+However, we probably want to have this backported to all older stable
+kernels as well, to help with the performance issues.
+
+There are two follow-ups that I think we should also work on, but not
+backport to stable kernels, first to change the asm-generic version of
+the header to remove the ARM special case, and second to review all
+other uses of CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS to see if they
+might be affected by the same problem on ARM.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/Kbuild | 1 -
+ arch/arm/include/asm/unaligned.h | 27 +++++++++++++++++++++++++++
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/Kbuild
++++ b/arch/arm/include/asm/Kbuild
+@@ -20,7 +20,6 @@ generic-y += simd.h
+ generic-y += sizes.h
+ generic-y += timex.h
+ generic-y += trace_clock.h
+-generic-y += unaligned.h
+
+ generated-y += mach-types.h
+ generated-y += unistd-nr.h
+--- /dev/null
++++ b/arch/arm/include/asm/unaligned.h
+@@ -0,0 +1,27 @@
++#ifndef __ASM_ARM_UNALIGNED_H
++#define __ASM_ARM_UNALIGNED_H
++
++/*
++ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
++ * but we don't want to use linux/unaligned/access_ok.h since that can lead
++ * to traps on unaligned stm/ldm or strd/ldrd.
++ */
++#include <asm/byteorder.h>
++
++#if defined(__LITTLE_ENDIAN)
++# include <linux/unaligned/le_struct.h>
++# include <linux/unaligned/be_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned __get_unaligned_le
++# define put_unaligned __put_unaligned_le
++#elif defined(__BIG_ENDIAN)
++# include <linux/unaligned/be_struct.h>
++# include <linux/unaligned/le_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned __get_unaligned_be
++# define put_unaligned __put_unaligned_be
++#else
++# error need to define endianess
++#endif
++
++#endif /* __ASM_ARM_UNALIGNED_H */
--- /dev/null
+From f9b269f3098121b5d54aaf822e0898c8ed1d3fec Mon Sep 17 00:00:00 2001
+From: Julien Thierry <julien.thierry@arm.com>
+Date: Fri, 20 Oct 2017 12:34:17 +0100
+Subject: arm/arm64: kvm: Disable branch profiling in HYP code
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+commit f9b269f3098121b5d54aaf822e0898c8ed1d3fec upstream.
+
+When HYP code runs into branch profiling code, it attempts to jump to
+unmapped memory, causing a HYP Panic.
+
+Disable branch profiling for code designed to run in HYP mode.
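+
+A rough sketch of the mechanism, based on include/linux/compiler.h:
+with CONFIG_TRACE_BRANCH_PROFILING enabled, likely()/unlikely() expand
+to code that calls ftrace_likely_update(), which lives in normal
+kernel text that is not mapped at HYP. Defining
+DISABLE_BRANCH_PROFILING selects the plain expansion instead:
+
+  /* simplified; the real macros carry more bookkeeping */
+  #define likely(x)   __builtin_expect(!!(x), 1)
+  #define unlikely(x) __builtin_expect(!!(x), 0)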
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/hyp/Makefile | 2 +-
+ arch/arm64/kvm/hyp/Makefile | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+
+ KVM=../../../../virt/kvm
+
+--- a/arch/arm64/kvm/hyp/Makefile
++++ b/arch/arm64/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+
+ KVM=../../../../virt/kvm
+
--- /dev/null
+From fd6c8c206fc5d0717b0433b191de0715122f33bb Mon Sep 17 00:00:00 2001
+From: Dongjiu Geng <gengdongjiu@huawei.com>
+Date: Tue, 17 Oct 2017 22:23:49 +0800
+Subject: arm/arm64: KVM: set right LR register value for 32 bit guest when inject abort
+
+From: Dongjiu Geng <gengdongjiu@huawei.com>
+
+commit fd6c8c206fc5d0717b0433b191de0715122f33bb upstream.
+
+When an exception is trapped to EL2, the hardware uses ELR_ELx to hold
+the faulting instruction address. If KVM wants to inject an abort into
+a 32-bit guest, it needs to set the guest's LR register to emulate the
+abort as having happened in the guest. Because the ARM32 architecture
+uses pipelined execution, the LR value has an offset from the faulting
+instruction address.
+
+The offsets that must be added to the ARM32 link register (LR) for
+each exception type are shown below.
+
+Table taken from ARMv8 ARM DDI0487B-B, table G1-10:
+
+  Exception              Offset, for PE state of:
+                         A32     T32
+  Undefined Instruction  +4      +2
+  Prefetch Abort         +4      +4
+  Data Abort             +8      +8
+  IRQ or FIQ             +4      +4
+
+ [ Removed unused variables in inject_abt to avoid compile warnings.
+ -- Christoffer ]
+
+Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
+Tested-by: Haibin Zhang <zhanghaibin7@huawei.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/emulate.c | 6 ++----
+ arch/arm64/kvm/inject_fault.c | 16 +++++++++++++++-
+ 2 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/kvm/emulate.c
++++ b/arch/arm/kvm/emulate.c
+@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcp
+ u32 return_offset = (is_thumb) ? 2 : 4;
+
+ kvm_update_psr(vcpu, UND_MODE);
+- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
++ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcp
+ */
+ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+ {
+- unsigned long cpsr = *vcpu_cpsr(vcpu);
+- bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset;
+- u32 return_offset = (is_thumb) ? 4 : 0;
++ u32 return_offset = (is_pabt) ? 4 : 8;
+ bool is_lpae;
+
+ kvm_update_psr(vcpu, ABT_MODE);
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -33,12 +33,26 @@
+ #define LOWER_EL_AArch64_VECTOR 0x400
+ #define LOWER_EL_AArch32_VECTOR 0x600
+
++/*
++ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
++ */
++static const u8 return_offsets[8][2] = {
++ [0] = { 0, 0 }, /* Reset, unused */
++ [1] = { 4, 2 }, /* Undefined */
++ [2] = { 0, 0 }, /* SVC, unused */
++ [3] = { 4, 4 }, /* Prefetch abort */
++ [4] = { 8, 8 }, /* Data abort */
++ [5] = { 0, 0 }, /* HVC, unused */
++ [6] = { 4, 4 }, /* IRQ, unused */
++ [7] = { 4, 4 }, /* FIQ, unused */
++};
++
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+ unsigned long cpsr;
+ unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+ bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+- u32 return_offset = (is_thumb) ? 4 : 0;
++ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+ u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+ cpsr = mode | COMPAT_PSR_I_BIT;
--- /dev/null
+From cda80a82ac3e89309706c027ada6ab232be1d640 Mon Sep 17 00:00:00 2001
+From: Yan Markman <ymarkman@marvell.com>
+Date: Sun, 16 Oct 2016 00:22:32 +0300
+Subject: ARM: dts: mvebu: pl310-cache disable double-linefill
+
+From: Yan Markman <ymarkman@marvell.com>
+
+commit cda80a82ac3e89309706c027ada6ab232be1d640 upstream.
+
+Under heavy system stress, mvebu SoCs using the Cortex-A9 sporadically
+encountered instability issues.
+
+The "double linefill" feature of the L2 cache was identified as
+creating a dependency between reads and writes which led to a
+deadlock.
+
+In particular, it was the cause of a deadlock seen under heavy PCIe
+traffic, as this dependency violates the PCIe overtaking rule.
+
+Fixes: c8f5a878e554 ("ARM: mvebu: use DT properties to fine-tune the L2 configuration")
+Signed-off-by: Yan Markman <ymarkman@marvell.com>
+Signed-off-by: Igal Liberman <igall@marvell.com>
+Signed-off-by: Nadav Haklai <nadavh@marvell.com>
+[gregory.clement@free-electrons.com: reformulate commit log, add Armada
+375 and add Fixes tag]
+Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/armada-375.dtsi | 4 ++--
+ arch/arm/boot/dts/armada-38x.dtsi | 4 ++--
+ arch/arm/boot/dts/armada-39x.dtsi | 4 ++--
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -178,9 +178,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -143,9 +143,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -111,9 +111,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
--- /dev/null
+From 7a7003b1da010d2b0d1dc8bf21c10f5c73b389f1 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 2 Nov 2017 16:12:03 +0000
+Subject: arm64: ensure __dump_instr() checks addr_limit
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 7a7003b1da010d2b0d1dc8bf21c10f5c73b389f1 upstream.
+
+It's possible for a user to deliberately trigger __dump_instr with a
+chosen kernel address.
+
+Let's avoid problems resulting from this by using get_user() rather than
+__get_user(), ensuring that we don't erroneously access kernel memory.
+
+Where we use __dump_instr() on kernel text, we already switch to
+KERNEL_DS, so this shouldn't adversely affect those cases.
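+
+A simplified sketch of the distinction (not the exact arm64 macros):
+
+  /* get_user() validates the pointer against the current addr_limit
+   * via access_ok() before loading; __get_user() skips that check
+   * and trusts the caller. */
+  #define get_user(x, ptr)                                        \
+          (access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?        \
+                  __get_user((x), (ptr)) : -EFAULT)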
+
+Fixes: 60ffc30d5652810d ("arm64: Exception handling")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/traps.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -116,7 +116,7 @@ static void __dump_instr(const char *lvl
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+- bad = __get_user(val, &((u32 *)addr)[i]);
++ bad = get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
--- /dev/null
+From 1e6f4fc06f6411adf98bbbe7fcd79442cd2b2a75 Mon Sep 17 00:00:00 2001
+From: Ricard Wanderlof <ricard.wanderlof@axis.com>
+Date: Thu, 7 Sep 2017 15:31:38 +0200
+Subject: ASoC: adau17x1: Workaround for noise bug in ADC
+
+From: Ricard Wanderlof <ricard.wanderlof@axis.com>
+
+commit 1e6f4fc06f6411adf98bbbe7fcd79442cd2b2a75 upstream.
+
+The ADC in the ADAU1361 (and possibly other Analog Devices codecs)
+exhibits a cyclic variation in the noise floor (in our test setup between
+-87 and -93 dB), a new value being attained within this range whenever a
+new capture stream is started. The cycle repeats after about 10 or 11
+restarts.
+
+The workaround recommended by the manufacturer is to toggle the ADOSR bit
+in the Converter Control 0 register each time a new capture stream is
+started.
+
+I have verified that the patch fixes this problem on the ADAU1361, and
+according to the manufacturer toggling the bit in question in this manner
+will at least have no detrimental effect on other chips served by this
+driver.
+
+Signed-off-by: Ricard Wanderlof <ricardw@axis.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/adau17x1.c | 24 +++++++++++++++++++++++-
+ sound/soc/codecs/adau17x1.h | 2 ++
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/adau17x1.c
++++ b/sound/soc/codecs/adau17x1.c
+@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd
+ return 0;
+ }
+
++static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++ struct adau *adau = snd_soc_codec_get_drvdata(codec);
++
++ /*
++ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
++ * avoid losing SNR (workaround from ADI). This must be done after
++ * the ADC(s) have been enabled. According to the data sheet, it is
++ * normally illegal to set this bit when the sampling rate is 96 kHz,
++ * but according to ADI it is acceptable for this workaround.
++ */
++ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
++ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++ ADAU17X1_CONVERTER0_ADOSR, 0);
++
++ return 0;
++}
++
+ static const char * const adau17x1_mono_stereo_text[] = {
+ "Stereo",
+ "Mono Left Channel (L+R)",
+@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget
+ SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
+ &adau17x1_dac_mode_mux),
+
+- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
++ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
++ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
+ SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
+--- a/sound/soc/codecs/adau17x1.h
++++ b/sound/soc/codecs/adau17x1.h
+@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau)
+
+ #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
+
++#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
++
+
+ #endif
--- /dev/null
+From f74bc7c6679200a4a83156bb89cbf6c229fe8ec0 Mon Sep 17 00:00:00 2001
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+Date: Mon, 30 Oct 2017 13:28:03 +1100
+Subject: cifs: check MaxPathNameComponentLength != 0 before using it
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+commit f74bc7c6679200a4a83156bb89cbf6c229fe8ec0 upstream.
+
+Also fix a tcon leak in the error path.
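+
+The guard being added, shown in isolation: a server-reported
+MaxPathNameComponentLength of 0 is treated as "no limit advertised",
+so the length comparison is only applied when the field is non-zero:
+
+  if (tcon->fsAttrInfo.MaxPathNameComponentLength &&
+      direntry->d_name.len >
+      le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))
+          return -ENAMETOOLONG;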
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: David Disseldorp <ddiss@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/dir.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, stru
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
+- if (unlikely(direntry->d_name.len >
++ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++ direntry->d_name.len >
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ return -ENAMETOOLONG;
+
+@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, st
+
+ rc = check_name(direntry, tcon);
+ if (rc)
+- goto out_free_xid;
++ goto out;
+
+ server = tcon->ses->server;
+
--- /dev/null
+From 2eb9eabf1e868fda15808954fb29b0f105ed65f1 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 2 Nov 2017 00:47:19 +0000
+Subject: KEYS: fix out-of-bounds read during ASN.1 parsing
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 2eb9eabf1e868fda15808954fb29b0f105ed65f1 upstream.
+
+syzkaller with KASAN reported an out-of-bounds read in
+asn1_ber_decoder(). It can be reproduced by the following command,
+assuming CONFIG_X509_CERTIFICATE_PARSER=y and CONFIG_KASAN=y:
+
+ keyctl add asymmetric desc $'\x30\x30' @s
+
+The bug is that the length of an ASN.1 data value isn't validated in the
+case where it is encoded using the short form, causing the decoder to
+read past the end of the input buffer. Fix it by validating the length.
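+
+A minimal standalone sketch of the missing check (ber_len_ok() is a
+hypothetical helper; in the decoder the same bound applies to both
+length forms):
+
+  #include <stddef.h>
+
+  /* len must fit in the bytes remaining after the current read
+   * position dp; the caller guarantees dp <= datalen. */
+  static int ber_len_ok(size_t len, size_t datalen, size_t dp)
+  {
+          return len <= datalen - dp;
+  }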
+
+The bug report was:
+
+ BUG: KASAN: slab-out-of-bounds in asn1_ber_decoder+0x10cb/0x1730 lib/asn1_decoder.c:233
+ Read of size 1 at addr ffff88003cccfa02 by task syz-executor0/6818
+
+ CPU: 1 PID: 6818 Comm: syz-executor0 Not tainted 4.14.0-rc7-00008-g5f479447d983 #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ Call Trace:
+ __dump_stack lib/dump_stack.c:16 [inline]
+ dump_stack+0xb3/0x10b lib/dump_stack.c:52
+ print_address_description+0x79/0x2a0 mm/kasan/report.c:252
+ kasan_report_error mm/kasan/report.c:351 [inline]
+ kasan_report+0x236/0x340 mm/kasan/report.c:409
+ __asan_report_load1_noabort+0x14/0x20 mm/kasan/report.c:427
+ asn1_ber_decoder+0x10cb/0x1730 lib/asn1_decoder.c:233
+ x509_cert_parse+0x1db/0x650 crypto/asymmetric_keys/x509_cert_parser.c:89
+ x509_key_preparse+0x64/0x7a0 crypto/asymmetric_keys/x509_public_key.c:174
+ asymmetric_key_preparse+0xcb/0x1a0 crypto/asymmetric_keys/asymmetric_type.c:388
+ key_create_or_update+0x347/0xb20 security/keys/key.c:855
+ SYSC_add_key security/keys/keyctl.c:122 [inline]
+ SyS_add_key+0x1cd/0x340 security/keys/keyctl.c:62
+ entry_SYSCALL_64_fastpath+0x1f/0xbe
+ RIP: 0033:0x447c89
+ RSP: 002b:00007fca7a5d3bd8 EFLAGS: 00000246 ORIG_RAX: 00000000000000f8
+ RAX: ffffffffffffffda RBX: 00007fca7a5d46cc RCX: 0000000000447c89
+ RDX: 0000000020006f4a RSI: 0000000020006000 RDI: 0000000020001ff5
+ RBP: 0000000000000046 R08: fffffffffffffffd R09: 0000000000000000
+ R10: 0000000000000002 R11: 0000000000000246 R12: 0000000000000000
+ R13: 0000000000000000 R14: 00007fca7a5d49c0 R15: 00007fca7a5d4700
+
+Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/asn1_decoder.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -284,6 +284,9 @@ next_op:
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
+ }
++ } else {
++ if (unlikely(len > datalen - dp))
++ goto data_overrun_error;
+ }
+
+ if (flags & FLAG_CONS) {
--- /dev/null
+From 3239b6f29bdfb4b0a2ba59df995fc9e6f4df7f1f Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 2 Nov 2017 00:47:03 +0000
+Subject: KEYS: return full count in keyring_read() if buffer is too small
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 3239b6f29bdfb4b0a2ba59df995fc9e6f4df7f1f upstream.
+
+Commit e645016abc80 ("KEYS: fix writing past end of user-supplied buffer
+in keyring_read()") made keyring_read() stop corrupting userspace memory
+when the user-supplied buffer is too small. However, it also changed
+the return value in that case to the short buffer size rather than the
+size required, even though keyctl_read() is documented to return the
+size required. Therefore, switch it over to the documented behavior.
+
+Note that for now we continue to have it fill the short buffer, since it
+did that before (pre-v3.13) and dump_key_tree_aux() in keyutils arguably
+relies on it.
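+
+A hypothetical userspace sketch of the two-call pattern that the
+documented semantics support (error handling elided):
+
+  #include <keyutils.h>
+  #include <stdlib.h>
+
+  static key_serial_t *read_keyring_ids(key_serial_t keyring)
+  {
+          /* First call sizes the buffer, second call fills it. */
+          long n = keyctl_read(keyring, NULL, 0);
+          key_serial_t *ids = malloc(n);
+
+          if (ids)
+                  keyctl_read(keyring, (char *)ids, n);
+          return ids;
+  }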
+
+Fixes: e645016abc80 ("KEYS: fix writing past end of user-supplied buffer in keyring_read()")
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/keys/keyring.c | 39 +++++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -459,34 +459,33 @@ static long keyring_read(const struct ke
+ char __user *buffer, size_t buflen)
+ {
+ struct keyring_read_iterator_context ctx;
+- unsigned long nr_keys;
+- int ret;
++ long ret;
+
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+- nr_keys = keyring->keys.nr_leaves_on_tree;
+- if (nr_keys == 0)
+- return 0;
+-
+- /* Calculate how much data we could return */
+- if (!buffer || !buflen)
+- return nr_keys * sizeof(key_serial_t);
+-
+- /* Copy the IDs of the subscribed keys into the buffer */
+- ctx.buffer = (key_serial_t __user *)buffer;
+- ctx.buflen = buflen;
+- ctx.count = 0;
+- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+- if (ret < 0) {
+- kleave(" = %d [iterate]", ret);
+- return ret;
++ /* Copy as many key IDs as fit into the buffer */
++ if (buffer && buflen) {
++ ctx.buffer = (key_serial_t __user *)buffer;
++ ctx.buflen = buflen;
++ ctx.count = 0;
++ ret = assoc_array_iterate(&keyring->keys,
++ keyring_read_iterator, &ctx);
++ if (ret < 0) {
++ kleave(" = %ld [iterate]", ret);
++ return ret;
++ }
+ }
+
+- kleave(" = %zu [ok]", ctx.count);
+- return ctx.count;
++ /* Return the size of the buffer needed */
++ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
++ if (ret <= buflen)
++ kleave("= %ld [ok]", ret);
++ else
++ kleave("= %ld [buffer too small]", ret);
++ return ret;
+ }
+
+ /*
--- /dev/null
+From a3c812f7cfd80cf51e8f5b7034f7418f6beb56c1 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 2 Nov 2017 00:47:12 +0000
+Subject: KEYS: trusted: fix writing past end of buffer in trusted_read()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit a3c812f7cfd80cf51e8f5b7034f7418f6beb56c1 upstream.
+
+When calling keyctl_read() on a key of type "trusted", if the
+user-supplied buffer was too small, the kernel ignored the buffer length
+and just wrote past the end of the buffer, potentially corrupting
+userspace memory. Fix it by instead returning the size required, as per
+the documentation for keyctl_read().
+
+In this case we also don't fill the buffer at all, as this is
+slightly easier to implement than doing a short read, and either
+behavior appears to be permitted. It also matches the behavior
+of the "encrypted" key type.
+
+Fixes: d00a1c72f7f4 ("keys: add new trusted key-type")
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Mimi Zohar <zohar@linux.vnet.ibm.com>
+Reviewed-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/keys/trusted.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1147,20 +1147,21 @@ static long trusted_read(const struct ke
+ p = dereference_key_locked(key);
+ if (!p)
+ return -EINVAL;
+- if (!buffer || buflen <= 0)
+- return 2 * p->blob_len;
+- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+- if (!ascii_buf)
+- return -ENOMEM;
+
+- bufp = ascii_buf;
+- for (i = 0; i < p->blob_len; i++)
+- bufp = hex_byte_pack(bufp, p->blob[i]);
+- if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
++ if (buffer && buflen >= 2 * p->blob_len) {
++ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
++ if (!ascii_buf)
++ return -ENOMEM;
++
++ bufp = ascii_buf;
++ for (i = 0; i < p->blob_len; i++)
++ bufp = hex_byte_pack(bufp, p->blob[i]);
++ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
++ kzfree(ascii_buf);
++ return -EFAULT;
++ }
+ kzfree(ascii_buf);
+- return -EFAULT;
+ }
+- kzfree(ascii_buf);
+ return 2 * p->blob_len;
+ }
+
--- /dev/null
+From 8c1a8a32438b95792bbd8719d1cd4fe36e9eba03 Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Fri, 13 Oct 2017 11:40:11 +0200
+Subject: KVM: arm64: its: Fix missing dynamic allocation check in scan_its_table
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+commit 8c1a8a32438b95792bbd8719d1cd4fe36e9eba03 upstream.
+
+We currently allocate an entry dynamically, but we never check if the
+allocation actually succeeded. We actually don't need a dynamic
+allocation, because we know the maximum size of an ITS table entry, so
+we can simply use an allocation on the stack.
+
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-its.c | 18 +++++++-----------
+ 1 file changed, 7 insertions(+), 11 deletions(-)
+
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -1803,37 +1803,33 @@ typedef int (*entry_fn_t)(struct vgic_it
+ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
+ int start_id, entry_fn_t fn, void *opaque)
+ {
+- void *entry = kzalloc(esz, GFP_KERNEL);
+ struct kvm *kvm = its->dev->kvm;
+ unsigned long len = size;
+ int id = start_id;
+ gpa_t gpa = base;
++ char entry[esz];
+ int ret;
+
++ memset(entry, 0, esz);
++
+ while (len > 0) {
+ int next_offset;
+ size_t byte_offset;
+
+ ret = kvm_read_guest(kvm, gpa, entry, esz);
+ if (ret)
+- goto out;
++ return ret;
+
+ next_offset = fn(its, id, entry, opaque);
+- if (next_offset <= 0) {
+- ret = next_offset;
+- goto out;
+- }
++ if (next_offset <= 0)
++ return next_offset;
+
+ byte_offset = next_offset * esz;
+ id += next_offset;
+ gpa += byte_offset;
+ len -= byte_offset;
+ }
+- ret = 1;
+-
+-out:
+- kfree(entry);
+- return ret;
++ return 1;
+ }
+
+ /**
--- /dev/null
+From 2a9a86d5c81389cd9afe6a4fea42c585733cd705 Mon Sep 17 00:00:00 2001
+From: Tero Kristo <t-kristo@ti.com>
+Date: Mon, 30 Oct 2017 09:10:46 +0200
+Subject: PM / QoS: Fix default runtime_pm device resume latency
+
+From: Tero Kristo <t-kristo@ti.com>
+
+commit 2a9a86d5c81389cd9afe6a4fea42c585733cd705 upstream.
+
+The recent change to the PM QoS framework that introduced a proper
+"no constraint" value overlooked the devices which don't implement
+PM QoS OPS. Runtime PM is one of the more severely impacted
+subsystems, failing every attempt to runtime-suspend a device. This
+leads to nasty second-order issues such as probe failures and
+increased power consumption, among other things.
+
+Fix this by adding a proper return value for devices that don't
+implement PM QoS.
+
+Fixes: 0cc2b4e5a020 (PM / QoS: Fix device resume latency PM QoS)
+Signed-off-by: Tero Kristo <t-kristo@ti.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/pm_qos.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -175,7 +175,8 @@ static inline s32 dev_pm_qos_requested_f
+ static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+ {
+ return IS_ERR_OR_NULL(dev->power.qos) ?
+- 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
++ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
++ pm_qos_read_value(&dev->power.qos->resume_latency);
+ }
+ #else
+ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
--- /dev/null
+From 0cc2b4e5a020fc7f4d1795741c116c983e9467d7 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 24 Oct 2017 15:20:45 +0200
+Subject: PM / QoS: Fix device resume latency PM QoS
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 0cc2b4e5a020fc7f4d1795741c116c983e9467d7 upstream.
+
+The special value of 0 for device resume latency PM QoS means
+"no restriction", but there are two problems with that.
+
+First, device resume latency PM QoS requests with 0 as the
+value are always put in front of requests with positive
+values in the priority lists used internally by the PM QoS
+framework, causing 0 to be chosen as the effective constraint
+value. However, that 0 is then interpreted as "no restriction",
+effectively overriding the other requests with specific
+restrictions, which is incorrect.
+
+Second, the users of device resume latency PM QoS have no
+way to specify that *any* resume latency at all should be
+avoided, which is an artificial limitation in general.
+
+To address these issues, modify device resume latency PM QoS to
+use S32_MAX as the "no constraint" value and 0 as the "no
+latency at all" one and rework its users (the cpuidle menu
+governor, the genpd QoS governor and the runtime PM framework)
+to follow these changes.
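+
+The consumer-side idiom after this change, sketched from the cpuidle
+menu governor hunk below (0 is now a real constraint meaning "no
+latency at all", and only the explicit sentinel is ignored):
+
+  s32 resume_latency = dev_pm_qos_raw_read_value(device);
+
+  if (resume_latency < latency_req &&
+      resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+          latency_req = resume_latency;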
+
+Also add a special "n/a" value to the corresponding user space I/F
+to allow user space to indicate that it cannot accept any resume
+latencies at all for the given device.
+
+Fixes: 85dc0b8a4019 (PM / QoS: Make it possible to expose PM QoS latency constraints)
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=197323
+Reported-by: Reinette Chatre <reinette.chatre@intel.com>
+Tested-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Alex Shi <alex.shi@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/ABI/testing/sysfs-devices-power | 4 +
+ drivers/base/cpu.c | 3 -
+ drivers/base/power/domain_governor.c | 53 ++++++++++++++------------
+ drivers/base/power/qos.c | 2
+ drivers/base/power/runtime.c | 2
+ drivers/base/power/sysfs.c | 25 ++++++++++--
+ drivers/cpuidle/governors/menu.c | 4 -
+ include/linux/pm_qos.h | 5 +-
+ 8 files changed, 63 insertions(+), 35 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-power
++++ b/Documentation/ABI/testing/sysfs-devices-power
+@@ -211,7 +211,9 @@ Description:
+ device, after it has been suspended at run time, from a resume
+ request to the moment the device will be ready to process I/O,
+ in microseconds. If it is equal to 0, however, this means that
+- the PM QoS resume latency may be arbitrary.
++ the PM QoS resume latency may be arbitrary and the special value
++ "n/a" means that user space cannot accept any resume latency at
++ all for the given device.
+
+ Not all drivers support this attribute. If it isn't supported,
+ it is not present.
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int nu
+
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
+ register_cpu_under_node(num, cpu_to_node(num));
+- dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
++ dev_pm_qos_expose_latency_limit(&cpu->dev,
++ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+
+ return 0;
+ }
+--- a/drivers/base/power/domain_governor.c
++++ b/drivers/base/power/domain_governor.c
+@@ -14,23 +14,20 @@
+ static int dev_update_qos_constraint(struct device *dev, void *data)
+ {
+ s64 *constraint_ns_p = data;
+- s32 constraint_ns = -1;
++ s64 constraint_ns = -1;
+
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+
+- if (constraint_ns < 0) {
++ if (constraint_ns < 0)
+ constraint_ns = dev_pm_qos_read_value(dev);
+- constraint_ns *= NSEC_PER_USEC;
+- }
+- if (constraint_ns == 0)
++
++ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return 0;
+
+- /*
+- * constraint_ns cannot be negative here, because the device has been
+- * suspended.
+- */
+- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
++ constraint_ns *= NSEC_PER_USEC;
++
++ if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
+ *constraint_ns_p = constraint_ns;
+
+ return 0;
+@@ -63,10 +60,14 @@ static bool default_suspend_ok(struct de
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+- if (constraint_ns < 0)
++ if (constraint_ns == 0)
+ return false;
+
+- constraint_ns *= NSEC_PER_USEC;
++ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
++ constraint_ns = -1;
++ else
++ constraint_ns *= NSEC_PER_USEC;
++
+ /*
+ * We can walk the children without any additional locking, because
+ * they all have been suspended at this point and their
+@@ -76,14 +77,19 @@ static bool default_suspend_ok(struct de
+ device_for_each_child(dev, &constraint_ns,
+ dev_update_qos_constraint);
+
+- if (constraint_ns > 0) {
+- constraint_ns -= td->suspend_latency_ns +
+- td->resume_latency_ns;
+- if (constraint_ns == 0)
+- return false;
++ if (constraint_ns < 0) {
++ /* The children have no constraints. */
++ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
++ td->cached_suspend_ok = true;
++ } else {
++ constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
++ if (constraint_ns > 0) {
++ td->effective_constraint_ns = constraint_ns;
++ td->cached_suspend_ok = true;
++ } else {
++ td->effective_constraint_ns = 0;
++ }
+ }
+- td->effective_constraint_ns = constraint_ns;
+- td->cached_suspend_ok = constraint_ns >= 0;
+
+ /*
+ * The children have been suspended already, so we don't need to take
+@@ -145,13 +151,14 @@ static bool __default_power_down_ok(stru
+ td = &to_gpd_data(pdd)->td;
+ constraint_ns = td->effective_constraint_ns;
+ /* default_suspend_ok() need not be called before us. */
+- if (constraint_ns < 0) {
++ if (constraint_ns < 0)
+ constraint_ns = dev_pm_qos_read_value(pdd->dev);
+- constraint_ns *= NSEC_PER_USEC;
+- }
+- if (constraint_ns == 0)
++
++ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ continue;
+
++ constraint_ns *= NSEC_PER_USEC;
++
+ /*
+ * constraint_ns cannot be negative here, because the device has
+ * been suspended.
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_alloca
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+- c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
++ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(str
+ || (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME))
+ retval = -EAGAIN;
+- else if (__dev_pm_qos_read_value(dev) < 0)
++ else if (__dev_pm_qos_read_value(dev) == 0)
+ retval = -EPERM;
+ else if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_sho
+ struct device_attribute *attr,
+ char *buf)
+ {
+- return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
++ s32 value = dev_pm_qos_requested_resume_latency(dev);
++
++ if (value == 0)
++ return sprintf(buf, "n/a\n");
++ else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
++ value = 0;
++
++ return sprintf(buf, "%d\n", value);
+ }
+
+ static ssize_t pm_qos_resume_latency_store(struct device *dev,
+@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_sto
+ s32 value;
+ int ret;
+
+- if (kstrtos32(buf, 0, &value))
+- return -EINVAL;
++ if (!kstrtos32(buf, 0, &value)) {
++ /*
++ * Prevent users from writing negative or "no constraint" values
++ * directly.
++ */
++ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
++ return -EINVAL;
+
+- if (value < 0)
++ if (value == 0)
++ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
++ } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
++ value = 0;
++ } else {
+ return -EINVAL;
++ }
+
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_dr
+ data->needs_update = 0;
+ }
+
+- /* resume_latency is 0 means no restriction */
+- if (resume_latency && resume_latency < latency_req)
++ if (resume_latency < latency_req &&
++ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ latency_req = resume_latency;
+
+ /* Special case when user has set very strict latency requirement */
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
+ PM_QOS_FLAGS_ALL,
+ };
+
+-#define PM_QOS_DEFAULT_VALUE -1
++#define PM_QOS_DEFAULT_VALUE (-1)
++#define PM_QOS_LATENCY_ANY S32_MAX
+
+ #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+ #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+ #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+ #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
+ #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
++#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
+ #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+ #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
+-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
+
+ #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
+ #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
--- /dev/null
+alsa-timer-add-missing-mutex-lock-for-compat-ioctls.patch
+alsa-seq-fix-nested-rwsem-annotation-for-lockdep-splat.patch
+cifs-check-maxpathnamecomponentlength-0-before-using-it.patch
+keys-return-full-count-in-keyring_read-if-buffer-is-too-small.patch
+keys-trusted-fix-writing-past-end-of-buffer-in-trusted_read.patch
+keys-fix-out-of-bounds-read-during-asn.1-parsing.patch
+asoc-adau17x1-workaround-for-noise-bug-in-adc.patch
+virtio_blk-fix-an-sg_io-regression.patch
+pm-qos-fix-device-resume-latency-pm-qos.patch
+pm-qos-fix-default-runtime_pm-device-resume-latency.patch
+arm64-ensure-__dump_instr-checks-addr_limit.patch
+kvm-arm64-its-fix-missing-dynamic-allocation-check-in-scan_its_table.patch
+arm-arm64-kvm-set-right-lr-register-value-for-32-bit-guest-when-inject-abort.patch
+arm-arm64-kvm-disable-branch-profiling-in-hyp-code.patch
+arm-dts-mvebu-pl310-cache-disable-double-linefill.patch
+arm-8715-1-add-a-private-asm-unaligned.h.patch
--- /dev/null
+From efea2abcb03215f2efadfe994ff7f652aaff196b Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Fri, 27 Oct 2017 08:23:21 -0600
+Subject: virtio_blk: Fix an SG_IO regression
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit efea2abcb03215f2efadfe994ff7f652aaff196b upstream.
+
+Avoid a kernel oops when an SG_IO ioctl is submitted; the oops is
+preceded by:
+
+usercopy: kernel memory overwrite attempt detected to (null) (<null>) (6 bytes)
+kernel BUG at mm/usercopy.c:72!
+
+Reported-by: Dann Frazier <dann.frazier@canonical.com>
+Fixes: commit ca18d6f769d2 ("block: Make most scsi_req_init() calls implicit")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Dann Frazier <dann.frazier@canonical.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Moved virtblk_initialize_rq() inside CONFIG_VIRTIO_BLK_SCSI.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+
+---
+ drivers/block/virtio_blk.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk
+ return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ }
+
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++static void virtblk_initialize_rq(struct request *req)
++{
++ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
++
++ scsi_req_init(&vbr->sreq);
++}
++#endif
++
+ static const struct blk_mq_ops virtio_mq_ops = {
+ .queue_rq = virtio_queue_rq,
+ .complete = virtblk_request_done,
+ .init_request = virtblk_init_request,
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++ .initialize_rq_fn = virtblk_initialize_rq,
++#endif
+ .map_queues = virtblk_map_queues,
+ };
+
--- /dev/null
+alsa-timer-add-missing-mutex-lock-for-compat-ioctls.patch
+alsa-seq-fix-nested-rwsem-annotation-for-lockdep-splat.patch
+cifs-check-maxpathnamecomponentlength-0-before-using-it.patch
+keys-return-full-count-in-keyring_read-if-buffer-is-too-small.patch
+keys-fix-out-of-bounds-read-during-asn.1-parsing.patch
+asoc-adau17x1-workaround-for-noise-bug-in-adc.patch
+arm64-ensure-__dump_instr-checks-addr_limit.patch
+arm-dts-mvebu-pl310-cache-disable-double-linefill.patch
+arm-8715-1-add-a-private-asm-unaligned.h.patch
--- /dev/null
+alsa-timer-add-missing-mutex-lock-for-compat-ioctls.patch
+alsa-seq-fix-nested-rwsem-annotation-for-lockdep-splat.patch
+cifs-check-maxpathnamecomponentlength-0-before-using-it.patch
+keys-return-full-count-in-keyring_read-if-buffer-is-too-small.patch
+keys-fix-out-of-bounds-read-during-asn.1-parsing.patch
+asoc-adau17x1-workaround-for-noise-bug-in-adc.patch
+arm64-ensure-__dump_instr-checks-addr_limit.patch
+arm-arm64-kvm-set-right-lr-register-value-for-32-bit-guest-when-inject-abort.patch
+arm-arm64-kvm-disable-branch-profiling-in-hyp-code.patch
+arm-8715-1-add-a-private-asm-unaligned.h.patch