--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 24 Aug 2018 15:08:30 +0100
+Subject: arm/arm64: smccc-1.1: Handle function result as parameters
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 755a8bf5579d22eb5636685c516d8dede799e27b ]
+
+If someone has the silly idea to write something along those lines:
+
+ extern u64 foo(void);
+
+ void bar(struct arm_smccc_res *res)
+ {
+ arm_smccc_1_1_smc(0xbad, foo(), res);
+ }
+
+they are in for a surprise, as this gets compiled as:
+
+ 0000000000000588 <bar>:
+ 588: a9be7bfd stp x29, x30, [sp, #-32]!
+ 58c: 910003fd mov x29, sp
+ 590: f9000bf3 str x19, [sp, #16]
+ 594: aa0003f3 mov x19, x0
+ 598: aa1e03e0 mov x0, x30
+ 59c: 94000000 bl 0 <_mcount>
+ 5a0: 94000000 bl 0 <foo>
+ 5a4: aa0003e1 mov x1, x0
+ 5a8: d4000003 smc #0x0
+ 5ac: b4000073 cbz x19, 5b8 <bar+0x30>
+ 5b0: a9000660 stp x0, x1, [x19]
+ 5b4: a9010e62 stp x2, x3, [x19, #16]
+ 5b8: f9400bf3 ldr x19, [sp, #16]
+ 5bc: a8c27bfd ldp x29, x30, [sp], #32
+ 5c0: d65f03c0 ret
+ 5c4: d503201f nop
+
+The call to foo "overwrites" the x0 register for the return value,
+and we end up calling the wrong secure service.
+
+A solution is to evaluate all the parameters before assigning
+anything to specific registers, leading to the expected result:
+
+ 0000000000000588 <bar>:
+ 588: a9be7bfd stp x29, x30, [sp, #-32]!
+ 58c: 910003fd mov x29, sp
+ 590: f9000bf3 str x19, [sp, #16]
+ 594: aa0003f3 mov x19, x0
+ 598: aa1e03e0 mov x0, x30
+ 59c: 94000000 bl 0 <_mcount>
+ 5a0: 94000000 bl 0 <foo>
+ 5a4: aa0003e1 mov x1, x0
+ 5a8: d28175a0 mov x0, #0xbad
+ 5ac: d4000003 smc #0x0
+ 5b0: b4000073 cbz x19, 5bc <bar+0x34>
+ 5b4: a9000660 stp x0, x1, [x19]
+ 5b8: a9010e62 stp x2, x3, [x19, #16]
+ 5bc: f9400bf3 ldr x19, [sp, #16]
+ 5c0: a8c27bfd ldp x29, x30, [sp], #32
+ 5c4: d65f03c0 ret
+
+Reported-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -205,41 +205,51 @@ asmlinkage void __arm_smccc_hvc(unsigned
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_1(a0, a1, res) \
++ typeof(a1) __a1 = a1; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+- register unsigned long r1 asm("r1") = a1; \
++ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_2(a0, a1, a2, res) \
++ typeof(a1) __a1 = a1; \
++ typeof(a2) __a2 = a2; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+- register unsigned long r1 asm("r1") = a1; \
+- register unsigned long r2 asm("r2") = a2; \
++ register unsigned long r1 asm("r1") = __a1; \
++ register unsigned long r2 asm("r2") = __a2; \
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_3(a0, a1, a2, a3, res) \
++ typeof(a1) __a1 = a1; \
++ typeof(a2) __a2 = a2; \
++ typeof(a3) __a3 = a3; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+- register unsigned long r1 asm("r1") = a1; \
+- register unsigned long r2 asm("r2") = a2; \
+- register unsigned long r3 asm("r3") = a3
++ register unsigned long r1 asm("r1") = __a1; \
++ register unsigned long r2 asm("r2") = __a2; \
++ register unsigned long r3 asm("r3") = __a3
+
+ #define __declare_arg_4(a0, a1, a2, a3, a4, res) \
++ typeof(a4) __a4 = a4; \
+ __declare_arg_3(a0, a1, a2, a3, res); \
+- register typeof(a4) r4 asm("r4") = a4
++ register unsigned long r4 asm("r4") = __a4
+
+ #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
++ typeof(a5) __a5 = a5; \
+ __declare_arg_4(a0, a1, a2, a3, a4, res); \
+- register typeof(a5) r5 asm("r5") = a5
++ register unsigned long r5 asm("r5") = __a5
+
+ #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
++ typeof(a6) __a6 = a6; \
+ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+- register typeof(a6) r6 asm("r6") = a6
++ register unsigned long r6 asm("r6") = __a6
+
+ #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
++ typeof(a7) __a7 = a7; \
+ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+- register typeof(a7) r7 asm("r7") = a7
++ register unsigned long r7 asm("r7") = __a7
+
+ #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+ #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 24 Aug 2018 15:08:29 +0100
+Subject: arm/arm64: smccc-1.1: Make return values unsigned long
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 1d8f574708a3fb6f18c85486d0c5217df893c0cf ]
+
+An unfortunate consequence of having a strong typing for the input
+values to the SMC call is that it also affects the type of the
+return values, limiting r0 to 32 bits and r{1,2,3} to whatever
+was passed as an input.
+
+Let's turn everything into "unsigned long", which satisfies the
+requirements of both architectures, and allows for the full
+range of return values.
+
+Reported-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -199,31 +199,31 @@ asmlinkage void __arm_smccc_hvc(unsigned
+
+ #define __declare_arg_0(a0, res) \
+ struct arm_smccc_res *___res = res; \
+- register u32 r0 asm("r0") = a0; \
++ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_1(a0, a1, res) \
+ struct arm_smccc_res *___res = res; \
+- register u32 r0 asm("r0") = a0; \
+- register typeof(a1) r1 asm("r1") = a1; \
++ register unsigned long r0 asm("r0") = (u32)a0; \
++ register unsigned long r1 asm("r1") = a1; \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_2(a0, a1, a2, res) \
+ struct arm_smccc_res *___res = res; \
+- register u32 r0 asm("r0") = a0; \
+- register typeof(a1) r1 asm("r1") = a1; \
+- register typeof(a2) r2 asm("r2") = a2; \
++ register unsigned long r0 asm("r0") = (u32)a0; \
++ register unsigned long r1 asm("r1") = a1; \
++ register unsigned long r2 asm("r2") = a2; \
+ register unsigned long r3 asm("r3")
+
+ #define __declare_arg_3(a0, a1, a2, a3, res) \
+ struct arm_smccc_res *___res = res; \
+- register u32 r0 asm("r0") = a0; \
+- register typeof(a1) r1 asm("r1") = a1; \
+- register typeof(a2) r2 asm("r2") = a2; \
+- register typeof(a3) r3 asm("r3") = a3
++ register unsigned long r0 asm("r0") = (u32)a0; \
++ register unsigned long r1 asm("r1") = a1; \
++ register unsigned long r2 asm("r2") = a2; \
++ register unsigned long r3 asm("r3") = a3
+
+ #define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ __declare_arg_3(a0, a1, a2, a3, res); \
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Tony Lindgren <tony@atomide.com>
+Date: Mon, 27 Aug 2018 19:18:21 -0700
+Subject: ARM: dts: omap4-droid4: Fix emmc errors seen on some devices
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 2d59bb602314a4b2593fde267734266b5e872dd0 ]
+
+Otherwise we can get the following errors occasionally on some devices:
+
+mmc1: tried to HW reset card, got error -110
+mmcblk1: error -110 requesting status
+mmcblk1: recovery failed!
+print_req_error: I/O error, dev mmcblk1, sector 14329
+...
+
+I have one device that hits this error almost on every boot, and another
+one that hits it only rarely with the other ones I've used behave without
+problems. I'm not sure if the issue is related to a particular eMMC card
+model, but in case it is, both of the machines with issues have:
+
+# cat /sys/class/mmc_host/mmc1/mmc1:0001/manfid \
+/sys/class/mmc_host/mmc1/mmc1:0001/oemid \
+/sys/class/mmc_host/mmc1/mmc1:0001/name
+0x000045
+0x0100
+SEM16G
+
+and the working ones have:
+
+0x000011
+0x0100
+016G92
+
+Note that "ti,non-removable" is different as omap_hsmmc_reg_get() does not
+call omap_hsmmc_disable_boot_regulators() if no_regulator_off_init is set.
+And currently we set no_regulator_off_init only for "ti,non-removable" and
+not for "non-removable". It seems that we should have "non-removable" with
+some other mmc generic property behave in the same way instead of having to
+use a non-generic property. But let's fix the issue first.
+
+Fixes: 7e2f8c0ae670 ("ARM: dts: Add minimal support for motorola droid 4
+xt894")
+Cc: Marcel Partap <mpartap@gmx.net>
+Cc: Merlijn Wajer <merlijn@wizzup.org>
+Cc: Michael Scott <hashcode0f@gmail.com>
+Cc: NeKit <nekit1000@gmail.com>
+Cc: Pavel Machek <pavel@ucw.cz>
+Cc: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/omap4-droid4-xt894.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
++++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
+@@ -314,7 +314,7 @@
+ &mmc2 {
+ vmmc-supply = <&vsdio>;
+ bus-width = <8>;
+- non-removable;
++ ti,non-removable;
+ };
+
+ &mmc3 {
--- /dev/null
+From d26c25a9d19b5976b319af528886f89cf455692d Mon Sep 17 00:00:00 2001
+From: Dave Martin <Dave.Martin@arm.com>
+Date: Thu, 27 Sep 2018 16:53:21 +0100
+Subject: arm64: KVM: Tighten guest core register access from userspace
+
+From: Dave Martin <Dave.Martin@arm.com>
+
+commit d26c25a9d19b5976b319af528886f89cf455692d upstream.
+
+We currently allow userspace to access the core register file
+in about any possible way, including straddling multiple
+registers and doing unaligned accesses.
+
+This is not the expected use of the ABI, and nobody is actually
+using it that way. Let's tighten it by explicitly checking
+the size and alignment for each field of the register file.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 2f4a07c5f9fe ("arm64: KVM: guest one-reg interface")
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dave Martin <Dave.Martin@arm.com>
+[maz: rewrote Dave's initial patch to be more easily backported]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/guest.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 i
+ return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+ }
+
++static int validate_core_offset(const struct kvm_one_reg *reg)
++{
++ u64 off = core_reg_offset_from_id(reg->id);
++ int size;
++
++ switch (off) {
++ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
++ KVM_REG_ARM_CORE_REG(regs.regs[30]):
++ case KVM_REG_ARM_CORE_REG(regs.sp):
++ case KVM_REG_ARM_CORE_REG(regs.pc):
++ case KVM_REG_ARM_CORE_REG(regs.pstate):
++ case KVM_REG_ARM_CORE_REG(sp_el1):
++ case KVM_REG_ARM_CORE_REG(elr_el1):
++ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
++ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
++ size = sizeof(__u64);
++ break;
++
++ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
++ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
++ size = sizeof(__uint128_t);
++ break;
++
++ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
++ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
++ size = sizeof(__u32);
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ if (KVM_REG_SIZE(reg->id) == size &&
++ IS_ALIGNED(off, size / sizeof(__u32)))
++ return 0;
++
++ return -EINVAL;
++}
++
+ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ {
+ /*
+@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
++ if (validate_core_offset(reg))
++ return -EINVAL;
++
+ if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
++ if (validate_core_offset(reg))
++ return -EINVAL;
++
+ if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
+ return -EINVAL;
+
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Sun, 15 Jul 2018 22:09:29 +0200
+Subject: ata: ftide010: Add a quirk for SQ201
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit 46cb52ad414ac829680d0bb8cc7090ac2b577ca7 ]
+
+The DMA is broken on this specific device for some unknown
+reason (probably badly designed or plain broken interface
+electronics) and will only work with PIO. Other users of
+the same hardware does not have this problem.
+
+Add a specific quirk so that this Gemini device gets
+DMA turned off. Also fix up some code around passing the
+port information around in probe while we're at it.
+
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/pata_ftide010.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/drivers/ata/pata_ftide010.c
++++ b/drivers/ata/pata_ftide010.c
+@@ -256,14 +256,12 @@ static struct ata_port_operations pata_f
+ .qc_issue = ftide010_qc_issue,
+ };
+
+-static struct ata_port_info ftide010_port_info[] = {
+- {
+- .flags = ATA_FLAG_SLAVE_POSS,
+- .mwdma_mask = ATA_MWDMA2,
+- .udma_mask = ATA_UDMA6,
+- .pio_mask = ATA_PIO4,
+- .port_ops = &pata_ftide010_port_ops,
+- },
++static struct ata_port_info ftide010_port_info = {
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA6,
++ .pio_mask = ATA_PIO4,
++ .port_ops = &pata_ftide010_port_ops,
+ };
+
+ #if IS_ENABLED(CONFIG_SATA_GEMINI)
+@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_de
+ }
+
+ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
++ struct ata_port_info *pi,
+ bool is_ata1)
+ {
+ struct device *dev = ftide->dev;
+@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(str
+
+ /* Flag port as SATA-capable */
+ if (gemini_sata_bridge_enabled(sg, is_ata1))
+- ftide010_port_info[0].flags |= ATA_FLAG_SATA;
++ pi->flags |= ATA_FLAG_SATA;
++
++ /* This device has broken DMA, only PIO works */
++ if (of_machine_is_compatible("itian,sq201")) {
++ pi->mwdma_mask = 0;
++ pi->udma_mask = 0;
++ }
+
+ /*
+ * We assume that a simple 40-wire cable is used in the PATA mode.
+@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(str
+ }
+ #else
+ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
++ struct ata_port_info *pi,
+ bool is_ata1)
+ {
+ return -ENOTSUPP;
+@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct pl
+ {
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+- const struct ata_port_info pi = ftide010_port_info[0];
++ struct ata_port_info pi = ftide010_port_info;
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ftide010 *ftide;
+ struct resource *res;
+@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct pl
+ * are ATA0. This will also set up the cable types.
+ */
+ ret = pata_ftide010_gemini_init(ftide,
++ &pi,
+ (res->start == 0x63400000));
+ if (ret)
+ goto err_dis_clk;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Wed, 22 Aug 2018 08:37:37 -0700
+Subject: bpf: sockmap: write_space events need to be passed to TCP handler
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 9b2e0388bec8ec5427403e23faff3b58dd1c3200 ]
+
+When sockmap code is using the stream parser it also handles the write
+space events in order to handle the case where (a) verdict redirects
+skb to another socket and (b) the sockmap then sends the skb but due
+to memory constraints (or other EAGAIN errors) needs to do a retry.
+
+But the initial code missed a third case where the
+skb_send_sock_locked() triggers an sk_wait_event(). A typical case
+would be when sndbuf size is exceeded. If this happens because we
+do not pass the write_space event to the lower layers we never wake
+up the event and it will wait for sndtimeo. Which as noted in ktls
+fix may be rather large and look like a hang to the user.
+
+To reproduce the best test is to reduce the sndbuf size and send
+1B data chunks to stress the memory handling. To fix this pass the
+event from the upper layer to the lower layer.
+
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/sockmap.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -313,12 +313,15 @@ out:
+ static void smap_write_space(struct sock *sk)
+ {
+ struct smap_psock *psock;
++ void (*write_space)(struct sock *sk);
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
+ schedule_work(&psock->tx_work);
++ write_space = psock->save_write_space;
+ rcu_read_unlock();
++ write_space(sk);
+ }
+
+ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Srikanth Jampala <Jampala.Srikanth@cavium.com>
+Date: Wed, 22 Aug 2018 12:40:52 +0530
+Subject: crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
+
+From: Srikanth Jampala <Jampala.Srikanth@cavium.com>
+
+[ Upstream commit 3d7c82060d1fe65bde4023aac41a0b1bd7718e07 ]
+
+Earlier, the current command was posted without checking for queue full
+ after backlog submissions. Now, post the current command only after
+ confirming there is space in the queue following backlog submissions.
+
+ Maintain host write index instead of reading device registers
+ to get the next free slot to post the command.
+
+ Return -ENOSPC in queue full case.
+
+Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
+Reviewed-by: Gadam Sreerama <sgadam@cavium.com>
+Tested-by: Jha, Chandan <Chandan.Jha@cavium.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/cavium/nitrox/nitrox_dev.h | 3 -
+ drivers/crypto/cavium/nitrox/nitrox_lib.c | 1
+ drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 57 +++++++++++++++------------
+ 3 files changed, 35 insertions(+), 26 deletions(-)
+
+--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
++++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
+@@ -35,6 +35,7 @@ struct nitrox_cmdq {
+ /* requests in backlog queues */
+ atomic_t backlog_count;
+
++ int write_idx;
+ /* command size 32B/64B */
+ u8 instr_size;
+ u8 qno;
+@@ -87,7 +88,7 @@ struct nitrox_bh {
+ struct bh_data *slc;
+ };
+
+-/* NITROX-5 driver state */
++/* NITROX-V driver state */
+ #define NITROX_UCODE_LOADED 0
+ #define NITROX_READY 1
+
+--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
+@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitro
+ cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
+ cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
+ cmdq->qsize = (qsize + PKT_IN_ALIGN);
++ cmdq->write_idx = 0;
+
+ spin_lock_init(&cmdq->response_lock);
+ spin_lock_init(&cmdq->cmdq_lock);
+--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+@@ -43,6 +43,16 @@
+ * Invalid flag options in AES-CCM IV.
+ */
+
++static inline int incr_index(int index, int count, int max)
++{
++ if ((index + count) >= max)
++ index = index + count - max;
++ else
++ index += count;
++
++ return index;
++}
++
+ /**
+ * dma_free_sglist - unmap and free the sg lists.
+ * @ndev: N5 device
+@@ -427,30 +437,29 @@ static void post_se_instr(struct nitrox_
+ struct nitrox_cmdq *cmdq)
+ {
+ struct nitrox_device *ndev = sr->ndev;
+- union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
+- u64 offset;
++ int idx;
+ u8 *ent;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+- /* get the next write offset */
+- offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
+- pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
++ idx = cmdq->write_idx;
+ /* copy the instruction */
+- ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
++ ent = cmdq->head + (idx * cmdq->instr_size);
+ memcpy(ent, &sr->instr, cmdq->instr_size);
+- /* flush the command queue updates */
+- dma_wmb();
+
+- sr->tstamp = jiffies;
+ atomic_set(&sr->status, REQ_POSTED);
+ response_list_add(sr, cmdq);
++ sr->tstamp = jiffies;
++ /* flush the command queue updates */
++ dma_wmb();
+
+ /* Ring doorbell with count 1 */
+ writeq(1, cmdq->dbell_csr_addr);
+ /* orders the doorbell rings */
+ mmiowb();
+
++ cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
++
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ }
+
+@@ -460,6 +469,9 @@ static int post_backlog_cmds(struct nitr
+ struct nitrox_softreq *sr, *tmp;
+ int ret = 0;
+
++ if (!atomic_read(&cmdq->backlog_count))
++ return 0;
++
+ spin_lock_bh(&cmdq->backlog_lock);
+
+ list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
+@@ -467,7 +479,7 @@ static int post_backlog_cmds(struct nitr
+
+ /* submit until space available */
+ if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
+- ret = -EBUSY;
++ ret = -ENOSPC;
+ break;
+ }
+ /* delete from backlog list */
+@@ -492,23 +504,20 @@ static int nitrox_enqueue_request(struct
+ {
+ struct nitrox_cmdq *cmdq = sr->cmdq;
+ struct nitrox_device *ndev = sr->ndev;
+- int ret = -EBUSY;
++
++ /* try to post backlog requests */
++ post_backlog_cmds(cmdq);
+
+ if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
+ if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+- return -EAGAIN;
+-
++ return -ENOSPC;
++ /* add to backlog list */
+ backlog_list_add(sr, cmdq);
+- } else {
+- ret = post_backlog_cmds(cmdq);
+- if (ret) {
+- backlog_list_add(sr, cmdq);
+- return ret;
+- }
+- post_se_instr(sr, cmdq);
+- ret = -EINPROGRESS;
++ return -EBUSY;
+ }
+- return ret;
++ post_se_instr(sr, cmdq);
++
++ return -EINPROGRESS;
+ }
+
+ /**
+@@ -625,11 +634,9 @@ int nitrox_process_se_request(struct nit
+ */
+ sr->instr.fdata[0] = *((u64 *)&req->gph);
+ sr->instr.fdata[1] = 0;
+- /* flush the soft_req changes before posting the cmd */
+- wmb();
+
+ ret = nitrox_enqueue_request(sr);
+- if (ret == -EAGAIN)
++ if (ret == -ENOSPC)
+ goto send_fail;
+
+ return ret;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 24 Aug 2018 17:26:23 +0800
+Subject: drm/amdgpu: Enable/disable gfx PG feature in rlc safe mode
+
+From: Rex Zhu <Rex.Zhu@amd.com>
+
+[ Upstream commit 8ef23364b654d44244400d79988e677e504b21ba ]
+
+This is required by gfx hw and can fix the rlc hang when
+do s3 stress test on Cz/St.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Hang Zhou <hang.zhou@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5479,6 +5479,11 @@ static int gfx_v8_0_set_powergating_stat
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
++ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
++ AMD_PG_SUPPORT_RLC_SMU_HS |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_GFX_DMG))
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+@@ -5527,7 +5532,11 @@ static int gfx_v8_0_set_powergating_stat
+ default:
+ break;
+ }
+-
++ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
++ AMD_PG_SUPPORT_RLC_SMU_HS |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_GFX_DMG))
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 24 Aug 2018 16:17:54 +0800
+Subject: drm/amdgpu: Update power state at the end of smu hw_init.
+
+From: Rex Zhu <Rex.Zhu@amd.com>
+
+[ Upstream commit 2ab4d0e74256fc49b7b270f63c1d1e47c2455abc ]
+
+For SI/Kv, the power state is managed by function
+amdgpu_pm_compute_clocks.
+
+when dpm enabled, we should call amdgpu_pm_compute_clocks
+to update the current power state instead of setting the boot state.
+
+this change can fix the oops when kfd driver was enabled on Kv.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 +---
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3 +--
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -1352,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_d
+ return ret;
+ }
+
+- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
+-
+ if (adev->irq.installed &&
+ amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
+@@ -3054,7 +3052,7 @@ static int kv_dpm_hw_init(void *handle)
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+-
++ amdgpu_pm_compute_clocks(adev);
+ return ret;
+ }
+
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -6884,7 +6884,6 @@ static int si_dpm_enable(struct amdgpu_d
+
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+- ni_update_current_ps(adev, boot_ps);
+
+ return 0;
+ }
+@@ -7758,7 +7757,7 @@ static int si_dpm_hw_init(void *handle)
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+-
++ amdgpu_pm_compute_clocks(adev);
+ return ret;
+ }
+
--- /dev/null
+From 010e3e68cd9cb65ea50c0af605e966cda333cb2a Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 6 Dec 2017 12:49:13 +0000
+Subject: drm/i915: Remove vma from object on destroy, not close
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 010e3e68cd9cb65ea50c0af605e966cda333cb2a upstream.
+
+Originally we translated from the object to the vma by walking
+obj->vma_list to find the matching vm (for user lookups). Now we process
+user lookups using the rbtree, and we only use obj->vma_list itself for
+maintaining state (e.g. ensuring that all vma are flushed or rebound).
+As such maintenance needs to go on beyond the user's awareness of the
+vma, defer removal of the vma from the obj->vma_list from i915_vma_close()
+to i915_vma_destroy()
+
+Fixes: 5888fc9eac3c ("drm/i915: Flush pending GTT writes before unbinding")
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104155
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171206124914.19960-1-chris@chris-wilson.co.uk
+Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem.c | 3 ++-
+ drivers/gpu/drm/i915/i915_vma.c | 4 +++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3608,7 +3608,8 @@ restart:
+ return -EBUSY;
+ }
+
+- if (i915_gem_valid_gtt_space(vma, cache_level))
++ if (!i915_vma_is_closed(vma) &&
++ i915_gem_valid_gtt_space(vma, cache_level))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -430,6 +430,7 @@ i915_vma_insert(struct i915_vma *vma, u6
+ u64 start, end;
+ int ret;
+
++ GEM_BUG_ON(i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+@@ -590,7 +591,9 @@ static void i915_vma_destroy(struct i915
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+
++ list_del(&vma->obj_link);
+ list_del(&vma->vm_link);
++
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+@@ -602,7 +605,6 @@ void i915_vma_close(struct i915_vma *vma
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+- list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Bo Chen <chenbo@pdx.edu>
+Date: Mon, 23 Jul 2018 09:01:29 -0700
+Subject: e1000: check on netif_running() before calling e1000_up()
+
+From: Bo Chen <chenbo@pdx.edu>
+
+[ Upstream commit cf1acec008f8d7761aa3fd7c4bca7e17b2d2512d ]
+
+When the device is not up, the call to 'e1000_up()' from the error handling path
+of 'e1000_set_ringparam()' causes a kernel oops with a null-pointer
+dereference. The null-pointer dereference is triggered in function
+'e1000_alloc_rx_buffers()' at line 'buffer_info = &rx_ring->buffer_info[i]'.
+
+This bug was reported by COD, a tool for testing kernel module binaries I am
+building. This bug was also detected by KFI from Dr. Kai Cong.
+
+This patch fixes the bug by checking on 'netif_running()' before calling
+'e1000_up()' in 'e1000_set_ringparam()'.
+
+Signed-off-by: Bo Chen <chenbo@pdx.edu>
+Acked-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+@@ -664,7 +664,8 @@ err_setup_rx:
+ err_alloc_rx:
+ kfree(txdr);
+ err_alloc_tx:
+- e1000_up(adapter);
++ if (netif_running(adapter->netdev))
++ e1000_up(adapter);
+ err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return err;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Bo Chen <chenbo@pdx.edu>
+Date: Mon, 23 Jul 2018 09:01:30 -0700
+Subject: e1000: ensure to free old tx/rx rings in set_ringparam()
+
+From: Bo Chen <chenbo@pdx.edu>
+
+[ Upstream commit ee400a3f1bfe7004a3e14b81c38ccc5583c26295 ]
+
+In 'e1000_set_ringparam()', the tx_ring and rx_ring are updated with new value
+and the old tx/rx rings are freed only when the device is up. There are resource
+leaks on old tx/rx rings when the device is not up. This bug is reported by COD,
+a tool for testing kernel module binaries I am building.
+
+This patch fixes the bug by always calling 'kfree()' on old tx/rx rings in
+'e1000_set_ringparam()'.
+
+Signed-off-by: Bo Chen <chenbo@pdx.edu>
+Reviewed-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+@@ -644,14 +644,14 @@ static int e1000_set_ringparam(struct ne
+ adapter->tx_ring = tx_old;
+ e1000_free_all_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+- kfree(tx_old);
+- kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+ err = e1000_up(adapter);
+ if (err)
+ goto err_setup;
+ }
++ kfree(tx_old);
++ kfree(rx_old);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 14 Aug 2018 13:07:47 +0300
+Subject: hwmon: (adt7475) Make adt7475_read_word() return errors
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit f196dec6d50abb2e65fb54a0621b2f1b4d922995 ]
+
+The adt7475_read_word() function was meant to return negative error
+codes on failure.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Tokunori Ikegami <ikegami@allied-telesis.co.jp>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/adt7475.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -303,14 +303,18 @@ static inline u16 volt2reg(int channel,
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
+ }
+
+-static u16 adt7475_read_word(struct i2c_client *client, int reg)
++static int adt7475_read_word(struct i2c_client *client, int reg)
+ {
+- u16 val;
++ int val1, val2;
+
+- val = i2c_smbus_read_byte_data(client, reg);
+- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
++ val1 = i2c_smbus_read_byte_data(client, reg);
++ if (val1 < 0)
++ return val1;
++ val2 = i2c_smbus_read_byte_data(client, reg + 1);
++ if (val2 < 0)
++ return val2;
+
+- return val;
++ return val1 | (val2 << 8);
+ }
+
+ static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Lothar Felten <lothar.felten@gmail.com>
+Date: Tue, 14 Aug 2018 09:09:37 +0200
+Subject: hwmon: (ina2xx) fix sysfs shunt resistor read access
+
+From: Lothar Felten <lothar.felten@gmail.com>
+
+[ Upstream commit 3ad867001c91657c46dcf6656d52eb6080286fd5 ]
+
+fix the sysfs shunt resistor read access: return the shunt resistor
+value, not the calibration register contents.
+
+update email address
+
+Signed-off-by: Lothar Felten <lothar.felten@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/hwmon/ina2xx | 2 +-
+ drivers/hwmon/ina2xx.c | 13 +++++++++++--
+ include/linux/platform_data/ina2xx.h | 2 +-
+ 3 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/Documentation/hwmon/ina2xx
++++ b/Documentation/hwmon/ina2xx
+@@ -32,7 +32,7 @@ Supported chips:
+ Datasheet: Publicly available at the Texas Instruments website
+ http://www.ti.com/
+
+-Author: Lothar Felten <l-felten@ti.com>
++Author: Lothar Felten <lothar.felten@gmail.com>
+
+ Description
+ -----------
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -17,7 +17,7 @@
+ * Bi-directional Current/Power Monitor with I2C Interface
+ * Datasheet: http://www.ti.com/product/ina230
+ *
+- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
++ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
+ * Thanks to Jan Volkering
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2x
+ return 0;
+ }
+
++static ssize_t ina2xx_show_shunt(struct device *dev,
++ struct device_attribute *da,
++ char *buf)
++{
++ struct ina2xx_data *data = dev_get_drvdata(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
++}
++
+ static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input,
+
+ /* shunt resistance */
+ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+- ina2xx_show_value, ina2xx_store_shunt,
++ ina2xx_show_shunt, ina2xx_store_shunt,
+ INA2XX_CALIBRATION);
+
+ /* update interval (ina226 only) */
+--- a/include/linux/platform_data/ina2xx.h
++++ b/include/linux/platform_data/ina2xx.h
+@@ -1,7 +1,7 @@
+ /*
+ * Driver for Texas Instruments INA219, INA226 power monitor chips
+ *
+- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
++ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Thu, 30 Aug 2018 11:50:13 +0300
+Subject: i2c: i801: Allow ACPI AML access I/O ports not reserved for SMBus
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit 7fd6d98b89f382d414e1db528e29a67bbd749457 ]
+
+Commit 7ae81952cda ("i2c: i801: Allow ACPI SystemIO OpRegion to conflict
+with PCI BAR") made it possible for AML code to access SMBus I/O ports
+by installing custom SystemIO OpRegion handler and blocking i801 driver
+access upon first AML read/write to this OpRegion.
+
+However, while ThinkPad T560 does have SystemIO OpRegion declared under
+the SMBus device, it does not access any of the SMBus registers:
+
+ Device (SMBU)
+ {
+ ...
+
+ OperationRegion (SMBP, PCI_Config, 0x50, 0x04)
+ Field (SMBP, DWordAcc, NoLock, Preserve)
+ {
+ , 5,
+ TCOB, 11,
+ Offset (0x04)
+ }
+
+ Name (TCBV, 0x00)
+ Method (TCBS, 0, NotSerialized)
+ {
+ If ((TCBV == 0x00))
+ {
+ TCBV = (\_SB.PCI0.SMBU.TCOB << 0x05)
+ }
+
+ Return (TCBV) /* \_SB_.PCI0.SMBU.TCBV */
+ }
+
+ OperationRegion (TCBA, SystemIO, TCBS (), 0x10)
+ Field (TCBA, ByteAcc, NoLock, Preserve)
+ {
+ Offset (0x04),
+ , 9,
+ CPSC, 1
+ }
+ }
+
+Problem with the current approach is that it blocks all I/O port access
+and because this system has touchpad connected to the SMBus controller
+after first AML access (happens during suspend/resume cycle) the
+touchpad fails to work anymore.
+
+Fix this so that we allow ACPI AML I/O port access if it does not touch
+the region reserved for the SMBus.
+
+Fixes: 7ae81952cda ("i2c: i801: Allow ACPI SystemIO OpRegion to conflict with PCI BAR")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=200737
+Reported-by: Yussuf Khalil <dev@pp3345.net>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-i801.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1416,6 +1416,13 @@ static void i801_add_tco(struct i801_pri
+ }
+
+ #ifdef CONFIG_ACPI
++static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
++ acpi_physical_address address)
++{
++ return address >= priv->smba &&
++ address <= pci_resource_end(priv->pci_dev, SMBBAR);
++}
++
+ static acpi_status
+ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+@@ -1431,7 +1438,7 @@ i801_acpi_io_handler(u32 function, acpi_
+ */
+ mutex_lock(&priv->acpi_lock);
+
+- if (!priv->acpi_reserved) {
++ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Thu, 16 Aug 2018 21:44:02 -0500
+Subject: isofs: reject hardware sector size > 2048 bytes
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+[ Upstream commit 09a4e0be5826aa66c4ce9954841f110ffe63ef4f ]
+
+The largest block size supported by isofs is ISOFS_BLOCK_SIZE (2048), but
+isofs_fill_super calls sb_min_blocksize and sets the blocksize to the
+device's logical block size if it's larger than what we ended up with after
+option parsing.
+
+If for some reason we try to mount a hard 4k device as an isofs filesystem,
+we'll set opt.blocksize to 4096, and when we try to read the superblock
+we found via:
+
+ block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits)
+
+with s_blocksize_bits greater than ISOFS_BLOCK_BITS, we'll have a negative
+shift and the bread will fail somewhat cryptically:
+
+ isofs_fill_super: bread failed, dev=sda, iso_blknum=17, block=-2147483648
+
+It seems best to just catch and clearly reject mounts of such a device.
+
+Reported-by: Bryan Gurney <bgurney@redhat.com>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/isofs/inode.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -24,6 +24,7 @@
+ #include <linux/mpage.h>
+ #include <linux/user_namespace.h>
+ #include <linux/seq_file.h>
++#include <linux/blkdev.h>
+
+ #include "isofs.h"
+ #include "zisofs.h"
+@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super
+ /*
+ * What if bugger tells us to go beyond page size?
+ */
++ if (bdev_logical_block_size(s->s_bdev) > 2048) {
++ printk(KERN_WARNING
++ "ISOFS: unsupported/invalid hardware sector size %d\n",
++ bdev_logical_block_size(s->s_bdev));
++ goto out_freesbi;
++ }
+ opt.blocksize = sb_min_blocksize(s, opt.blocksize);
+
+ sbi->s_high_sierra = 0; /* default is iso9660 */
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 23 Aug 2018 11:10:10 +0800
+Subject: net: hns: fix length and page_offset overflow when CONFIG_ARM64_64K_PAGES
+
+From: Huazhong Tan <tanhuazhong@huawei.com>
+
+[ Upstream commit 3ed614dce3ca9912d22be215ff0f11104b69fe62 ]
+
+When enable the config item "CONFIG_ARM64_64K_PAGES", the size of PAGE_SIZE
+is 65536(64K). But the type of length and page_offset are u16, they will
+overflow. So change them to u32.
+
+Fixes: 6fe6611ff275 ("net: add Hisilicon Network Subsystem hnae framework support")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/hisilicon/hns/hnae.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
+@@ -220,10 +220,10 @@ struct hnae_desc_cb {
+
+ /* priv data for the desc, e.g. skb when use with ip stack*/
+ void *priv;
+- u16 page_offset;
+- u16 reuse_flag;
++ u32 page_offset;
++ u32 length; /* length of the buffer */
+
+- u16 length; /* length of the buffer */
++ u16 reuse_flag;
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 23 Aug 2018 11:10:12 +0800
+Subject: net: hns: fix skb->truesize underestimation
+
+From: Huazhong Tan <tanhuazhong@huawei.com>
+
+[ Upstream commit b1ccd4c0ab6ef499f47dd84ed4920502a7147bba ]
+
+skb->truesize is not meant to be tracking amount of used bytes in a skb,
+but amount of reserved/consumed bytes in memory.
+
+For instance, if we use a single byte in last page fragment, we have to
+account the full size of the fragment.
+
+So skb_add_rx_frag needs to calculate the length of the entire buffer into
+truesize.
+
+Fixes: 9cbe9fd5214e ("net: hns: optimize XGE capability by reducing cpu usage")
+Signed-off-by: Huazhong tan <tanhuazhong@huawei.com>
+Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -530,7 +530,7 @@ static void hns_nic_reuse_page(struct sk
+ }
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+- size - pull_len, truesize - pull_len);
++ size - pull_len, truesize);
+
+ /* avoid re-using remote pages,flag default unreuse */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: James Smart <jsmart2021@gmail.com>
+Date: Thu, 9 Aug 2018 16:00:14 -0700
+Subject: nvme-fcloop: Fix dropped LS's to removed target port
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit afd299ca996929f4f98ac20da0044c0cdc124879 ]
+
+When a targetport is removed from the config, fcloop will avoid calling
+the LS done() routine thinking the targetport is gone. This leaves the
+initiator reset/reconnect hanging as it waits for a status on the
+Create_Association LS for the reconnect.
+
+Change the filter in the LS callback path. If tport null (set when
+failed validation before "sending to remote port"), be sure to call
+done. This was the main bug. But, continue the logic that only calls
+done if tport was set but there is no remoteport (e.g. case where
+remoteport has been removed, thus host doesn't expect a completion).
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/fcloop.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -300,7 +300,7 @@ fcloop_tgt_lsrqst_done_work(struct work_
+ struct fcloop_tport *tport = tls_req->tport;
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+- if (tport->remoteport)
++ if (!tport || tport->remoteport)
+ lsreq->done(lsreq, tls_req->status);
+ }
+
+@@ -318,6 +318,7 @@ fcloop_ls_req(struct nvme_fc_local_port
+
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
++ tls_req->tport = NULL;
+ schedule_work(&tls_req->work);
+ return ret;
+ }
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+Date: Mon, 20 Aug 2018 00:01:45 +0300
+Subject: qed: Avoid sending mailbox commands when MFW is not responsive
+
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+
+[ Upstream commit b310974e041913231b6e3d5d475d4df55c312301 ]
+
+Sending mailbox commands to the MFW when it is not responsive ends up
+with a redundant amount of timeout expiries.
+This patch prints the MCP status on the first command which is not
+responded, and blocks the following commands.
+Since the (un)load request commands might be not responded due to other
+PFs, the patch also adds the option to skip the blocking upon a failure.
+
+Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 52 ++++++++++++++++++++++++-
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 6 ++
+ drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 1
+ 3 files changed, 56 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -319,6 +319,12 @@ int qed_mcp_reset(struct qed_hwfn *p_hwf
+ u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
+ int rc = 0;
+
++ if (p_hwfn->mcp_info->b_block_cmd) {
++ DP_NOTICE(p_hwfn,
++ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
++ return -EBUSY;
++ }
++
+ /* Ensure that only a single thread is accessing the mailbox */
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+@@ -444,6 +450,33 @@ static void __qed_mcp_cmd_and_union(stru
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+ }
+
++static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
++{
++ p_hwfn->mcp_info->b_block_cmd = block_cmd;
++
++ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
++ block_cmd ? "Block" : "Unblock");
++}
++
++static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
++ struct qed_ptt *p_ptt)
++{
++ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
++ u32 delay = QED_MCP_RESP_ITER_US;
++
++ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
++ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
++ cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
++ udelay(delay);
++ cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
++ udelay(delay);
++ cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
++
++ DP_NOTICE(p_hwfn,
++ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
++ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
++}
++
+ static int
+ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -530,11 +563,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
+ DP_NOTICE(p_hwfn,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
++ qed_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
++ if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
++ qed_mcp_cmd_set_blocking(p_hwfn, true);
++
+ return -EAGAIN;
+ }
+
+@@ -572,6 +609,13 @@ static int qed_mcp_cmd_and_union(struct
+ return -EBUSY;
+ }
+
++ if (p_hwfn->mcp_info->b_block_cmd) {
++ DP_NOTICE(p_hwfn,
++ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
++ p_mb_params->cmd, p_mb_params->param);
++ return -EBUSY;
++ }
++
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+@@ -776,7 +820,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hw
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+- mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
++ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+@@ -1020,7 +1064,7 @@ int qed_mcp_unload_req(struct qed_hwfn *
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+ mb_params.param = wol_param;
+- mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
++ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ }
+@@ -2047,6 +2091,8 @@ int qed_mcp_halt(struct qed_hwfn *p_hwfn
+ return -EBUSY;
+ }
+
++ qed_mcp_cmd_set_blocking(p_hwfn, true);
++
+ return 0;
+ }
+
+@@ -2071,6 +2117,8 @@ int qed_mcp_resume(struct qed_hwfn *p_hw
+ return -EBUSY;
+ }
+
++ qed_mcp_cmd_set_blocking(p_hwfn, false);
++
+ return 0;
+ }
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -540,11 +540,14 @@ struct qed_mcp_info {
+ */
+ spinlock_t cmd_lock;
+
++ /* Flag to indicate whether sending a MFW mailbox command is blocked */
++ bool b_block_cmd;
++
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ spinlock_t link_lock;
+- bool block_mb_sending;
++
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+@@ -575,6 +578,7 @@ struct qed_mcp_mb_params {
+ u32 mcp_param;
+ u32 flags;
+ #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
++#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
+ #define QED_MB_FLAGS_IS_SET(params, flag) \
+ ({ typeof(params) __params = (params); \
+ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
+--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+@@ -557,6 +557,7 @@
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
+ #define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
++#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
+ #define PGLUE_B_REG_PF_BAR0_SIZE \
+ 0x2aae60UL
+ #define PGLUE_B_REG_PF_BAR1_SIZE \
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+Date: Mon, 20 Aug 2018 00:01:44 +0300
+Subject: qed: Prevent a possible deadlock during driver load and unload
+
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+
+[ Upstream commit eaa50fc59e5841910987e90b0438b2643041f508 ]
+
+The MFW manages an internal lock to prevent concurrent hardware
+(de)initialization of different PFs.
+This, together with the busy-waiting for the MFW's responses for commands,
+might lead to a deadlock during concurrent load or unload of PFs.
+This patch adds the option to sleep within the busy-waiting, and uses it
+for the (un)load requests (which are not sent from an interrupt context) to
+prevent the possible deadlock.
+
+Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 43 +++++++++++++++++++++---------
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 21 +++++++++-----
+ 2 files changed, 44 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -47,7 +47,7 @@
+ #include "qed_reg_addr.h"
+ #include "qed_sriov.h"
+
+-#define CHIP_MCP_RESP_ITER_US 10
++#define QED_MCP_RESP_ITER_US 10
+
+ #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+@@ -316,7 +316,7 @@ static void qed_mcp_reread_offsets(struc
+
+ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ {
+- u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
++ u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
+ int rc = 0;
+
+ /* Ensure that only a single thread is accessing the mailbox */
+@@ -448,10 +448,10 @@ static int
+ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+- u32 max_retries, u32 delay)
++ u32 max_retries, u32 usecs)
+ {
++ u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+- u32 cnt = 0;
+ u16 seq_num;
+ int rc = 0;
+
+@@ -474,7 +474,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+- udelay(delay);
++
++ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
++ msleep(msecs);
++ else
++ udelay(usecs);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+@@ -503,7 +507,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
+ * The spinlock stays locked until the list element is removed.
+ */
+
+- udelay(delay);
++ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
++ msleep(msecs);
++ else
++ udelay(usecs);
++
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+@@ -538,7 +546,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp,
+ p_mb_params->mcp_param,
+- (cnt * delay) / 1000, (cnt * delay) % 1000);
++ (cnt * usecs) / 1000, (cnt * usecs) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+@@ -556,7 +564,7 @@ static int qed_mcp_cmd_and_union(struct
+ {
+ size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+- u32 delay = CHIP_MCP_RESP_ITER_US;
++ u32 usecs = QED_MCP_RESP_ITER_US;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+@@ -573,8 +581,13 @@ static int qed_mcp_cmd_and_union(struct
+ return -EINVAL;
+ }
+
++ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
++ max_retries = DIV_ROUND_UP(max_retries, 1000);
++ usecs *= 1000;
++ }
++
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+- delay);
++ usecs);
+ }
+
+ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+@@ -763,6 +776,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hw
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
++ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+@@ -984,7 +998,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_
+
+ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ {
+- u32 wol_param, mcp_resp, mcp_param;
++ struct qed_mcp_mb_params mb_params;
++ u32 wol_param;
+
+ switch (p_hwfn->cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+@@ -1002,8 +1017,12 @@ int qed_mcp_unload_req(struct qed_hwfn *
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
+- return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+- &mcp_resp, &mcp_param);
++ memset(&mb_params, 0, sizeof(mb_params));
++ mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
++ mb_params.param = wol_param;
++ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
++
++ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ }
+
+ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -565,14 +565,19 @@ struct qed_mcp_info {
+ };
+
+ struct qed_mcp_mb_params {
+- u32 cmd;
+- u32 param;
+- void *p_data_src;
+- u8 data_src_size;
+- void *p_data_dst;
+- u8 data_dst_size;
+- u32 mcp_resp;
+- u32 mcp_param;
++ u32 cmd;
++ u32 param;
++ void *p_data_src;
++ void *p_data_dst;
++ u8 data_src_size;
++ u8 data_dst_size;
++ u32 mcp_resp;
++ u32 mcp_param;
++ u32 flags;
++#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
++#define QED_MB_FLAGS_IS_SET(params, flag) \
++ ({ typeof(params) __params = (params); \
++ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
+ };
+
+ /**
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+Date: Mon, 20 Aug 2018 00:01:43 +0300
+Subject: qed: Wait for MCP halt and resume commands to take place
+
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+
+[ Upstream commit 76271809f49056f079e202bf6513d17b0d6dd34d ]
+
+Successive iterations of halting and resuming the management chip (MCP)
+might fail, since currently the driver doesn't wait for these operations to
+actually take place.
+This patch prevents the driver from moving forward before the operations
+are reflected in the state register.
+
+Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 46 ++++++++++++++++++++-----
+ drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 1
+ 2 files changed, 39 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -1998,31 +1998,61 @@ qed_mcp_send_drv_version(struct qed_hwfn
+ return rc;
+ }
+
++/* A maximal 100 msec waiting time for the MCP to halt */
++#define QED_MCP_HALT_SLEEP_MS 10
++#define QED_MCP_HALT_MAX_RETRIES 10
++
+ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ {
+- u32 resp = 0, param = 0;
++ u32 resp = 0, param = 0, cpu_state, cnt = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ ¶m);
+- if (rc)
++ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
++ return rc;
++ }
+
+- return rc;
++ do {
++ msleep(QED_MCP_HALT_SLEEP_MS);
++ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
++ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
++ break;
++ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
++
++ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
++ DP_NOTICE(p_hwfn,
++ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
++ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
++ return -EBUSY;
++ }
++
++ return 0;
+ }
+
++#define QED_MCP_RESUME_SLEEP_MS 10
++
+ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ {
+- u32 value, cpu_mode;
++ u32 cpu_mode, cpu_state;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+- value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+- qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
++ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
++ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
++ msleep(QED_MCP_RESUME_SLEEP_MS);
++ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
++
++ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
++ DP_NOTICE(p_hwfn,
++ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
++ cpu_mode, cpu_state);
++ return -EBUSY;
++ }
+
+- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
++ return 0;
+ }
+
+ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+@@ -554,6 +554,7 @@
+ 0
+ #define MCP_REG_CPU_STATE \
+ 0xe05004UL
++#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
+ #define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
+ #define PGLUE_B_REG_PF_BAR0_SIZE \
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+Date: Mon, 20 Aug 2018 00:01:42 +0300
+Subject: qed: Wait for ready indication before rereading the shmem
+
+From: Tomer Tayar <Tomer.Tayar@cavium.com>
+
+[ Upstream commit f00d25f3154b676fcea4502a25b94bd7f142ca74 ]
+
+The MFW might be reset and re-update its shared memory.
+Upon the detection of such a reset the driver rereads this memory, but it
+has to wait till the data is valid.
+This patch adds the missing wait for a data ready indication.
+
+Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 50 ++++++++++++++++++++++++------
+ 1 file changed, 41 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -182,18 +182,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn
+ return 0;
+ }
+
++/* Maximum of 1 sec to wait for the SHMEM ready indication */
++#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
++#define QED_MCP_SHMEM_RDY_ITER_MS 50
++
+ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ {
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
++ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
++ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+- if (!p_info->public_base)
+- return 0;
++ if (!p_info->public_base) {
++ DP_NOTICE(p_hwfn,
++ "The address of the MCP scratch-pad is not configured\n");
++ return -EINVAL;
++ }
+
+ p_info->public_base |= GRCBASE_MCP;
+
++ /* Get the MFW MB address and number of supported messages */
++ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
++ SECTION_OFFSIZE_ADDR(p_info->public_base,
++ PUBLIC_MFW_MB));
++ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
++ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
++ p_info->mfw_mb_addr +
++ offsetof(struct public_mfw_mb,
++ sup_msgs));
++
++ /* The driver can notify that there was an MCP reset, and might read the
++ * SHMEM values before the MFW has completed initializing them.
++ * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
++ * data ready indication.
++ */
++ while (!p_info->mfw_mb_length && --cnt) {
++ msleep(msec);
++ p_info->mfw_mb_length =
++ (u16)qed_rd(p_hwfn, p_ptt,
++ p_info->mfw_mb_addr +
++ offsetof(struct public_mfw_mb, sup_msgs));
++ }
++
++ if (!cnt) {
++ DP_NOTICE(p_hwfn,
++ "Failed to get the SHMEM ready notification after %d msec\n",
++ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
++ return -EBUSY;
++ }
++
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+@@ -203,13 +242,6 @@ static int qed_load_mcp_offsets(struct q
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+- /* Set the MFW MB address */
+- mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+- SECTION_OFFSIZE_ADDR(p_info->public_base,
+- PUBLIC_MFW_MB));
+- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+- p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+-
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Leonard Crestez <leonard.crestez@nxp.com>
+Date: Tue, 24 Jul 2018 19:14:19 +0300
+Subject: Revert "ARM: dts: imx7d: Invert legacy PCI irq mapping"
+
+From: Leonard Crestez <leonard.crestez@nxp.com>
+
+[ Upstream commit 538d6e9d597584e80514698e24321645debde78f ]
+
+This reverts commit 1c86c9dd82f859b474474a7fee0d5195da2c9c1d.
+
+That commit followed the reference manual but unfortunately the imx7d
+manual is incorrect.
+
+Tested with ath9k pcie card and confirmed internally.
+
+Signed-off-by: Leonard Crestez <leonard.crestez@nxp.com>
+Acked-by: Lucas Stach <l.stach@pengutronix.de>
+Fixes: 1c86c9dd82f8 ("ARM: dts: imx7d: Invert legacy PCI irq mapping")
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/imx7d.dtsi | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/imx7d.dtsi
++++ b/arch/arm/boot/dts/imx7d.dtsi
+@@ -144,10 +144,14 @@
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+- interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
++ /*
++ * Reference manual lists pci irqs incorrectly
++ * Real hardware ordering is same as imx6: D+MSI, C, B, A
++ */
++ interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
+ <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
+ <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
--- /dev/null
+From 7e620984b62532783912312e334f3c48cdacbd5d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Thu, 20 Sep 2018 14:11:17 +0200
+Subject: serial: imx: restore handshaking irq for imx1
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 7e620984b62532783912312e334f3c48cdacbd5d upstream.
+
+Back in 2015, when irda was dropped from the driver, imx1 was broken. This
+change reintroduces the support for the third interrupt of the UART.
+
+Fixes: afe9cbb1a6ad ("serial: imx: drop support for IRDA")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Reviewed-by: Leonard Crestez <leonard.crestez@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/tty/serial/imx.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2213,6 +2213,14 @@ static int serial_imx_probe(struct platf
+ ret);
+ return ret;
+ }
++
++ ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
++ dev_name(&pdev->dev), sport);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
++ ret);
++ return ret;
++ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
+ dev_name(&pdev->dev), sport);
ib-hfi1-fix-context-recovery-when-pbc-has-an-unsupportedvl.patch
rdma-uverbs-atomically-flush-and-mark-closed-the-comp-event-queue.patch
ovl-hash-non-dir-by-lower-inode-for-fsnotify.patch
+drm-i915-remove-vma-from-object-on-destroy-not-close.patch
+serial-imx-restore-handshaking-irq-for-imx1.patch
+arm64-kvm-tighten-guest-core-register-access-from-userspace.patch
+qed-wait-for-ready-indication-before-rereading-the-shmem.patch
+qed-wait-for-mcp-halt-and-resume-commands-to-take-place.patch
+qed-prevent-a-possible-deadlock-during-driver-load-and-unload.patch
+qed-avoid-sending-mailbox-commands-when-mfw-is-not-responsive.patch
+thermal-of-thermal-disable-passive-polling-when-thermal-zone-is-disabled.patch
+isofs-reject-hardware-sector-size-2048-bytes.patch
+tls-possible-hang-when-do_tcp_sendpages-hits-sndbuf-is-full-case.patch
+bpf-sockmap-write_space-events-need-to-be-passed-to-tcp-handler.patch
+net-hns-fix-length-and-page_offset-overflow-when-config_arm64_64k_pages.patch
+net-hns-fix-skb-truesize-underestimation.patch
+e1000-check-on-netif_running-before-calling-e1000_up.patch
+e1000-ensure-to-free-old-tx-rx-rings-in-set_ringparam.patch
+crypto-cavium-nitrox-fix-for-command-corruption-in-queue-full-case-with-backlog-submissions.patch
+hwmon-ina2xx-fix-sysfs-shunt-resistor-read-access.patch
+hwmon-adt7475-make-adt7475_read_word-return-errors.patch
+revert-arm-dts-imx7d-invert-legacy-pci-irq-mapping.patch
+drm-amdgpu-enable-disable-gfx-pg-feature-in-rlc-safe-mode.patch
+drm-amdgpu-update-power-state-at-the-end-of-smu-hw_init.patch
+ata-ftide010-add-a-quirk-for-sq201.patch
+nvme-fcloop-fix-dropped-ls-s-to-removed-target-port.patch
+arm-dts-omap4-droid4-fix-emmc-errors-seen-on-some-devices.patch
+arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch
+arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch
+i2c-i801-allow-acpi-aml-access-i-o-ports-not-reserved-for-smbus.patch
+x86-pti-fix-section-mismatch-warning-error.patch
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Anson Huang <Anson.Huang@nxp.com>
+Date: Tue, 31 Jul 2018 00:56:49 +0800
+Subject: thermal: of-thermal: disable passive polling when thermal zone is disabled
+
+From: Anson Huang <Anson.Huang@nxp.com>
+
+[ Upstream commit 152395fd03d4ce1e535a75cdbf58105e50587611 ]
+
+When thermal zone is in passive mode, disabling its mode from
+sysfs is NOT taking effect at all, it is still polling the
+temperature of the disabled thermal zone and handling all thermal
+trips, it makes user confused. The disabling operation should
+disable the thermal zone behavior completely, for both active and
+passive mode, this patch clears the passive_delay when thermal
+zone is disabled and restores it when it is enabled.
+
+Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
+Signed-off-by: Eduardo Valentin <edubezval@gmail.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/of-thermal.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/thermal/of-thermal.c
++++ b/drivers/thermal/of-thermal.c
+@@ -278,10 +278,13 @@ static int of_thermal_set_mode(struct th
+
+ mutex_lock(&tz->lock);
+
+- if (mode == THERMAL_DEVICE_ENABLED)
++ if (mode == THERMAL_DEVICE_ENABLED) {
+ tz->polling_delay = data->polling_delay;
+- else
++ tz->passive_delay = data->passive_delay;
++ } else {
+ tz->polling_delay = 0;
++ tz->passive_delay = 0;
++ }
+
+ mutex_unlock(&tz->lock);
+
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Wed, 22 Aug 2018 08:37:32 -0700
+Subject: tls: possible hang when do_tcp_sendpages hits sndbuf is full case
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 67db7cd249e71f64346f481b629724376d063e08 ]
+
+Currently, the lower protocols sk_write_space handler is not called if
+TLS is sending a scatterlist via tls_push_sg. However, normally
+tls_push_sg calls do_tcp_sendpage, which may be under memory pressure,
+that in turn may trigger a wait via sk_wait_event. Typically, this
+happens when the in-flight bytes exceed the sndbuf size. In the normal
+case when enough ACKs are received sk_write_space() will be called and
+the sk_wait_event will be woken up allowing it to send more data
+and/or return to the user.
+
+But, in the TLS case because the sk_write_space() handler does not
+wake up the events the above send will wait until the sndtimeo is
+exceeded. By default this is MAX_SCHEDULE_TIMEOUT so it looks like a
+hang to the user (especially this impatient user). To fix this pass
+the sk_write_space event to the lower layers sk_write_space event
+which in the TCP case will wake any pending events.
+
+I observed the above while integrating sockmap and ktls. It
+initially appeared as test_sockmap (modified to use ktls) occasionally
+hanging. To reliably reproduce this reduce the sndbuf size and stress
+the tls layer by sending many 1B sends. This results in every byte
+needing a header and each byte individually being sent to the crypto
+layer.
+
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Dave Watson <davejwatson@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_main.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -195,9 +195,14 @@ static void tls_write_space(struct sock
+ {
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+- /* We are already sending pages, ignore notification */
+- if (ctx->in_tcp_sendpages)
++ /* If in_tcp_sendpages call lower protocol write space handler
++ * to ensure we wake up any waiting operations there. For example
++ * if do_tcp_sendpages where to call sk_wait_event.
++ */
++ if (ctx->in_tcp_sendpages) {
++ ctx->sk_write_space(sk);
+ return;
++ }
+
+ if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
+ gfp_t sk_allocation = sk->sk_allocation;
--- /dev/null
+From foo@baz Tue Oct 2 05:01:15 PDT 2018
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Sat, 1 Sep 2018 21:01:28 -0700
+Subject: x86/pti: Fix section mismatch warning/error
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit ff924c5a1ec7548825cc2d07980b03be4224ffac ]
+
+Fix the section mismatch warning in arch/x86/mm/pti.c:
+
+WARNING: vmlinux.o(.text+0x6972a): Section mismatch in reference from the function pti_clone_pgtable() to the function .init.text:pti_user_pagetable_walk_pte()
+The function pti_clone_pgtable() references
+the function __init pti_user_pagetable_walk_pte().
+This is often because pti_clone_pgtable lacks a __init
+annotation or the annotation of pti_user_pagetable_walk_pte is wrong.
+FATAL: modpost: Section mismatches detected.
+
+Fixes: 85900ea51577 ("x86/pti: Map the vsyscall page if needed")
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: https://lkml.kernel.org/r/43a6d6a3-d69d-5eda-da09-0b1c88215a2a@infradead.org
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/pti.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -224,7 +224,7 @@ static __init pmd_t *pti_user_pagetable_
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+ {
+ gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ pmd_t *pmd;