--- /dev/null
+From 67d20418088680d22037119d914982cd982b5c5a Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 12 May 2017 01:15:20 +1000
+Subject: powerpc/powernv: Fix CPU_HOTPLUG=n idle.c compile error
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 67d20418088680d22037119d914982cd982b5c5a upstream.
+
+Fixes: a7cd88da97 ("powerpc/powernv: Move CPU-Offline idle state invocation from smp.c to idle.c")
+Cc: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/idle.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/idle.c
++++ b/arch/powerpc/platforms/powernv/idle.c
+@@ -261,6 +261,7 @@ static u64 pnv_deepest_stop_psscr_val;
+ static u64 pnv_deepest_stop_psscr_mask;
+ static bool deepest_stop_found;
+
++#ifdef CONFIG_HOTPLUG_CPU
+ /*
+ * pnv_cpu_offline: A function that puts the CPU into the deepest
+ * available platform idle state on a CPU-Offline.
+@@ -293,6 +294,7 @@ unsigned long pnv_cpu_offline(unsigned i
+
+ return srr1;
+ }
++#endif
+
+ /*
+ * Power ISA 3.0 idle initialization.
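A note on the pattern used above: the hunk compiles pnv_cpu_offline() only when
CONFIG_HOTPLUG_CPU is set, i.e. only in the configuration in which the CPU-offline
path actually calls it, so a CONFIG_HOTPLUG_CPU=n build no longer has to compile a
helper whose dependencies only exist with hotplug enabled. A minimal userspace
sketch of that guard pattern (hypothetical names, not the powernv code itself;
build with and without -DCONFIG_HOTPLUG_CPU to see both configurations compile
cleanly under -Wall -Werror):

#include <stdio.h>

#ifdef CONFIG_HOTPLUG_CPU
/* Only meaningful when CPUs can be taken offline at run time. */
static unsigned long cpu_offline_idle(unsigned int cpu)
{
	printf("cpu %u entering deepest idle state\n", cpu);
	return 0;
}
#endif

int main(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return (int)cpu_offline_idle(1);
#else
	/* No hotplug: the helper is never referenced, so it is not built at all. */
	return 0;
#endif
}
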
--- /dev/null
+From 236222d39347e0e486010f10c1493e83dbbdfba8 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 29 Jun 2017 15:55:58 +0200
+Subject: x86/uaccess: Optimize copy_user_enhanced_fast_string() for short strings
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 236222d39347e0e486010f10c1493e83dbbdfba8 upstream.
+
+According to the Intel datasheet, the REP MOVSB instruction
+exposes a pretty heavy setup cost (50 ticks), which hurts
+short string copy operations.
+
+This change tries to avoid this cost by calling the explicit
+loop available in the unrolled code for strings shorter
+than 64 bytes.
+
+The 64 bytes cutoff value is arbitrary from the code logic
+point of view - it has been selected based on measurements,
+as the largest value that still ensures a measurable gain.
+
+Micro benchmarks of the __copy_from_user() function with
+lengths in the [0-63] range show this performance gain
+(the shorter the string, the larger the gain):
+
+ - in the [55%-4%] range on Intel Xeon(R) CPU E5-2690 v4
+ - in the [72%-9%] range on Intel Core i7-4810MQ
+
+Other tested CPUs - namely Intel Atom S1260 and AMD Opteron
+8216 - show no difference, because they do not expose the
+ERMS feature bit.
+
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Alan Cox <gnomes@lxorguk.ukuu.org.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/4533a1d101fd460f80e21329a34928fad521c1d4.1498744345.git.pabeni@redhat.com
+[ Clarified the changelog. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+
+---
+ arch/x86/lib/copy_user_64.S | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -37,7 +37,7 @@ ENTRY(copy_user_generic_unrolled)
+ movl %edx,%ecx
+ andl $63,%edx
+ shrl $6,%ecx
+- jz 17f
++ jz .L_copy_short_string
+ 1: movq (%rsi),%r8
+ 2: movq 1*8(%rsi),%r9
+ 3: movq 2*8(%rsi),%r10
+@@ -58,7 +58,8 @@ ENTRY(copy_user_generic_unrolled)
+ leaq 64(%rdi),%rdi
+ decl %ecx
+ jnz 1b
+-17: movl %edx,%ecx
++.L_copy_short_string:
++ movl %edx,%ecx
+ andl $7,%edx
+ shrl $3,%ecx
+ jz 20f
+@@ -174,6 +175,8 @@ EXPORT_SYMBOL(copy_user_generic_string)
+ */
+ ENTRY(copy_user_enhanced_fast_string)
+ ASM_STAC
++ cmpl $64,%edx
++	jb .L_copy_short_string /* less than 64 bytes, avoid the costly 'rep' */
+ movl %edx,%ecx
+ 1: rep
+ movsb
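The control flow the patch adds can be pictured in C as a length check in front of
the ERMS path: copies below 64 bytes take the explicit short-string loop, larger
copies pay the REP MOVSB setup cost and amortize it. Below is a rough, x86-64-only
userspace illustration with hypothetical function names; the real implementation
stays in copy_user_64.S and additionally handles user-pointer faults, which this
sketch omits:

#include <stddef.h>
#include <string.h>

/* Explicit byte loop: no REP setup cost, wins for short copies. */
static void copy_short(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (len--)
		*d++ = *s++;
}

/* REP MOVSB: amortizes its setup cost on larger copies (ERMS CPUs). */
static void copy_erms(void *dst, const void *src, size_t len)
{
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (len)
		     : : "memory");
}

/* Same 64-byte cutoff the patch places in front of the ERMS path. */
static void copy_dispatch(void *dst, const void *src, size_t len)
{
	if (len < 64)
		copy_short(dst, src, len);
	else
		copy_erms(dst, src, len);
}

int main(void)
{
	unsigned char src[128], dst[128];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)i;

	copy_dispatch(dst, src, 32);	/* takes the short-string loop */
	copy_dispatch(dst, src, 128);	/* takes REP MOVSB */
	return memcmp(dst, src, 128) != 0;
}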