--- /dev/null
+From 336ae1180df5f69b9e0fb6561bec01c5f64361cf Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Mon, 17 Jun 2013 15:40:58 -0700
+Subject: ARM: sched_clock: Load cycle count after epoch stabilizes
+
+From: Stephen Boyd <sboyd@codeaurora.org>
+
+commit 336ae1180df5f69b9e0fb6561bec01c5f64361cf upstream.
+
+There is a small race between when the cycle count is read from
+the hardware and when the epoch stabilizes. Consider this
+scenario:
+
+ CPU0                           CPU1
+ ----                           ----
+ cyc = read_sched_clock()
+ cyc_to_sched_clock()
+                                update_sched_clock()
+                                ...
+                                cd.epoch_cyc = cyc;
+ epoch_cyc = cd.epoch_cyc;
+ ...
+ epoch_ns + cyc_to_ns((cyc - epoch_cyc)
+
+The cyc on CPU0 was read before the epoch changed, but we
+calculate the nanoseconds based on the new epoch by subtracting
+the new epoch from the old cycle count. Since the new epoch is
+most likely larger than the old cycle count, we calculate a large
+number that is converted to nanoseconds and added to epoch_ns,
+causing time to jump forward too much.
+
+Fix this problem by reading the hardware after the epoch has
+stabilized.
+
+Cc: Russell King <linux@arm.linux.org.uk>
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/sched_clock.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/kernel/sched_clock.c
++++ b/arch/arm/kernel/sched_clock.c
+@@ -51,10 +51,11 @@ static inline u64 notrace cyc_to_ns(u64
+ return (cyc * mult) >> shift;
+ }
+
+-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
++static unsigned long long notrace sched_clock_32(void)
+ {
+ u64 epoch_ns;
+ u32 epoch_cyc;
++ u32 cyc;
+
+ if (cd.suspended)
+ return cd.epoch_ns;
+@@ -73,7 +74,9 @@ static unsigned long long notrace cyc_to
+ smp_rmb();
+ } while (epoch_cyc != cd.epoch_cyc_copy);
+
+- return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
++ cyc = read_sched_clock();
++ cyc = (cyc - epoch_cyc) & sched_clock_mask;
++ return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+ }
+
+ /*
+@@ -165,12 +168,6 @@ void __init setup_sched_clock(u32 (*read
+ pr_debug("Registered %pF as sched_clock source\n", read);
+ }
+
+-static unsigned long long notrace sched_clock_32(void)
+-{
+- u32 cyc = read_sched_clock();
+- return cyc_to_sched_clock(cyc, sched_clock_mask);
+-}
+-
+ unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
+
+ unsigned long long notrace sched_clock(void)
--- /dev/null
+From df4e7ac0bb70abc97fbfd9ef09671fc084b3f9db Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 3 Dec 2013 11:20:06 +0100
+Subject: ext2: Fix oops in ext2_get_block() called from ext2_quota_write()
+
+From: Jan Kara <jack@suse.cz>
+
+commit df4e7ac0bb70abc97fbfd9ef09671fc084b3f9db upstream.
+
+ext2_quota_write() doesn't properly set up the bh it passes to
+ext2_get_block(), and thus we hit the assertion BUG_ON(maxblocks == 0)
+in ext2_get_blocks() (or we could actually ask for mapping an
+arbitrary number of blocks, depending on whatever value was on the
+stack).
+
+Fix ext2_quota_write() to properly fill in the number of blocks to map.
+
+Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext2/super.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct s
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
++ tmp_bh.b_size = sb->s_blocksize;
+ err = ext2_get_block(inode, blk, &tmp_bh, 1);
+ if (err < 0)
+ goto out;
--- /dev/null
+From aec653c43b0c55667355e26d7de1236bda9fb4e3 Mon Sep 17 00:00:00 2001
+From: Todd Fujinaka <todd.fujinaka@intel.com>
+Date: Tue, 17 Jun 2014 06:58:11 +0000
+Subject: igb: bring link up when PHY is powered up
+
+From: Todd Fujinaka <todd.fujinaka@intel.com>
+
+commit aec653c43b0c55667355e26d7de1236bda9fb4e3 upstream.
+
+Call igb_setup_link() when the PHY is powered up.
+
+Signed-off-by: Todd Fujinaka <todd.fujinaka@intel.com>
+Reported-by: Jeff Westfahl <jeff.westfahl@ni.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Cc: Vincent Donnefort <vdonnefort@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1584,6 +1584,8 @@ void igb_power_up_link(struct igb_adapte
+ igb_power_up_phy_copper(&adapter->hw);
+ else
+ igb_power_up_serdes_link_82575(&adapter->hw);
++
++ igb_setup_link(&adapter->hw);
+ }
+
+ /**
--- /dev/null
+From 02a988e6e4511b1f6d83525710a12db9c5a45149 Mon Sep 17 00:00:00 2001
+From: Nadav Har'El <nyh@il.ibm.com>
+Date: Mon, 5 Aug 2013 11:07:17 +0300
+Subject: nEPT: Nested INVEPT
+
+From: Nadav Har'El <nyh@il.ibm.com>
+
+commit bfd0a56b90005f8c8a004baf407ad90045c2b11e upstream.
+
+If we let L1 use EPT, we should probably also support the INVEPT instruction.
+
+In our current nested EPT implementation, when L1 changes its EPT table
+for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
+the course of this modification already calls INVEPT. But if the
+last level of the shadow page is unsynced, not all of L1's changes
+to EPT12 are intercepted, which means the roots need to be synced
+when L1 calls INVEPT. Global INVEPT should be no different, since
+the roots are synced by kvm_mmu_load() each time EPTP02 changes.
+
+Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
+Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
+Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
+Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
+Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
+Signed-off-by: Gleb Natapov <gleb@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[bwh: Backported to 3.2:
+ - Adjust context, filename
+ - Simplify handle_invept() as recommended by Paolo - nEPT is not
+ supported so we always raise #UD]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Vinson Lee <vlee@twopensource.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/uapi/asm/vmx.h | 1 +
+ arch/x86/kvm/vmx.c | 8 ++++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -65,6 +65,7 @@
+ #define EXIT_REASON_EOI_INDUCED 45
+ #define EXIT_REASON_EPT_VIOLATION 48
+ #define EXIT_REASON_EPT_MISCONFIG 49
++#define EXIT_REASON_INVEPT 50
+ #define EXIT_REASON_PREEMPTION_TIMER 52
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6242,6 +6242,12 @@ static int handle_vmptrst(struct kvm_vcp
+ return 1;
+ }
+
++static int handle_invept(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6286,6 +6292,7 @@ static int (*const kvm_vmx_exit_handlers
+ [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
++ [EXIT_REASON_INVEPT] = handle_invept,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6512,6 +6519,7 @@ static bool nested_vmx_exit_handled(stru
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
++ case EXIT_REASON_INVEPT:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
--- /dev/null
+From 152d44a853e42952f6c8a504fb1f8eefd21fd5fd Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Thu, 27 Nov 2014 08:11:28 +1100
+Subject: powerpc: 32 bit getcpu VDSO function uses 64 bit instructions
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 152d44a853e42952f6c8a504fb1f8eefd21fd5fd upstream.
+
+I used some 64 bit instructions when adding the 32 bit getcpu VDSO
+function. Fix it.
+
+Fixes: 18ad51dd342a ("powerpc: Add VDSO version of getcpu")
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/vdso32/getcpu.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/vdso32/getcpu.S
++++ b/arch/powerpc/kernel/vdso32/getcpu.S
+@@ -30,8 +30,8 @@
+ V_FUNCTION_BEGIN(__kernel_getcpu)
+ .cfi_startproc
+ mfspr r5,SPRN_USPRG3
+- cmpdi cr0,r3,0
+- cmpdi cr1,r4,0
++ cmpwi cr0,r3,0
++ cmpwi cr1,r4,0
+ clrlwi r6,r5,16
+ rlwinm r7,r5,16,31-15,31-0
+ beq cr0,1f
rtnetlink-release-net-refcnt-on-error-in-do_setlink.patch
net-mvneta-fix-tx-interrupt-delay.patch
net-sctp-use-max_header-for-headroom-reserve-in-output-path.patch
+nept-nested-invept.patch
+ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
+igb-bring-link-up-when-phy-is-powered-up.patch
+arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
+powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch