From 22f33563cdc2f433a59db5a152546f95241ce2ef Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Sun, 14 Dec 2014 12:10:15 -0800
Subject: [PATCH] 3.10-stable patches

added patches:
	arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
	ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
	igb-bring-link-up-when-phy-is-powered-up.patch
	nept-nested-invept.patch
	powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch
---
 ...d-cycle-count-after-epoch-stabilizes.patch | 83 +++++++++++++++++++
 ...t_block-called-from-ext2_quota_write.patch | 36 ++++++++
 ...bring-link-up-when-phy-is-powered-up.patch | 33 ++++++++
 queue-3.10/nept-nested-invept.patch           | 80 ++++++++++++++++++
 ...so-function-uses-64-bit-instructions.patch | 34 ++++++++
 queue-3.10/series                             |  5 ++
 6 files changed, 271 insertions(+)
 create mode 100644 queue-3.10/arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
 create mode 100644 queue-3.10/ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
 create mode 100644 queue-3.10/igb-bring-link-up-when-phy-is-powered-up.patch
 create mode 100644 queue-3.10/nept-nested-invept.patch
 create mode 100644 queue-3.10/powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch

diff --git a/queue-3.10/arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch b/queue-3.10/arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
new file mode 100644
index 00000000000..da294f52dfe
--- /dev/null
+++ b/queue-3.10/arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
@@ -0,0 +1,83 @@
+From 336ae1180df5f69b9e0fb6561bec01c5f64361cf Mon Sep 17 00:00:00 2001
+From: Stephen Boyd
+Date: Mon, 17 Jun 2013 15:40:58 -0700
+Subject: ARM: sched_clock: Load cycle count after epoch stabilizes
+
+From: Stephen Boyd
+
+commit 336ae1180df5f69b9e0fb6561bec01c5f64361cf upstream.
+
+There is a small race between when the cycle count is read from
+the hardware and when the epoch stabilizes. Consider this
+scenario:
+
+ CPU0                          CPU1
+ ----                          ----
+ cyc = read_sched_clock()      cyc_to_sched_clock()
+                                update_sched_clock()
+                                 ...
+                                 cd.epoch_cyc = cyc;
+ epoch_cyc = cd.epoch_cyc;
+ ...
+ epoch_ns + cyc_to_ns((cyc - epoch_cyc)
+
+The cyc on cpu0 was read before the epoch changed. But we
+calculate the nanoseconds based on the new epoch by subtracting
+the new epoch from the old cycle count. Since epoch is most likely
+larger than the old cycle count we calculate a large number that
+will be converted to nanoseconds and added to epoch_ns, causing
+time to jump forward too much.
+
+Fix this problem by reading the hardware after the epoch has
+stabilized.
+
+Cc: Russell King
+Signed-off-by: Stephen Boyd
+Signed-off-by: John Stultz
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ arch/arm/kernel/sched_clock.c |   13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/kernel/sched_clock.c
++++ b/arch/arm/kernel/sched_clock.c
+@@ -51,10 +51,11 @@ static inline u64 notrace cyc_to_ns(u64
+ 	return (cyc * mult) >> shift;
+ }
+ 
+-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
++static unsigned long long notrace sched_clock_32(void)
+ {
+ 	u64 epoch_ns;
+ 	u32 epoch_cyc;
++	u32 cyc;
+ 
+ 	if (cd.suspended)
+ 		return cd.epoch_ns;
+@@ -73,7 +74,9 @@ static unsigned long long notrace cyc_to
+ 		smp_rmb();
+ 	} while (epoch_cyc != cd.epoch_cyc_copy);
+ 
+-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
++	cyc = read_sched_clock();
++	cyc = (cyc - epoch_cyc) & sched_clock_mask;
++	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+ }
+ 
+ /*
+@@ -165,12 +168,6 @@ void __init setup_sched_clock(u32 (*read
+ 	pr_debug("Registered %pF as sched_clock source\n", read);
+ }
+ 
+-static unsigned long long notrace sched_clock_32(void)
+-{
+-	u32 cyc = read_sched_clock();
+-	return cyc_to_sched_clock(cyc, sched_clock_mask);
+-}
+-
+ unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
+ 
+ unsigned long long notrace sched_clock(void)
diff --git a/queue-3.10/ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch b/queue-3.10/ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
new file mode 100644
index 00000000000..7e909e7c96b
--- /dev/null
+++ b/queue-3.10/ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
@@ -0,0 +1,36 @@
+From df4e7ac0bb70abc97fbfd9ef09671fc084b3f9db Mon Sep 17 00:00:00 2001
+From: Jan Kara
+Date: Tue, 3 Dec 2013 11:20:06 +0100
+Subject: ext2: Fix oops in ext2_get_block() called from ext2_quota_write()
+
+From: Jan Kara
+
+commit df4e7ac0bb70abc97fbfd9ef09671fc084b3f9db upstream.
+
+ext2_quota_write() doesn't properly setup bh it passes to
+ext2_get_block() and thus we hit assertion BUG_ON(maxblocks == 0) in
+ext2_get_blocks() (or we could actually ask for mapping arbitrary number
+of blocks depending on whatever value was on stack).
+
+Fix ext2_quota_write() to properly fill in number of blocks to map.
+
+Reviewed-by: "Theodore Ts'o"
+Reviewed-by: Christoph Hellwig
+Reported-by: Christoph Hellwig
+Signed-off-by: Jan Kara
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext2/super.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct s
+ 			sb->s_blocksize - offset : towrite;
+ 
+ 	tmp_bh.b_state = 0;
++	tmp_bh.b_size = sb->s_blocksize;
+ 	err = ext2_get_block(inode, blk, &tmp_bh, 1);
+ 	if (err < 0)
+ 		goto out;
diff --git a/queue-3.10/igb-bring-link-up-when-phy-is-powered-up.patch b/queue-3.10/igb-bring-link-up-when-phy-is-powered-up.patch
new file mode 100644
index 00000000000..fc27b6faf1f
--- /dev/null
+++ b/queue-3.10/igb-bring-link-up-when-phy-is-powered-up.patch
@@ -0,0 +1,33 @@
+From aec653c43b0c55667355e26d7de1236bda9fb4e3 Mon Sep 17 00:00:00 2001
+From: Todd Fujinaka
+Date: Tue, 17 Jun 2014 06:58:11 +0000
+Subject: igb: bring link up when PHY is powered up
+
+From: Todd Fujinaka
+
+commit aec653c43b0c55667355e26d7de1236bda9fb4e3 upstream.
+
+Call igb_setup_link() when the PHY is powered up.
+
+Signed-off-by: Todd Fujinaka
+Reported-by: Jeff Westfahl
+Tested-by: Aaron Brown
+Signed-off-by: Jeff Kirsher
+Cc: Vincent Donnefort
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/ethernet/intel/igb/igb_main.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1584,6 +1584,8 @@ void igb_power_up_link(struct igb_adapte
+ 		igb_power_up_phy_copper(&adapter->hw);
+ 	else
+ 		igb_power_up_serdes_link_82575(&adapter->hw);
++
++	igb_setup_link(&adapter->hw);
+ }
+ 
+ /**
diff --git a/queue-3.10/nept-nested-invept.patch b/queue-3.10/nept-nested-invept.patch
new file mode 100644
index 00000000000..5885fe6b262
--- /dev/null
+++ b/queue-3.10/nept-nested-invept.patch
@@ -0,0 +1,80 @@
+From 02a988e6e4511b1f6d83525710a12db9c5a45149 Mon Sep 17 00:00:00 2001
+From: Nadav Har'El
+Date: Mon, 5 Aug 2013 11:07:17 +0300
+Subject: nEPT: Nested INVEPT
+
+From: Nadav Har'El
+
+commit bfd0a56b90005f8c8a004baf407ad90045c2b11e upstream.
+
+If we let L1 use EPT, we should probably also support the INVEPT instruction.
+
+In our current nested EPT implementation, when L1 changes its EPT table
+for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
+the course of this modification already calls INVEPT. But if last level
+of shadow page is unsync not all L1's changes to EPT12 are intercepted,
+which means roots need to be synced when L1 calls INVEPT. Global INVEPT
+should not be different since roots are synced by kvm_mmu_load() each
+time EPTP02 changes.
+
+Reviewed-by: Xiao Guangrong
+Signed-off-by: Nadav Har'El
+Signed-off-by: Jun Nakajima
+Signed-off-by: Xinhao Xu
+Signed-off-by: Yang Zhang
+Signed-off-by: Gleb Natapov
+Signed-off-by: Paolo Bonzini
+[bwh: Backported to 3.2:
+ - Adjust context, filename
+ - Simplify handle_invept() as recommended by Paolo - nEPT is not
+   supported so we always raise #UD]
+Signed-off-by: Ben Hutchings
+Cc: Vinson Lee
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/include/uapi/asm/vmx.h |    1 +
+ arch/x86/kvm/vmx.c              |    8 ++++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -65,6 +65,7 @@
+ #define EXIT_REASON_EOI_INDUCED         45
+ #define EXIT_REASON_EPT_VIOLATION       48
+ #define EXIT_REASON_EPT_MISCONFIG       49
++#define EXIT_REASON_INVEPT              50
+ #define EXIT_REASON_PREEMPTION_TIMER    52
+ #define EXIT_REASON_WBINVD              54
+ #define EXIT_REASON_XSETBV              55
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6242,6 +6242,12 @@ static int handle_vmptrst(struct kvm_vcp
+ 	return 1;
+ }
+ 
++static int handle_invept(struct kvm_vcpu *vcpu)
++{
++	kvm_queue_exception(vcpu, UD_VECTOR);
++	return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6286,6 +6292,7 @@ static int (*const kvm_vmx_exit_handlers
+ 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
+ 	[EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
+ 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
++	[EXIT_REASON_INVEPT]                  = handle_invept,
+ };
+ 
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6512,6 +6519,7 @@ static bool nested_vmx_exit_handled(stru
+ 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
++	case EXIT_REASON_INVEPT:
+ 		/*
+ 		 * VMX instructions trap unconditionally. This allows L1 to
+		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
diff --git a/queue-3.10/powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch b/queue-3.10/powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch
new file mode 100644
index 00000000000..ce3382162a7
--- /dev/null
+++ b/queue-3.10/powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch
@@ -0,0 +1,34 @@
+From 152d44a853e42952f6c8a504fb1f8eefd21fd5fd Mon Sep 17 00:00:00 2001
+From: Anton Blanchard
+Date: Thu, 27 Nov 2014 08:11:28 +1100
+Subject: powerpc: 32 bit getcpu VDSO function uses 64 bit instructions
+
+From: Anton Blanchard
+
+commit 152d44a853e42952f6c8a504fb1f8eefd21fd5fd upstream.
+
+I used some 64 bit instructions when adding the 32 bit getcpu VDSO
+function. Fix it.
+
+Fixes: 18ad51dd342a ("powerpc: Add VDSO version of getcpu")
+Signed-off-by: Anton Blanchard
+Signed-off-by: Michael Ellerman
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/powerpc/kernel/vdso32/getcpu.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/vdso32/getcpu.S
++++ b/arch/powerpc/kernel/vdso32/getcpu.S
+@@ -30,8 +30,8 @@
+ V_FUNCTION_BEGIN(__kernel_getcpu)
+   .cfi_startproc
+ 	mfspr	r5,SPRN_USPRG3
+-	cmpdi	cr0,r3,0
+-	cmpdi	cr1,r4,0
++	cmpwi	cr0,r3,0
++	cmpwi	cr1,r4,0
+ 	clrlwi	r6,r5,16
+ 	rlwinm	r7,r5,16,31-15,31-0
+ 	beq	cr0,1f
diff --git a/queue-3.10/series b/queue-3.10/series
index 81e4f1b5814..54a1fa65bfd 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -16,3 +16,8 @@ net-mlx4_core-limit-count-field-to-24-bits-in-qp_alloc_res.patch
 rtnetlink-release-net-refcnt-on-error-in-do_setlink.patch
 net-mvneta-fix-tx-interrupt-delay.patch
 net-sctp-use-max_header-for-headroom-reserve-in-output-path.patch
+nept-nested-invept.patch
+ext2-fix-oops-in-ext2_get_block-called-from-ext2_quota_write.patch
+igb-bring-link-up-when-phy-is-powered-up.patch
+arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
+powerpc-32-bit-getcpu-vdso-function-uses-64-bit-instructions.patch
-- 
2.47.3