]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
.34 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Fri, 25 Jun 2010 23:29:41 +0000 (16:29 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Fri, 25 Jun 2010 23:29:41 +0000 (16:29 -0700)
32 files changed:
queue-2.6.34/0001-KVM-SVM-Don-t-use-kmap_atomic-in-nested_svm_map.patch [new file with mode: 0644]
queue-2.6.34/0002-KVM-SVM-Fix-schedule-while-atomic-on-nested-exceptio.patch [new file with mode: 0644]
queue-2.6.34/0003-KVM-SVM-Sync-all-control-registers-on-nested-vmexit.patch [new file with mode: 0644]
queue-2.6.34/0004-KVM-SVM-Fix-nested-msr-intercept-handling.patch [new file with mode: 0644]
queue-2.6.34/0005-KVM-SVM-Don-t-sync-nested-cr8-to-lapic-and-back.patch [new file with mode: 0644]
queue-2.6.34/0006-KVM-SVM-Fix-wrong-interrupt-injection-in-enable_irq_.patch [new file with mode: 0644]
queue-2.6.34/0007-KVM-s390-Fix-possible-memory-leak-of-in-kvm_arch_vcp.patch [new file with mode: 0644]
queue-2.6.34/0008-KVM-PPC-Do-not-create-debugfs-if-fail-to-create-vcpu.patch [new file with mode: 0644]
queue-2.6.34/0009-KVM-x86-Add-callback-to-let-modules-decide-over-some.patch [new file with mode: 0644]
queue-2.6.34/0010-KVM-SVM-Report-emulated-SVM-features-to-userspace.patch [new file with mode: 0644]
queue-2.6.34/0011-x86-paravirt-Add-a-global-synchronization-point-for-.patch [new file with mode: 0644]
queue-2.6.34/0012-KVM-Don-t-allow-lmsw-to-clear-cr0.pe.patch [new file with mode: 0644]
queue-2.6.34/0013-KVM-x86-Check-LMA-bit-before-set_efer.patch [new file with mode: 0644]
queue-2.6.34/0014-KVM-MMU-Segregate-shadow-pages-with-different-cr0.wp.patch [new file with mode: 0644]
queue-2.6.34/0015-KVM-VMX-enable-VMXON-check-with-SMX-enabled-Intel-TX.patch [new file with mode: 0644]
queue-2.6.34/0016-KVM-MMU-Don-t-read-pdptrs-with-mmu-spinlock-held-in-.patch [new file with mode: 0644]
queue-2.6.34/0017-KVM-Fix-wallclock-version-writing-race.patch [new file with mode: 0644]
queue-2.6.34/0018-KVM-PPC-Add-missing-vcpu_load-vcpu_put-in-vcpu-ioctl.patch [new file with mode: 0644]
queue-2.6.34/0019-KVM-x86-Add-missing-locking-to-arch-specific-vcpu-io.patch [new file with mode: 0644]
queue-2.6.34/0020-KVM-x86-Inject-GP-with-the-right-rip-on-efer-writes.patch [new file with mode: 0644]
queue-2.6.34/0021-KVM-SVM-Don-t-allow-nested-guest-to-VMMCALL-into-hos.patch [new file with mode: 0644]
queue-2.6.34/btrfs-should-add-a-permission-check-for-setfacl.patch [new file with mode: 0644]
queue-2.6.34/cifs-allow-null-nd-as-nfs-server-uses-on-create.patch [new file with mode: 0644]
queue-2.6.34/drm-i915-don-t-touch-port_hotplug_en-in-intel_dp_detect.patch [new file with mode: 0644]
queue-2.6.34/ext4-check-s_log_groups_per_flex-in-online-resize-code.patch [new file with mode: 0644]
queue-2.6.34/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch [new file with mode: 0644]
queue-2.6.34/gfs2-fix-permissions-checking-for-setflags-ioctl.patch [new file with mode: 0644]
queue-2.6.34/nfs-ensure-that-we-mark-the-inode-as-dirty-if-we-exit-early-from-commit.patch [new file with mode: 0644]
queue-2.6.34/nfs-fix-another-nfs_wb_page-deadlock.patch [new file with mode: 0644]
queue-2.6.34/parisc-clear-floating-point-exception-flag-on-sigfpe-signal.patch [new file with mode: 0644]
queue-2.6.34/series
queue-2.6.34/v4l-dvb-uvcvideo-prevent-division-by-0-when-control-step-value-is-0.patch [new file with mode: 0644]

diff --git a/queue-2.6.34/0001-KVM-SVM-Don-t-use-kmap_atomic-in-nested_svm_map.patch b/queue-2.6.34/0001-KVM-SVM-Don-t-use-kmap_atomic-in-nested_svm_map.patch
new file mode 100644 (file)
index 0000000..d5e2bd2
--- /dev/null
@@ -0,0 +1,202 @@
+From 7e8d09a90451d41a346c6378dd3fe048b7e86aa9 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:00 +0100
+Subject: KVM: SVM: Don't use kmap_atomic in nested_svm_map
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+Use of kmap_atomic disables preemption but if we run in
+shadow-shadow mode the vmrun emulation executes kvm_set_cr3
+which might sleep or fault. So use kmap instead for
+nested_svm_map.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 7597f129d8b6799da7a264e6d6f7401668d3a36d)
+---
+ arch/x86/kvm/svm.c |   47 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 24 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1423,7 +1423,7 @@ static inline int nested_svm_intr(struct
+       return 0;
+ }
+-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
++static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
+ {
+       struct page *page;
+@@ -1431,7 +1431,9 @@ static void *nested_svm_map(struct vcpu_
+       if (is_error_page(page))
+               goto error;
+-      return kmap_atomic(page, idx);
++      *_page = page;
++
++      return kmap(page);
+ error:
+       kvm_release_page_clean(page);
+@@ -1440,16 +1442,9 @@ error:
+       return NULL;
+ }
+-static void nested_svm_unmap(void *addr, enum km_type idx)
++static void nested_svm_unmap(struct page *page)
+ {
+-      struct page *page;
+-
+-      if (!addr)
+-              return;
+-
+-      page = kmap_atomic_to_page(addr);
+-
+-      kunmap_atomic(addr, idx);
++      kunmap(page);
+       kvm_release_page_dirty(page);
+ }
+@@ -1457,6 +1452,7 @@ static bool nested_svm_exit_handled_msr(
+ {
+       u32 param = svm->vmcb->control.exit_info_1 & 1;
+       u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
++      struct page *page;
+       bool ret = false;
+       u32 t0, t1;
+       u8 *msrpm;
+@@ -1464,7 +1460,7 @@ static bool nested_svm_exit_handled_msr(
+       if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+               return false;
+-      msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
++      msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
+       if (!msrpm)
+               goto out;
+@@ -1492,7 +1488,7 @@ static bool nested_svm_exit_handled_msr(
+       ret = msrpm[t1] & ((1 << param) << t0);
+ out:
+-      nested_svm_unmap(msrpm, KM_USER0);
++      nested_svm_unmap(page);
+       return ret;
+ }
+@@ -1615,6 +1611,7 @@ static int nested_svm_vmexit(struct vcpu
+       struct vmcb *nested_vmcb;
+       struct vmcb *hsave = svm->nested.hsave;
+       struct vmcb *vmcb = svm->vmcb;
++      struct page *page;
+       trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
+                                      vmcb->control.exit_info_1,
+@@ -1622,7 +1619,7 @@ static int nested_svm_vmexit(struct vcpu
+                                      vmcb->control.exit_int_info,
+                                      vmcb->control.exit_int_info_err);
+-      nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
++      nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
+       if (!nested_vmcb)
+               return 1;
+@@ -1712,7 +1709,7 @@ static int nested_svm_vmexit(struct vcpu
+       /* Exit nested SVM mode */
+       svm->nested.vmcb = 0;
+-      nested_svm_unmap(nested_vmcb, KM_USER0);
++      nested_svm_unmap(page);
+       kvm_mmu_reset_context(&svm->vcpu);
+       kvm_mmu_load(&svm->vcpu);
+@@ -1723,9 +1720,10 @@ static int nested_svm_vmexit(struct vcpu
+ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
+ {
+       u32 *nested_msrpm;
++      struct page *page;
+       int i;
+-      nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
++      nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
+       if (!nested_msrpm)
+               return false;
+@@ -1734,7 +1732,7 @@ static bool nested_svm_vmrun_msrpm(struc
+       svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+-      nested_svm_unmap(nested_msrpm, KM_USER0);
++      nested_svm_unmap(page);
+       return true;
+ }
+@@ -1744,8 +1742,9 @@ static bool nested_svm_vmrun(struct vcpu
+       struct vmcb *nested_vmcb;
+       struct vmcb *hsave = svm->nested.hsave;
+       struct vmcb *vmcb = svm->vmcb;
++      struct page *page;
+-      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
++      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
+       if (!nested_vmcb)
+               return false;
+@@ -1857,7 +1856,7 @@ static bool nested_svm_vmrun(struct vcpu
+       svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
+       svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+-      nested_svm_unmap(nested_vmcb, KM_USER0);
++      nested_svm_unmap(page);
+       enable_gif(svm);
+@@ -1883,6 +1882,7 @@ static void nested_svm_vmloadsave(struct
+ static int vmload_interception(struct vcpu_svm *svm)
+ {
+       struct vmcb *nested_vmcb;
++      struct page *page;
+       if (nested_svm_check_permissions(svm))
+               return 1;
+@@ -1890,12 +1890,12 @@ static int vmload_interception(struct vc
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+       skip_emulated_instruction(&svm->vcpu);
+-      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
++      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
+       if (!nested_vmcb)
+               return 1;
+       nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
+-      nested_svm_unmap(nested_vmcb, KM_USER0);
++      nested_svm_unmap(page);
+       return 1;
+ }
+@@ -1903,6 +1903,7 @@ static int vmload_interception(struct vc
+ static int vmsave_interception(struct vcpu_svm *svm)
+ {
+       struct vmcb *nested_vmcb;
++      struct page *page;
+       if (nested_svm_check_permissions(svm))
+               return 1;
+@@ -1910,12 +1911,12 @@ static int vmsave_interception(struct vc
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+       skip_emulated_instruction(&svm->vcpu);
+-      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
++      nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
+       if (!nested_vmcb)
+               return 1;
+       nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
+-      nested_svm_unmap(nested_vmcb, KM_USER0);
++      nested_svm_unmap(page);
+       return 1;
+ }
diff --git a/queue-2.6.34/0002-KVM-SVM-Fix-schedule-while-atomic-on-nested-exceptio.patch b/queue-2.6.34/0002-KVM-SVM-Fix-schedule-while-atomic-on-nested-exceptio.patch
new file mode 100644 (file)
index 0000000..6ea57d2
--- /dev/null
@@ -0,0 +1,81 @@
+From be446d2f1f3c4956726ae748a375bb67cbfdd01c Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:02 +0100
+Subject: KVM: SVM: Fix schedule-while-atomic on nested exception handling
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+Move the actual vmexit routine out of code that runs with
+irqs and preemption disabled.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit b8e88bc8ffba5fe53fb8d8a0a4be3bbcffeebe56)
+---
+ arch/x86/kvm/svm.c |   23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcp
+ static void svm_complete_interrupts(struct vcpu_svm *svm);
+ static int nested_svm_exit_handled(struct vcpu_svm *svm);
++static int nested_svm_intercept(struct vcpu_svm *svm);
+ static int nested_svm_vmexit(struct vcpu_svm *svm);
+ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
+                                     bool has_error_code, u32 error_code);
+@@ -1384,6 +1385,8 @@ static int nested_svm_check_permissions(
+ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
+                                     bool has_error_code, u32 error_code)
+ {
++      int vmexit;
++
+       if (!is_nested(svm))
+               return 0;
+@@ -1392,7 +1395,11 @@ static int nested_svm_check_exception(st
+       svm->vmcb->control.exit_info_1 = error_code;
+       svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+-      return nested_svm_exit_handled(svm);
++      vmexit = nested_svm_intercept(svm);
++      if (vmexit == NESTED_EXIT_DONE)
++              svm->nested.exit_required = true;
++
++      return vmexit;
+ }
+ static inline int nested_svm_intr(struct vcpu_svm *svm)
+@@ -1521,7 +1528,7 @@ static int nested_svm_exit_special(struc
+ /*
+  * If this function returns true, this #vmexit was already handled
+  */
+-static int nested_svm_exit_handled(struct vcpu_svm *svm)
++static int nested_svm_intercept(struct vcpu_svm *svm)
+ {
+       u32 exit_code = svm->vmcb->control.exit_code;
+       int vmexit = NESTED_EXIT_HOST;
+@@ -1567,9 +1574,17 @@ static int nested_svm_exit_handled(struc
+       }
+       }
+-      if (vmexit == NESTED_EXIT_DONE) {
++      return vmexit;
++}
++
++static int nested_svm_exit_handled(struct vcpu_svm *svm)
++{
++      int vmexit;
++
++      vmexit = nested_svm_intercept(svm);
++
++      if (vmexit == NESTED_EXIT_DONE)
+               nested_svm_vmexit(svm);
+-      }
+       return vmexit;
+ }
diff --git a/queue-2.6.34/0003-KVM-SVM-Sync-all-control-registers-on-nested-vmexit.patch b/queue-2.6.34/0003-KVM-SVM-Sync-all-control-registers-on-nested-vmexit.patch
new file mode 100644 (file)
index 0000000..d656938
--- /dev/null
@@ -0,0 +1,39 @@
+From 63260ff877e19556454a2f4a5e32a68a1282de52 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:03 +0100
+Subject: KVM: SVM: Sync all control registers on nested vmexit
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+Currently the vmexit emulation does not sync control
+registers were the access is typically intercepted by the
+nested hypervisor. But we can not count on that intercepts
+to sync these registers too and make the code
+architecturally more correct.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit cdbbdc1210223879450555fee04c29ebf116576b)
+---
+ arch/x86/kvm/svm.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1647,9 +1647,13 @@ static int nested_svm_vmexit(struct vcpu
+       nested_vmcb->save.ds     = vmcb->save.ds;
+       nested_vmcb->save.gdtr   = vmcb->save.gdtr;
+       nested_vmcb->save.idtr   = vmcb->save.idtr;
++      nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
+       if (npt_enabled)
+               nested_vmcb->save.cr3    = vmcb->save.cr3;
++      else
++              nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+       nested_vmcb->save.cr2    = vmcb->save.cr2;
++      nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
+       nested_vmcb->save.rflags = vmcb->save.rflags;
+       nested_vmcb->save.rip    = vmcb->save.rip;
+       nested_vmcb->save.rsp    = vmcb->save.rsp;
diff --git a/queue-2.6.34/0004-KVM-SVM-Fix-nested-msr-intercept-handling.patch b/queue-2.6.34/0004-KVM-SVM-Fix-nested-msr-intercept-handling.patch
new file mode 100644 (file)
index 0000000..e81ea88
--- /dev/null
@@ -0,0 +1,58 @@
+From d3b1034ce389e2e6d9f9af31b1e00b507a49fba1 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:05 +0100
+Subject: KVM: SVM: Fix nested msr intercept handling
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+The nested_svm_exit_handled_msr() function maps only one
+page of the guests msr permission bitmap. This patch changes
+the code to use kvm_read_guest to fix the bug.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 4c7da8cb43c09e71a405b5aeaa58a1dbac3c39e9)
+---
+ arch/x86/kvm/svm.c |   13 +++----------
+ 1 file changed, 3 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1459,19 +1459,13 @@ static bool nested_svm_exit_handled_msr(
+ {
+       u32 param = svm->vmcb->control.exit_info_1 & 1;
+       u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+-      struct page *page;
+       bool ret = false;
+       u32 t0, t1;
+-      u8 *msrpm;
++      u8 val;
+       if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+               return false;
+-      msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
+-
+-      if (!msrpm)
+-              goto out;
+-
+       switch (msr) {
+       case 0 ... 0x1fff:
+               t0 = (msr * 2) % 8;
+@@ -1492,11 +1486,10 @@ static bool nested_svm_exit_handled_msr(
+               goto out;
+       }
+-      ret = msrpm[t1] & ((1 << param) << t0);
++      if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
++              ret = val & ((1 << param) << t0);
+ out:
+-      nested_svm_unmap(page);
+-
+       return ret;
+ }
diff --git a/queue-2.6.34/0005-KVM-SVM-Don-t-sync-nested-cr8-to-lapic-and-back.patch b/queue-2.6.34/0005-KVM-SVM-Don-t-sync-nested-cr8-to-lapic-and-back.patch
new file mode 100644 (file)
index 0000000..ff6e339
--- /dev/null
@@ -0,0 +1,110 @@
+From 922928bc370153851178e03b623b6be3047dc9ea Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:06 +0100
+Subject: KVM: SVM: Don't sync nested cr8 to lapic and back
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This patch makes syncing of the guest tpr to the lapic
+conditional on !nested. Otherwise a nested guest using the
+TPR could freeze the guest.
+Another important change this patch introduces is that the
+cr8 intercept bits are no longer ORed at vmrun emulation if
+the guest sets VINTR_MASKING in its VMCB. The reason is that
+nested cr8 accesses need alway be handled by the nested
+hypervisor because they change the shadow version of the
+tpr.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 88ab24adc7142506c8583ac36a34fa388300b750)
+---
+ arch/x86/kvm/svm.c |   46 +++++++++++++++++++++++++++++++---------------
+ 1 file changed, 31 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1830,21 +1830,6 @@ static bool nested_svm_vmrun(struct vcpu
+       svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+       svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+-      /* We don't want a nested guest to be more powerful than the guest,
+-         so all intercepts are ORed */
+-      svm->vmcb->control.intercept_cr_read |=
+-              nested_vmcb->control.intercept_cr_read;
+-      svm->vmcb->control.intercept_cr_write |=
+-              nested_vmcb->control.intercept_cr_write;
+-      svm->vmcb->control.intercept_dr_read |=
+-              nested_vmcb->control.intercept_dr_read;
+-      svm->vmcb->control.intercept_dr_write |=
+-              nested_vmcb->control.intercept_dr_write;
+-      svm->vmcb->control.intercept_exceptions |=
+-              nested_vmcb->control.intercept_exceptions;
+-
+-      svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+-
+       svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+       /* cache intercepts */
+@@ -1862,6 +1847,28 @@ static bool nested_svm_vmrun(struct vcpu
+       else
+               svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
++      if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
++              /* We only want the cr8 intercept bits of the guest */
++              svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
++              svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
++      }
++
++      /* We don't want a nested guest to be more powerful than the guest,
++         so all intercepts are ORed */
++      svm->vmcb->control.intercept_cr_read |=
++              nested_vmcb->control.intercept_cr_read;
++      svm->vmcb->control.intercept_cr_write |=
++              nested_vmcb->control.intercept_cr_write;
++      svm->vmcb->control.intercept_dr_read |=
++              nested_vmcb->control.intercept_dr_read;
++      svm->vmcb->control.intercept_dr_write |=
++              nested_vmcb->control.intercept_dr_write;
++      svm->vmcb->control.intercept_exceptions |=
++              nested_vmcb->control.intercept_exceptions;
++
++      svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
++
++      svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
+       svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
+       svm->vmcb->control.int_state = nested_vmcb->control.int_state;
+       svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
+@@ -2524,6 +2531,9 @@ static void update_cr8_intercept(struct
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++      if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
++              return;
++
+       if (irr == -1)
+               return;
+@@ -2627,6 +2637,9 @@ static inline void sync_cr8_to_lapic(str
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++      if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
++              return;
++
+       if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+               int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
+               kvm_set_cr8(vcpu, cr8);
+@@ -2638,6 +2651,9 @@ static inline void sync_lapic_to_cr8(str
+       struct vcpu_svm *svm = to_svm(vcpu);
+       u64 cr8;
++      if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
++              return;
++
+       cr8 = kvm_get_cr8(vcpu);
+       svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
+       svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
diff --git a/queue-2.6.34/0006-KVM-SVM-Fix-wrong-interrupt-injection-in-enable_irq_.patch b/queue-2.6.34/0006-KVM-SVM-Fix-wrong-interrupt-injection-in-enable_irq_.patch
new file mode 100644 (file)
index 0000000..fe2f1e5
--- /dev/null
@@ -0,0 +1,74 @@
+From 4df17c7e8758c2017d5fdff4fd2c036503b0bdf2 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 19 Feb 2010 16:23:01 +0100
+Subject: KVM: SVM: Fix wrong interrupt injection in enable_irq_windows
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+The nested_svm_intr() function does not execute the vmexit
+anymore. Therefore we may still be in the nested state after
+that function ran. This patch changes the nested_svm_intr()
+function to return whether the irq window could be enabled.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 8fe546547cf6857a9d984bfe2f2194910f3fc5d0)
+---
+ arch/x86/kvm/svm.c |   17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1402,16 +1402,17 @@ static int nested_svm_check_exception(st
+       return vmexit;
+ }
+-static inline int nested_svm_intr(struct vcpu_svm *svm)
++/* This function returns true if it is save to enable the irq window */
++static inline bool nested_svm_intr(struct vcpu_svm *svm)
+ {
+       if (!is_nested(svm))
+-              return 0;
++              return true;
+       if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+-              return 0;
++              return true;
+       if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
+-              return 0;
++              return false;
+       svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+@@ -1424,10 +1425,10 @@ static inline int nested_svm_intr(struct
+                */
+               svm->nested.exit_required = true;
+               trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
+-              return 1;
++              return false;
+       }
+-      return 0;
++      return true;
+ }
+ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
+@@ -2591,13 +2592,11 @@ static void enable_irq_window(struct kvm
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+-      nested_svm_intr(svm);
+-
+       /* In case GIF=0 we can't rely on the CPU to tell us when
+        * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
+        * The next time we get that intercept, this function will be
+        * called again though and we'll get the vintr intercept. */
+-      if (gif_set(svm)) {
++      if (gif_set(svm) && nested_svm_intr(svm)) {
+               svm_set_vintr(svm);
+               svm_inject_irq(svm, 0x0);
+       }
diff --git a/queue-2.6.34/0007-KVM-s390-Fix-possible-memory-leak-of-in-kvm_arch_vcp.patch b/queue-2.6.34/0007-KVM-s390-Fix-possible-memory-leak-of-in-kvm_arch_vcp.patch
new file mode 100644 (file)
index 0000000..c58bcab
--- /dev/null
@@ -0,0 +1,38 @@
+From f5d9ab3dba58eb91b95bcee13abef3ef0c24a2f9 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Tue, 9 Mar 2010 14:37:53 +0800
+Subject: KVM: s390: Fix possible memory leak of in kvm_arch_vcpu_create()
+
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+
+This patch fixed possible memory leak in kvm_arch_vcpu_create()
+under s390, which would happen when kvm_arch_vcpu_create() fails.
+
+Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+Acked-by: Carsten Otte <cotte@de.ibm.com>
+Cc: stable@kernel.org
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 7b06bf2ffa15e119c7439ed0b024d44f66d7b605)
+---
+ arch/s390/kvm/kvm-s390.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+       rc = kvm_vcpu_init(vcpu, kvm, id);
+       if (rc)
+-              goto out_free_cpu;
++              goto out_free_sie_block;
+       VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+                vcpu->arch.sie_block);
+       return vcpu;
++out_free_sie_block:
++      free_page((unsigned long)(vcpu->arch.sie_block));
+ out_free_cpu:
+       kfree(vcpu);
+ out_nomem:
diff --git a/queue-2.6.34/0008-KVM-PPC-Do-not-create-debugfs-if-fail-to-create-vcpu.patch b/queue-2.6.34/0008-KVM-PPC-Do-not-create-debugfs-if-fail-to-create-vcpu.patch
new file mode 100644 (file)
index 0000000..de89e55
--- /dev/null
@@ -0,0 +1,33 @@
+From ce50a489510399f48bda3ed88088d2ebbf6543c1 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Tue, 9 Mar 2010 14:13:43 +0800
+Subject: KVM: PPC: Do not create debugfs if fail to create vcpu
+
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+
+If fail to create the vcpu, we should not create the debugfs
+for it.
+
+Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+Acked-by: Alexander Graf <agraf@suse.de>
+Cc: stable@kernel.org
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 06056bfb944a0302a8f22eb45f09123de7fb417b)
+---
+ arch/powerpc/kvm/powerpc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -193,7 +193,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+ {
+       struct kvm_vcpu *vcpu;
+       vcpu = kvmppc_core_vcpu_create(kvm, id);
+-      kvmppc_create_vcpu_debugfs(vcpu, id);
++      if (!IS_ERR(vcpu))
++              kvmppc_create_vcpu_debugfs(vcpu, id);
+       return vcpu;
+ }
diff --git a/queue-2.6.34/0009-KVM-x86-Add-callback-to-let-modules-decide-over-some.patch b/queue-2.6.34/0009-KVM-x86-Add-callback-to-let-modules-decide-over-some.patch
new file mode 100644 (file)
index 0000000..ba4d260
--- /dev/null
@@ -0,0 +1,92 @@
+From 9ad047ffa6854d8b000f24b27d49262073ef54e8 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 22 Apr 2010 12:33:11 +0200
+Subject: KVM: x86: Add callback to let modules decide over some supported cpuid bits
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This patch adds the get_supported_cpuid callback to
+kvm_x86_ops. It will be used in do_cpuid_ent to delegate the
+decision about some supported cpuid bits to the
+architecture modules.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit d4330ef2fb2236a1e3a176f0f68360f4c0a8661b)
+---
+ arch/x86/include/asm/kvm_host.h |    2 ++
+ arch/x86/kvm/svm.c              |    6 ++++++
+ arch/x86/kvm/vmx.c              |    6 ++++++
+ arch/x86/kvm/x86.c              |    3 +++
+ 4 files changed, 17 insertions(+)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -541,6 +541,8 @@ struct kvm_x86_ops {
+       int (*get_lpage_level)(void);
+       bool (*rdtscp_supported)(void);
++      void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
++
+       const struct trace_print_flags *exit_reasons_str;
+ };
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2907,6 +2907,10 @@ static void svm_cpuid_update(struct kvm_
+ {
+ }
++static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
++{
++}
++
+ static const struct trace_print_flags svm_exit_reasons_str[] = {
+       { SVM_EXIT_READ_CR0,                    "read_cr0" },
+       { SVM_EXIT_READ_CR3,                    "read_cr3" },
+@@ -3051,6 +3055,8 @@ static struct kvm_x86_ops svm_x86_ops =
+       .cpuid_update = svm_cpuid_update,
+       .rdtscp_supported = svm_rdtscp_supported,
++
++      .set_supported_cpuid = svm_set_supported_cpuid,
+ };
+ static int __init svm_init(void)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4115,6 +4115,10 @@ static void vmx_cpuid_update(struct kvm_
+       }
+ }
++static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
++{
++}
++
+ static struct kvm_x86_ops vmx_x86_ops = {
+       .cpu_has_kvm_support = cpu_has_kvm_support,
+       .disabled_by_bios = vmx_disabled_by_bios,
+@@ -4186,6 +4190,8 @@ static struct kvm_x86_ops vmx_x86_ops =
+       .cpuid_update = vmx_cpuid_update,
+       .rdtscp_supported = vmx_rdtscp_supported,
++
++      .set_supported_cpuid = vmx_set_supported_cpuid,
+ };
+ static int __init vmx_init(void)
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1917,6 +1917,9 @@ static void do_cpuid_ent(struct kvm_cpui
+               entry->ecx &= kvm_supported_word6_x86_features;
+               break;
+       }
++
++      kvm_x86_ops->set_supported_cpuid(function, entry);
++
+       put_cpu();
+ }
diff --git a/queue-2.6.34/0010-KVM-SVM-Report-emulated-SVM-features-to-userspace.patch b/queue-2.6.34/0010-KVM-SVM-Report-emulated-SVM-features-to-userspace.patch
new file mode 100644 (file)
index 0000000..42dd126
--- /dev/null
@@ -0,0 +1,41 @@
+From 6156fef4380a83aec31475a791fb3ae1bbe7865e Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 22 Apr 2010 12:33:12 +0200
+Subject: KVM: SVM: Report emulated SVM features to userspace
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This patch implements the reporting of the emulated SVM
+features to userspace instead of the real hardware
+capabilities. Every real hardware capability needs emulation
+in nested svm so the old behavior was broken.
+
+Cc: stable@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit c2c63a493924e09a1984d1374a0e60dfd54fc0b0)
+---
+ arch/x86/kvm/svm.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2909,6 +2909,16 @@ static void svm_cpuid_update(struct kvm_
+ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ {
++      switch (func) {
++      case 0x8000000A:
++              entry->eax = 1; /* SVM revision 1 */
++              entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
++                                 ASID emulation to nested SVM */
++              entry->ecx = 0; /* Reserved */
++              entry->edx = 0; /* Do not support any additional features */
++
++              break;
++      }
+ }
+ static const struct trace_print_flags svm_exit_reasons_str[] = {
diff --git a/queue-2.6.34/0011-x86-paravirt-Add-a-global-synchronization-point-for-.patch b/queue-2.6.34/0011-x86-paravirt-Add-a-global-synchronization-point-for-.patch
new file mode 100644 (file)
index 0000000..b36ee66
--- /dev/null
@@ -0,0 +1,103 @@
+From b1db60943cec14d04fff137e80c0031bea5449bf Mon Sep 17 00:00:00 2001
+From: Glauber Costa <glommer@redhat.com>
+Date: Tue, 11 May 2010 12:17:40 -0400
+Subject: x86, paravirt: Add a global synchronization point for pvclock
+
+From: Glauber Costa <glommer@redhat.com>
+
+In recent stress tests, it was found that pvclock-based systems
+could seriously warp in smp systems. Using ingo's time-warp-test.c,
+I could trigger a scenario as bad as 1.5mi warps a minute in some systems.
+(to be fair, it wasn't that bad in most of them). Investigating further, I
+found out that such warps were caused by the very offset-based calculation
+pvclock is based on.
+
+This happens even on some machines that report constant_tsc in its tsc flags,
+specially on multi-socket ones.
+
+Two reads of the same kernel timestamp at approx the same time, will likely
+have tsc timestamped in different occasions too. This means the delta we
+calculate is unpredictable at best, and can probably be smaller in a cpu
+that is legitimately reading clock in a forward ocasion.
+
+Some adjustments on the host could make this window less likely to happen,
+but still, it pretty much poses as an intrinsic problem of the mechanism.
+
+A while ago, I though about using a shared variable anyway, to hold clock
+last state, but gave up due to the high contention locking was likely
+to introduce, possibly rendering the thing useless on big machines. I argue,
+however, that locking is not necessary.
+
+We do a read-and-return sequence in pvclock, and between read and return,
+the global value can have changed. However, it can only have changed
+by means of an addition of a positive value. So if we detected that our
+clock timestamp is less than the current global, we know that we need to
+return a higher one, even though it is not exactly the one we compared to.
+
+OTOH, if we detect we're greater than the current time source, we atomically
+replace the value with our new readings. This do causes contention on big
+boxes (but big here means *BIG*), but it seems like a good trade off, since
+it provide us with a time source guaranteed to be stable wrt time warps.
+
+After this patch is applied, I don't see a single warp in time during 5 days
+of execution, in any of the machines I saw them before.
+
+Signed-off-by: Glauber Costa <glommer@redhat.com>
+Acked-by: Zachary Amsden <zamsden@redhat.com>
+CC: Jeremy Fitzhardinge <jeremy@goop.org>
+CC: Avi Kivity <avi@redhat.com>
+CC: Marcelo Tosatti <mtosatti@redhat.com>
+CC: Zachary Amsden <zamsden@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 489fb490dbf8dab0249ad82b56688ae3842a79e8)
+---
+ arch/x86/kernel/pvclock.c |   24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvc
+       return pv_tsc_khz;
+ }
++static atomic64_t last_value = ATOMIC64_INIT(0);
++
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+ {
+       struct pvclock_shadow_time shadow;
+       unsigned version;
+       cycle_t ret, offset;
++      u64 last;
+       do {
+               version = pvclock_get_time_values(&shadow, src);
+@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct
+               barrier();
+       } while (version != src->version);
++      /*
++       * Assumption here is that last_value, a global accumulator, always goes
++       * forward. If we are less than that, we should not be much smaller.
++       * We assume there is an error marging we're inside, and then the correction
++       * does not sacrifice accuracy.
++       *
++       * For reads: global may have changed between test and return,
++       * but this means someone else updated poked the clock at a later time.
++       * We just need to make sure we are not seeing a backwards event.
++       *
++       * For updates: last_value = ret is not enough, since two vcpus could be
++       * updating at the same time, and one of them could be slightly behind,
++       * making the assumption that last_value always go forward fail to hold.
++       */
++      last = atomic64_read(&last_value);
++      do {
++              if (ret < last)
++                      return last;
++              last = atomic64_cmpxchg(&last_value, last, ret);
++      } while (unlikely(last != ret));
++
+       return ret;
+ }
diff --git a/queue-2.6.34/0012-KVM-Don-t-allow-lmsw-to-clear-cr0.pe.patch b/queue-2.6.34/0012-KVM-Don-t-allow-lmsw-to-clear-cr0.pe.patch
new file mode 100644 (file)
index 0000000..cfc36af
--- /dev/null
@@ -0,0 +1,32 @@
+From 9b07320c87942ef1f3fab174644ba490401af2c2 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Wed, 12 May 2010 00:28:44 +0300
+Subject: KVM: Don't allow lmsw to clear cr0.pe
+
+From: Avi Kivity <avi@redhat.com>
+
+The current lmsw implementation allows the guest to clear cr0.pe, contrary
+to the manual, which breaks EMM386.EXE.
+
+Fix by ORing the old cr0.pe with lmsw's operand.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit f78e917688edbf1f14c318d2e50dc8e7dad20445)
+---
+ arch/x86/kvm/x86.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -484,7 +484,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
+ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+ {
+-      kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
++      kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
+ }
+ EXPORT_SYMBOL_GPL(kvm_lmsw);
diff --git a/queue-2.6.34/0013-KVM-x86-Check-LMA-bit-before-set_efer.patch b/queue-2.6.34/0013-KVM-x86-Check-LMA-bit-before-set_efer.patch
new file mode 100644 (file)
index 0000000..d295ee6
--- /dev/null
@@ -0,0 +1,35 @@
+From 6e0ac7d910bc4c943b21553736d0bf593c30eda7 Mon Sep 17 00:00:00 2001
+From: Sheng Yang <sheng@linux.intel.com>
+Date: Wed, 12 May 2010 16:40:40 +0800
+Subject: KVM: x86: Check LMA bit before set_efer
+
+From: Sheng Yang <sheng@linux.intel.com>
+
+kvm_x86_ops->set_efer() would execute vcpu->arch.efer = efer, so the
+checking of LMA bit didn't work.
+
+Signed-off-by: Sheng Yang <sheng@linux.intel.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit a3d204e28579427609c3d15d2310127ebaa47d94)
+---
+ arch/x86/kvm/x86.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -657,11 +657,11 @@ static void set_efer(struct kvm_vcpu *vc
+               }
+       }
+-      kvm_x86_ops->set_efer(vcpu, efer);
+-
+       efer &= ~EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
++      kvm_x86_ops->set_efer(vcpu, efer);
++
+       vcpu->arch.efer = efer;
+       vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
diff --git a/queue-2.6.34/0014-KVM-MMU-Segregate-shadow-pages-with-different-cr0.wp.patch b/queue-2.6.34/0014-KVM-MMU-Segregate-shadow-pages-with-different-cr0.wp.patch
new file mode 100644 (file)
index 0000000..b63c36a
--- /dev/null
@@ -0,0 +1,53 @@
+From e605d4e365cde0418bfb3bd45ef6399043fcabe0 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Wed, 12 May 2010 11:48:18 +0300
+Subject: KVM: MMU: Segregate shadow pages with different cr0.wp
+
+From: Avi Kivity <avi@redhat.com>
+
+When cr0.wp=0, we may shadow a gpte having u/s=1 and r/w=0 with an spte
+having u/s=0 and r/w=1.  This allows excessive access if the guest sets
+cr0.wp=1 and accesses through this spte.
+
+Fix by making cr0.wp part of the base role; we'll have different sptes for
+the two cases and the problem disappears.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 3dbe141595faa48a067add3e47bba3205b79d33c)
+---
+ arch/x86/include/asm/kvm_host.h |    1 +
+ arch/x86/kvm/mmu.c              |    3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -180,6 +180,7 @@ union kvm_mmu_page_role {
+               unsigned invalid:1;
+               unsigned cr4_pge:1;
+               unsigned nxe:1;
++              unsigned cr0_wp:1;
+       };
+ };
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -223,7 +223,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+-static int is_write_protection(struct kvm_vcpu *vcpu)
++static bool is_write_protection(struct kvm_vcpu *vcpu)
+ {
+       return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
+ }
+@@ -2439,6 +2439,7 @@ static int init_kvm_softmmu(struct kvm_v
+               r = paging32_init_context(vcpu);
+       vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
++      vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+       return r;
+ }
diff --git a/queue-2.6.34/0015-KVM-VMX-enable-VMXON-check-with-SMX-enabled-Intel-TX.patch b/queue-2.6.34/0015-KVM-VMX-enable-VMXON-check-with-SMX-enabled-Intel-TX.patch
new file mode 100644 (file)
index 0000000..1037874
--- /dev/null
@@ -0,0 +1,124 @@
+From cda3861a696da23d86328b45dd9151a6e42eb03c Mon Sep 17 00:00:00 2001
+From: Shane Wang <shane.wang@intel.com>
+Date: Thu, 29 Apr 2010 12:09:01 -0400
+Subject: KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
+
+From: Shane Wang <shane.wang@intel.com>
+
+Per document, for feature control MSR:
+
+  Bit 1 enables VMXON in SMX operation. If the bit is clear, execution
+        of VMXON in SMX operation causes a general-protection exception.
+  Bit 2 enables VMXON outside SMX operation. If the bit is clear, execution
+        of VMXON outside SMX operation causes a general-protection exception.
+
+This patch is to enable this kind of check with SMX for VMXON in KVM.
+
+Signed-off-by: Shane Wang <shane.wang@intel.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit cafd66595d92591e4bd25c3904e004fc6f897e2d)
+---
+ arch/x86/include/asm/msr-index.h |    5 +++--
+ arch/x86/kernel/tboot.c          |    1 +
+ arch/x86/kvm/vmx.c               |   32 +++++++++++++++++++++-----------
+ include/linux/tboot.h            |    1 +
+ 4 files changed, 26 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -199,8 +199,9 @@
+ #define MSR_IA32_EBL_CR_POWERON               0x0000002a
+ #define MSR_IA32_FEATURE_CONTROL        0x0000003a
+-#define FEATURE_CONTROL_LOCKED                (1<<0)
+-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
++#define FEATURE_CONTROL_LOCKED                                (1<<0)
++#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX      (1<<1)
++#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX     (1<<2)
+ #define MSR_IA32_APICBASE             0x0000001b
+ #define MSR_IA32_APICBASE_BSP         (1<<8)
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -46,6 +46,7 @@
+ /* Global pointer to shared data; NULL means no measured launch. */
+ struct tboot *tboot __read_mostly;
++EXPORT_SYMBOL(tboot);
+ /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
+ #define AP_WAIT_TIMEOUT               1
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -27,6 +27,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/ftrace_event.h>
+ #include <linux/slab.h>
++#include <linux/tboot.h>
+ #include "kvm_cache_regs.h"
+ #include "x86.h"
+@@ -1176,9 +1177,16 @@ static __init int vmx_disabled_by_bios(v
+       u64 msr;
+       rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+-      return (msr & (FEATURE_CONTROL_LOCKED |
+-                     FEATURE_CONTROL_VMXON_ENABLED))
+-          == FEATURE_CONTROL_LOCKED;
++      if (msr & FEATURE_CONTROL_LOCKED) {
++              if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
++                      && tboot_enabled())
++                      return 1;
++              if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
++                      && !tboot_enabled())
++                      return 1;
++      }
++
++      return 0;
+       /* locked but not enabled */
+ }
+@@ -1186,21 +1194,23 @@ static int hardware_enable(void *garbage
+ {
+       int cpu = raw_smp_processor_id();
+       u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+-      u64 old;
++      u64 old, test_bits;
+       if (read_cr4() & X86_CR4_VMXE)
+               return -EBUSY;
+       INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
+       rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+-      if ((old & (FEATURE_CONTROL_LOCKED |
+-                  FEATURE_CONTROL_VMXON_ENABLED))
+-          != (FEATURE_CONTROL_LOCKED |
+-              FEATURE_CONTROL_VMXON_ENABLED))
++
++      test_bits = FEATURE_CONTROL_LOCKED;
++      test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
++      if (tboot_enabled())
++              test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
++
++      if ((old & test_bits) != test_bits) {
+               /* enable and lock */
+-              wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
+-                     FEATURE_CONTROL_LOCKED |
+-                     FEATURE_CONTROL_VMXON_ENABLED);
++              wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
++      }
+       write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+       asm volatile (ASM_VMX_VMXON_RAX
+                     : : "a"(&phys_addr), "m"(phys_addr)
+--- a/include/linux/tboot.h
++++ b/include/linux/tboot.h
+@@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);
+ #else
++#define tboot_enabled()                       0
+ #define tboot_probe()                 do { } while (0)
+ #define tboot_shutdown(shutdown_type) do { } while (0)
+ #define tboot_sleep(sleep_state, pm1a_control, pm1b_control)  \
diff --git a/queue-2.6.34/0016-KVM-MMU-Don-t-read-pdptrs-with-mmu-spinlock-held-in-.patch b/queue-2.6.34/0016-KVM-MMU-Don-t-read-pdptrs-with-mmu-spinlock-held-in-.patch
new file mode 100644 (file)
index 0000000..a576f3d
--- /dev/null
@@ -0,0 +1,62 @@
+From b750cba1ab93811bc8da6d4eb7bd51e7827fc209 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Tue, 4 May 2010 12:58:32 +0300
+Subject: KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
+
+From: Avi Kivity <avi@redhat.com>
+
+On svm, kvm_read_pdptr() may require reading guest memory, which can sleep.
+
+Push the spinlock into mmu_alloc_roots(), and only take it after we've read
+the pdptr.
+
+Tested-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 8facbbff071ff2b19268d3732e31badc60471e21)
+---
+ arch/x86/kvm/mmu.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2085,11 +2085,13 @@ static int mmu_alloc_roots(struct kvm_vc
+                       direct = 1;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
++              spin_lock(&vcpu->kvm->mmu_lock);
+               sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
+                                     PT64_ROOT_LEVEL, direct,
+                                     ACC_ALL, NULL);
+               root = __pa(sp->spt);
+               ++sp->root_count;
++              spin_unlock(&vcpu->kvm->mmu_lock);
+               vcpu->arch.mmu.root_hpa = root;
+               return 0;
+       }
+@@ -2111,11 +2113,14 @@ static int mmu_alloc_roots(struct kvm_vc
+                       root_gfn = 0;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
++              spin_lock(&vcpu->kvm->mmu_lock);
+               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+                                     PT32_ROOT_LEVEL, direct,
+                                     ACC_ALL, NULL);
+               root = __pa(sp->spt);
+               ++sp->root_count;
++              spin_unlock(&vcpu->kvm->mmu_lock);
++
+               vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+       }
+       vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+@@ -2479,7 +2484,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
+               goto out;
+       spin_lock(&vcpu->kvm->mmu_lock);
+       kvm_mmu_free_some_pages(vcpu);
++      spin_unlock(&vcpu->kvm->mmu_lock);
+       r = mmu_alloc_roots(vcpu);
++      spin_lock(&vcpu->kvm->mmu_lock);
+       mmu_sync_roots(vcpu);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       if (r)
diff --git a/queue-2.6.34/0017-KVM-Fix-wallclock-version-writing-race.patch b/queue-2.6.34/0017-KVM-Fix-wallclock-version-writing-race.patch
new file mode 100644 (file)
index 0000000..0502038
--- /dev/null
@@ -0,0 +1,47 @@
+From 57aac94b10f3342bc9affdda286c0f8eb47fac69 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Tue, 4 May 2010 15:00:37 +0300
+Subject: KVM: Fix wallclock version writing race
+
+From: Avi Kivity <avi@redhat.com>
+
+Wallclock writing uses an unprotected global variable to hold the version;
+this can cause one guest to interfere with another if both write their
+wallclock at the same time.
+
+Acked-by: Glauber Costa <glommer@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 9ed3c444ab8987c7b219173a2f7807e3f71e234e)
+---
+ arch/x86/kvm/x86.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -695,14 +695,22 @@ static int do_set_msr(struct kvm_vcpu *v
+ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+ {
+-      static int version;
++      int version;
++      int r;
+       struct pvclock_wall_clock wc;
+       struct timespec boot;
+       if (!wall_clock)
+               return;
+-      version++;
++      r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
++      if (r)
++              return;
++
++      if (version & 1)
++              ++version;  /* first time write, random junk */
++
++      ++version;
+       kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
diff --git a/queue-2.6.34/0018-KVM-PPC-Add-missing-vcpu_load-vcpu_put-in-vcpu-ioctl.patch b/queue-2.6.34/0018-KVM-PPC-Add-missing-vcpu_load-vcpu_put-in-vcpu-ioctl.patch
new file mode 100644 (file)
index 0000000..9ff6657
--- /dev/null
@@ -0,0 +1,108 @@
+From cd0f492deb2db86a1edab4713b917e8279a78fec Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Thu, 13 May 2010 11:05:49 +0300
+Subject: KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
+
+From: Avi Kivity <avi@redhat.com>
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 98001d8d017cea1ee0f9f35c6227bbd63ef5005b)
+---
+ arch/powerpc/kvm/book3s.c |   10 ++++++++++
+ arch/powerpc/kvm/booke.c  |   15 ++++++++++++++-
+ 2 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -922,6 +922,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct
+       struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+       int i;
++      vcpu_load(vcpu);
++
+       sregs->pvr = vcpu->arch.pvr;
+       sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+@@ -940,6 +942,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct
+                       sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+               }
+       }
++
++      vcpu_put(vcpu);
++
+       return 0;
+ }
+@@ -949,6 +954,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+       struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+       int i;
++      vcpu_load(vcpu);
++
+       kvmppc_set_pvr(vcpu, sregs->pvr);
+       vcpu3s->sdr1 = sregs->u.s.sdr1;
+@@ -975,6 +982,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+       /* Flush the MMU after messing with the segments */
+       kvmppc_mmu_pte_flush(vcpu, 0, 0);
++
++      vcpu_put(vcpu);
++
+       return 0;
+ }
+--- a/arch/powerpc/kvm/booke.c
++++ b/arch/powerpc/kvm/booke.c
+@@ -479,6 +479,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct
+ {
+       int i;
++      vcpu_load(vcpu);
++
+       regs->pc = vcpu->arch.pc;
+       regs->cr = kvmppc_get_cr(vcpu);
+       regs->ctr = vcpu->arch.ctr;
+@@ -499,6 +501,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct
+       for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+               regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
++      vcpu_put(vcpu);
++
+       return 0;
+ }
+@@ -506,6 +510,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct
+ {
+       int i;
++      vcpu_load(vcpu);
++
+       vcpu->arch.pc = regs->pc;
+       kvmppc_set_cr(vcpu, regs->cr);
+       vcpu->arch.ctr = regs->ctr;
+@@ -525,6 +531,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct
+       for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+               kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
++      vcpu_put(vcpu);
++
+       return 0;
+ }
+@@ -553,7 +561,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct k
+ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                   struct kvm_translation *tr)
+ {
+-      return kvmppc_core_vcpu_translate(vcpu, tr);
++      int r;
++
++      vcpu_load(vcpu);
++      r = kvmppc_core_vcpu_translate(vcpu, tr);
++      vcpu_put(vcpu);
++      return r;
+ }
+ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/queue-2.6.34/0019-KVM-x86-Add-missing-locking-to-arch-specific-vcpu-io.patch b/queue-2.6.34/0019-KVM-x86-Add-missing-locking-to-arch-specific-vcpu-io.patch
new file mode 100644 (file)
index 0000000..e201b9c
--- /dev/null
@@ -0,0 +1,59 @@
+From 816cae099d49f24ad67297fe068449e16c231899 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Thu, 13 May 2010 11:50:19 +0300
+Subject: KVM: x86: Add missing locking to arch specific vcpu ioctls
+
+From: Avi Kivity <avi@redhat.com>
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 8fbf065d625617bbbf6b72d5f78f84ad13c8b547)
+---
+ arch/x86/kvm/x86.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1776,6 +1776,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
+ {
+       int r;
++      vcpu_load(vcpu);
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+@@ -1787,6 +1788,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
+ out:
+       cpuid->nent = vcpu->arch.cpuid_nent;
++      vcpu_put(vcpu);
+       return r;
+ }
+@@ -2042,6 +2044,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
+       int r;
+       unsigned bank_num = mcg_cap & 0xff, bank;
++      vcpu_load(vcpu);
+       r = -EINVAL;
+       if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+               goto out;
+@@ -2056,6 +2059,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
+       for (bank = 0; bank < bank_num; bank++)
+               vcpu->arch.mce_banks[bank*4] = ~(u64)0;
+ out:
++      vcpu_put(vcpu);
+       return r;
+ }
+@@ -2323,7 +2327,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+               r = -EFAULT;
+               if (copy_from_user(&mce, argp, sizeof mce))
+                       goto out;
++              vcpu_load(vcpu);
+               r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
++              vcpu_put(vcpu);
+               break;
+       }
+       case KVM_GET_VCPU_EVENTS: {
diff --git a/queue-2.6.34/0020-KVM-x86-Inject-GP-with-the-right-rip-on-efer-writes.patch b/queue-2.6.34/0020-KVM-x86-Inject-GP-with-the-right-rip-on-efer-writes.patch
new file mode 100644 (file)
index 0000000..692987c
--- /dev/null
@@ -0,0 +1,92 @@
+From 28a41891c7ba5cb62e8d37f749b7f984795259d6 Mon Sep 17 00:00:00 2001
+From: Roedel, Joerg <Joerg.Roedel@amd.com>
+Date: Thu, 6 May 2010 11:38:43 +0200
+Subject: KVM: x86: Inject #GP with the right rip on efer writes
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This patch fixes a bug in the KVM efer-msr write path. If a
+guest writes to a reserved efer bit the set_efer function
+injects the #GP directly. The architecture dependent wrmsr
+function does not see this, assumes success and advances the
+rip. This results in a #GP in the guest with the wrong rip.
+This patch fixes this by reporting efer write errors back to
+the architectural wrmsr function.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit b69e8caef5b190af48c525f6d715e7b7728a77f6)
+---
+ arch/x86/kvm/x86.c |   31 ++++++++++++-------------------
+ 1 file changed, 12 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -624,37 +624,29 @@ static u32 emulated_msrs[] = {
+       MSR_IA32_MISC_ENABLE,
+ };
+-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
++static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+-      if (efer & efer_reserved_bits) {
+-              kvm_inject_gp(vcpu, 0);
+-              return;
+-      }
++      if (efer & efer_reserved_bits)
++              return 1;
+       if (is_paging(vcpu)
+-          && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
+-              kvm_inject_gp(vcpu, 0);
+-              return;
+-      }
++          && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
++              return 1;
+       if (efer & EFER_FFXSR) {
+               struct kvm_cpuid_entry2 *feat;
+               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+-              if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
+-                      kvm_inject_gp(vcpu, 0);
+-                      return;
+-              }
++              if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
++                      return 1;
+       }
+       if (efer & EFER_SVME) {
+               struct kvm_cpuid_entry2 *feat;
+               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+-              if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
+-                      kvm_inject_gp(vcpu, 0);
+-                      return;
+-              }
++              if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
++                      return 1;
+       }
+       efer &= ~EFER_LMA;
+@@ -666,6 +658,8 @@ static void set_efer(struct kvm_vcpu *vc
+       vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
+       kvm_mmu_reset_context(vcpu);
++
++      return 0;
+ }
+ void kvm_enable_efer_bits(u64 mask)
+@@ -1094,8 +1088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ {
+       switch (msr) {
+       case MSR_EFER:
+-              set_efer(vcpu, data);
+-              break;
++              return set_efer(vcpu, data);
+       case MSR_K7_HWCR:
+               data &= ~(u64)0x40;     /* ignore flush filter disable */
+               if (data != 0) {
diff --git a/queue-2.6.34/0021-KVM-SVM-Don-t-allow-nested-guest-to-VMMCALL-into-hos.patch b/queue-2.6.34/0021-KVM-SVM-Don-t-allow-nested-guest-to-VMMCALL-into-hos.patch
new file mode 100644 (file)
index 0000000..7b60375
--- /dev/null
@@ -0,0 +1,33 @@
+From 9bfa6444f4b7db2ebdd2a104bbfd1003b7eca2ef Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Wed, 5 May 2010 16:04:45 +0200
+Subject: KVM: SVM: Don't allow nested guest to VMMCALL into host
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This patch disables the possibility for a l2-guest to do a
+VMMCALL directly into the host. This would happen if the
+l1-hypervisor doesn't intercept VMMCALL and the l2-guest
+executes this instruction.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+(Cherry-picked from commit 0d945bd9351199744c1e89d57a70615b6ee9f394)
+---
+ arch/x86/kvm/svm.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1854,6 +1854,9 @@ static bool nested_svm_vmrun(struct vcpu
+               svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+       }
++      /* We don't want to see VMMCALLs from a nested guest */
++      svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
++
+       /* We don't want a nested guest to be more powerful than the guest,
+          so all intercepts are ORed */
+       svm->vmcb->control.intercept_cr_read |=
diff --git a/queue-2.6.34/btrfs-should-add-a-permission-check-for-setfacl.patch b/queue-2.6.34/btrfs-should-add-a-permission-check-for-setfacl.patch
new file mode 100644 (file)
index 0000000..5115057
--- /dev/null
@@ -0,0 +1,58 @@
+From 2f26afba46f0ebf155cf9be746496a0304a5b7cf Mon Sep 17 00:00:00 2001
+From: Shi Weihua <shiwh@cn.fujitsu.com>
+Date: Tue, 18 May 2010 00:50:32 +0000
+Subject: Btrfs: should add a permission check for setfacl
+
+From: Shi Weihua <shiwh@cn.fujitsu.com>
+
+commit 2f26afba46f0ebf155cf9be746496a0304a5b7cf upstream.
+
+On btrfs, do the following
+------------------
+# su user1
+# cd btrfs-part/
+# touch aaa
+# getfacl aaa
+  # file: aaa
+  # owner: user1
+  # group: user1
+  user::rw-
+  group::rw-
+  other::r--
+# su user2
+# cd btrfs-part/
+# setfacl -m u::rwx aaa
+# getfacl aaa
+  # file: aaa
+  # owner: user1
+  # group: user1
+  user::rwx           <- successed to setfacl
+  group::rw-
+  other::r--
+------------------
+but we should prohibit it that user2 changing user1's acl.
+In fact, on ext3 and other fs, a message occurs:
+  setfacl: aaa: Operation not permitted
+
+This patch fixed it.
+
+Signed-off-by: Shi Weihua <shiwh@cn.fujitsu.com>
+Signed-off-by: Chris Mason <chris.mason@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/btrfs/acl.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -160,6 +160,9 @@ static int btrfs_xattr_acl_set(struct de
+       int ret;
+       struct posix_acl *acl = NULL;
++      if (!is_owner_or_cap(dentry->d_inode))
++              return -EPERM;
++
+       if (value) {
+               acl = posix_acl_from_xattr(value, size);
+               if (acl == NULL) {
diff --git a/queue-2.6.34/cifs-allow-null-nd-as-nfs-server-uses-on-create.patch b/queue-2.6.34/cifs-allow-null-nd-as-nfs-server-uses-on-create.patch
new file mode 100644 (file)
index 0000000..8151945
--- /dev/null
@@ -0,0 +1,133 @@
+From fa588e0c57048b3d4bfcd772d80dc0615f83fd35 Mon Sep 17 00:00:00 2001
+From: Steve French <sfrench@us.ibm.com>
+Date: Thu, 22 Apr 2010 19:21:55 +0000
+Subject: CIFS: Allow null nd (as nfs server uses) on create
+
+From: Steve French <sfrench@us.ibm.com>
+
+commit fa588e0c57048b3d4bfcd772d80dc0615f83fd35 upstream.
+
+While creating a file on a server which supports unix extensions
+such as Samba, if a file is being created which does not supply
+nameidata (i.e. nd is null), cifs client can oops when calling
+cifs_posix_open.
+
+Signed-off-by: Shirish Pargaonkar <shirishp@us.ibm.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifsproto.h |    6 ++++--
+ fs/cifs/dir.c       |   20 ++++++++++++--------
+ fs/cifs/file.c      |   11 +++++++----
+ 3 files changed, 23 insertions(+), 14 deletions(-)
+
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fil
+                               __u16 fileHandle, struct file *file,
+                               struct vfsmount *mnt, unsigned int oflags);
+ extern int cifs_posix_open(char *full_path, struct inode **pinode,
+-                         struct vfsmount *mnt, int mode, int oflags,
+-                         __u32 *poplock, __u16 *pnetfid, int xid);
++                              struct vfsmount *mnt,
++                              struct super_block *sb,
++                              int mode, int oflags,
++                              __u32 *poplock, __u16 *pnetfid, int xid);
+ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+                                    FILE_UNIX_BASIC_INFO *info,
+                                    struct cifs_sb_info *cifs_sb);
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode
+ }
+ int cifs_posix_open(char *full_path, struct inode **pinode,
+-                  struct vfsmount *mnt, int mode, int oflags,
+-                  __u32 *poplock, __u16 *pnetfid, int xid)
++                      struct vfsmount *mnt, struct super_block *sb,
++                      int mode, int oflags,
++                      __u32 *poplock, __u16 *pnetfid, int xid)
+ {
+       int rc;
+       FILE_UNIX_BASIC_INFO *presp_data;
+       __u32 posix_flags = 0;
+-      struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
++      struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       struct cifs_fattr fattr;
+       cFYI(1, ("posix open %s", full_path));
+@@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, str
+       /* get new inode and set it up */
+       if (*pinode == NULL) {
+-              *pinode = cifs_iget(mnt->mnt_sb, &fattr);
++              *pinode = cifs_iget(sb, &fattr);
+               if (!*pinode) {
+                       rc = -ENOMEM;
+                       goto posix_open_ret;
+@@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, str
+               cifs_fattr_to_inode(*pinode, &fattr);
+       }
+-      cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
++      if (mnt)
++              cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+ posix_open_ret:
+       kfree(presp_data);
+@@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct
+       if (nd && (nd->flags & LOOKUP_OPEN))
+               oflags = nd->intent.open.flags;
+       else
+-              oflags = FMODE_READ;
++              oflags = FMODE_READ | SMB_O_CREAT;
+       if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
+           (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+                       le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-              rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
+-                                   mode, oflags, &oplock, &fileHandle, xid);
++              rc = cifs_posix_open(full_path, &newinode,
++                      nd ? nd->path.mnt : NULL,
++                      inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
+               /* EIO could indicate that (posix open) operation is not
+                  supported, despite what server claimed in capability
+                  negotation.  EREMOTE indicates DFS junction, which is not
+@@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_ino
+                    (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
+                    (nd->intent.open.flags & O_CREAT)) {
+                       rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
++                                      parent_dir_inode->i_sb,
+                                       nd->intent.open.create_mode,
+                                       nd->intent.open.flags, &oplock,
+                                       &fileHandle, xid);
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struc
+           (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+                       le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+               int oflags = (int) cifs_posix_convert_flags(file->f_flags);
++              oflags |= SMB_O_CREAT;
+               /* can not refresh inode info since size could be stale */
+               rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
+-                                   cifs_sb->mnt_file_mode /* ignored */,
+-                                   oflags, &oplock, &netfid, xid);
++                              inode->i_sb,
++                              cifs_sb->mnt_file_mode /* ignored */,
++                              oflags, &oplock, &netfid, xid);
+               if (rc == 0) {
+                       cFYI(1, ("posix open succeeded"));
+                       /* no need for special case handling of setting mode
+@@ -513,8 +515,9 @@ reopen_error_exit:
+               int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+               /* can not refresh inode info since size could be stale */
+               rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
+-                                   cifs_sb->mnt_file_mode /* ignored */,
+-                                   oflags, &oplock, &netfid, xid);
++                              inode->i_sb,
++                              cifs_sb->mnt_file_mode /* ignored */,
++                              oflags, &oplock, &netfid, xid);
+               if (rc == 0) {
+                       cFYI(1, ("posix reopen succeeded"));
+                       goto reopen_success;
diff --git a/queue-2.6.34/drm-i915-don-t-touch-port_hotplug_en-in-intel_dp_detect.patch b/queue-2.6.34/drm-i915-don-t-touch-port_hotplug_en-in-intel_dp_detect.patch
new file mode 100644 (file)
index 0000000..1f91957
--- /dev/null
@@ -0,0 +1,44 @@
+From 6e0032f0ae4440e75256bee11b163552cae21962 Mon Sep 17 00:00:00 2001
+From: Karsten Wiese <fzuuzf@googlemail.com>
+Date: Sat, 27 Mar 2010 22:48:33 +0100
+Subject: drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect()
+
+From: Karsten Wiese <fzuuzf@googlemail.com>
+
+commit 6e0032f0ae4440e75256bee11b163552cae21962 upstream.
+
+PORT_HOTPLUG_EN has already been set up in i915_driver_irq_postinstall(),
+when intel_dp_detect() runs.
+
+Delete the DP[BCD]_HOTPLUG_INT_EN defines, they are not referenced anymore.
+
+I found this while searching for a fix for
+        https://bugzilla.redhat.com/show_bug.cgi?id=528312
+
+Signed-off-by: Karsten Wiese <fzu@wemgehoertderstaat.de>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_dp.c |   10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *co
+       if (HAS_PCH_SPLIT(dev))
+               return ironlake_dp_detect(connector);
+-      temp = I915_READ(PORT_HOTPLUG_EN);
+-
+-      I915_WRITE(PORT_HOTPLUG_EN,
+-             temp |
+-             DPB_HOTPLUG_INT_EN |
+-             DPC_HOTPLUG_INT_EN |
+-             DPD_HOTPLUG_INT_EN);
+-
+-      POSTING_READ(PORT_HOTPLUG_EN);
+-
+       switch (dp_priv->output_reg) {
+       case DP_B:
+               bit = DPB_HOTPLUG_INT_STATUS;
diff --git a/queue-2.6.34/ext4-check-s_log_groups_per_flex-in-online-resize-code.patch b/queue-2.6.34/ext4-check-s_log_groups_per_flex-in-online-resize-code.patch
new file mode 100644 (file)
index 0000000..c7a8b41
--- /dev/null
@@ -0,0 +1,49 @@
+From 42007efd569f1cf3bfb9a61da60ef6c2179508ca Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Sun, 16 May 2010 01:00:00 -0400
+Subject: ext4: check s_log_groups_per_flex in online resize code
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit 42007efd569f1cf3bfb9a61da60ef6c2179508ca upstream.
+
+If groups_per_flex < 2, sbi->s_flex_groups[] doesn't get filled out,
+and every other access to this first tests s_log_groups_per_flex;
+same thing needs to happen in resize or we'll wander off into
+a null pointer when doing an online resize of the file system.
+
+Thanks to Christoph Biedl, who came up with the trivial testcase:
+
+# truncate --size 128M fsfile
+# mkfs.ext3 -F fsfile
+# tune2fs -O extents,uninit_bg,dir_index,flex_bg,huge_file,dir_nlink,extra_isize fsfile
+# e2fsck -yDf -C0 fsfile
+# truncate --size 132M fsfile
+# losetup /dev/loop0 fsfile
+# mount /dev/loop0 mnt
+# resize2fs -p /dev/loop0
+
+       https://bugzilla.kernel.org/show_bug.cgi?id=13549
+
+Reported-by: Alessandro Polverini <alex@nibbles.it>
+Test-case-by: Christoph Biedl  <bugzilla.kernel.bpeb@manchmal.in-ulm.de>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/resize.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *s
+       percpu_counter_add(&sbi->s_freeinodes_counter,
+                          EXT4_INODES_PER_GROUP(sb));
+-      if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
++      if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
++          sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group;
+               flex_group = ext4_flex_group(sbi, input->group);
+               atomic_add(input->free_blocks_count,
diff --git a/queue-2.6.34/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch b/queue-2.6.34/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
new file mode 100644 (file)
index 0000000..14407a5
--- /dev/null
@@ -0,0 +1,34 @@
+From 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Wed, 2 Jun 2010 22:04:39 -0400
+Subject: ext4: Make sure the MOVE_EXT ioctl can't overwrite append-only files
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 upstream.
+
+Dan Rosenberg has reported a problem with the MOVE_EXT ioctl.  If the
+donor file is an append-only file, we should not allow the operation
+to proceed, lest we end up overwriting the contents of an append-only
+file.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/move_extent.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -959,6 +959,9 @@ mext_check_arguments(struct inode *orig_
+               return -EINVAL;
+       }
++      if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
++              return -EPERM;
++
+       /* Ext4 move extent does not support swapfile */
+       if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
+               ext4_debug("ext4 move extent: The argument files should "
diff --git a/queue-2.6.34/gfs2-fix-permissions-checking-for-setflags-ioctl.patch b/queue-2.6.34/gfs2-fix-permissions-checking-for-setflags-ioctl.patch
new file mode 100644 (file)
index 0000000..60f9487
--- /dev/null
@@ -0,0 +1,45 @@
+From 7df0e0397b9a18358573274db9fdab991941062f Mon Sep 17 00:00:00 2001
+From: Steven Whitehouse <swhiteho@redhat.com>
+Date: Mon, 24 May 2010 14:36:48 +0100
+Subject: GFS2: Fix permissions checking for setflags ioctl()
+
+From: Steven Whitehouse <swhiteho@redhat.com>
+
+commit 7df0e0397b9a18358573274db9fdab991941062f upstream.
+
+We should be checking for the ownership of the file for which
+flags are being set, rather than just for write access.
+
+Reported-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/gfs2/file.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file
+       if (error)
+               goto out_drop_write;
++      error = -EACCES;
++      if (!is_owner_or_cap(inode))
++              goto out;
++
++      error = 0;
+       flags = ip->i_diskflags;
+       new_flags = (flags & ~mask) | (reqflags & mask);
+       if ((new_flags ^ flags) == 0)
+@@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *f
+ {
+       struct inode *inode = filp->f_path.dentry->d_inode;
+       u32 fsflags, gfsflags;
++
+       if (get_user(fsflags, ptr))
+               return -EFAULT;
++
+       gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
+       if (!S_ISDIR(inode->i_mode)) {
+               if (gfsflags & GFS2_DIF_INHERIT_JDATA)
diff --git a/queue-2.6.34/nfs-ensure-that-we-mark-the-inode-as-dirty-if-we-exit-early-from-commit.patch b/queue-2.6.34/nfs-ensure-that-we-mark-the-inode-as-dirty-if-we-exit-early-from-commit.patch
new file mode 100644 (file)
index 0000000..66ef3a9
--- /dev/null
@@ -0,0 +1,52 @@
+From c5efa5fc91f1f6d1d47e65f39e7ec6d1157c777d Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 26 May 2010 08:42:11 -0400
+Subject: NFS: Ensure that we mark the inode as dirty if we exit early from commit
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit c5efa5fc91f1f6d1d47e65f39e7ec6d1157c777d upstream.
+
+If we exit from nfs_commit_inode() without ensuring that the COMMIT rpc
+call has been completed, we must re-mark the inode as dirty. Otherwise,
+future calls to sync_inode() with the WB_SYNC_ALL flag set will fail to
+ensure that the data is on the disk.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/write.c |   13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode
+       int res = 0;
+       if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
+-              goto out;
++              goto out_mark_dirty;
+       spin_lock(&inode->i_lock);
+       res = nfs_scan_commit(inode, &head, 0, 0);
+       spin_unlock(&inode->i_lock);
+@@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode
+                       wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
+                                       nfs_wait_bit_killable,
+                                       TASK_KILLABLE);
++              else
++                      goto out_mark_dirty;
+       } else
+               nfs_commit_clear_lock(NFS_I(inode));
+-out:
++      return res;
++      /* Note: If we exit without ensuring that the commit is complete,
++       * we must mark the inode as dirty. Otherwise, future calls to
++       * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
++       * that the data is on the disk.
++       */
++out_mark_dirty:
++      __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+       return res;
+ }
diff --git a/queue-2.6.34/nfs-fix-another-nfs_wb_page-deadlock.patch b/queue-2.6.34/nfs-fix-another-nfs_wb_page-deadlock.patch
new file mode 100644 (file)
index 0000000..8ba0857
--- /dev/null
@@ -0,0 +1,45 @@
+From 0522f6adedd2736cbca3c0e16ca51df668993eee Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 26 May 2010 08:42:24 -0400
+Subject: NFS: Fix another nfs_wb_page() deadlock
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 0522f6adedd2736cbca3c0e16ca51df668993eee upstream.
+
+J.R. Okajima reports that the call to sync_inode() in nfs_wb_page() can
+deadlock with other writeback flush calls. It boils down to the fact
+that we cannot ever call writeback_single_inode() while holding a page
+lock (even if we do set nr_to_write to zero) since another process may
+already be waiting in the call to do_writepages(), and so will deny us
+the I_SYNC lock.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/write.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1518,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, str
+       };
+       int ret;
+-      while(PagePrivate(page)) {
++      for (;;) {
+               wait_on_page_writeback(page);
+               if (clear_page_dirty_for_io(page)) {
+                       ret = nfs_writepage_locked(page, &wbc);
+                       if (ret < 0)
+                               goto out_error;
++                      continue;
+               }
+-              ret = sync_inode(inode, &wbc);
++              if (!PagePrivate(page))
++                      break;
++              ret = nfs_commit_inode(inode, FLUSH_SYNC);
+               if (ret < 0)
+                       goto out_error;
+       }
diff --git a/queue-2.6.34/parisc-clear-floating-point-exception-flag-on-sigfpe-signal.patch b/queue-2.6.34/parisc-clear-floating-point-exception-flag-on-sigfpe-signal.patch
new file mode 100644 (file)
index 0000000..3080f86
--- /dev/null
@@ -0,0 +1,34 @@
+From 550f0d922286556c7ea43974bb7921effb5a5278 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 3 May 2010 20:44:21 +0000
+Subject: parisc: clear floating point exception flag on SIGFPE signal
+
+From: Helge Deller <deller@gmx.de>
+
+commit 550f0d922286556c7ea43974bb7921effb5a5278 upstream.
+
+Clear the floating point exception flag before returning to
+user space. This is needed, else the libc trampoline handler
+may hit the same SIGFPE again while building up a trampoline
+to a signal handler.
+
+Fixes debian bug #559406.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/math-emu/decode_exc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/parisc/math-emu/decode_exc.c
++++ b/arch/parisc/math-emu/decode_exc.c
+@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[],
+               return SIGNALCODE(SIGFPE, FPE_FLTINV);
+         case DIVISIONBYZEROEXCEPTION:
+               update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
++              Clear_excp_register(exception_index);
+               return SIGNALCODE(SIGFPE, FPE_FLTDIV);
+         case INEXACTEXCEPTION:
+               update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
index eb07a6a04561894342e6c69a9eea086ac0568ada..2cf692bd2d0940b25c679d8fa9b84b46c7c87767 100644 (file)
@@ -167,3 +167,34 @@ nfsd-nfsd_setattr-needs-to-call-commit_metadata.patch
 wl1251-fix-a-memory-leak-in-probe.patch
 iwlwifi-add-missing-rcu_read_lock.patch
 perf_events-fix-races-and-clean-up-perf_event-and-perf_mmap_data-interaction.patch
+ext4-check-s_log_groups_per_flex-in-online-resize-code.patch
+ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
+gfs2-fix-permissions-checking-for-setflags-ioctl.patch
+cifs-allow-null-nd-as-nfs-server-uses-on-create.patch
+btrfs-should-add-a-permission-check-for-setfacl.patch
+nfs-ensure-that-we-mark-the-inode-as-dirty-if-we-exit-early-from-commit.patch
+nfs-fix-another-nfs_wb_page-deadlock.patch
+v4l-dvb-uvcvideo-prevent-division-by-0-when-control-step-value-is-0.patch
+0001-KVM-SVM-Don-t-use-kmap_atomic-in-nested_svm_map.patch
+0002-KVM-SVM-Fix-schedule-while-atomic-on-nested-exceptio.patch
+0003-KVM-SVM-Sync-all-control-registers-on-nested-vmexit.patch
+0004-KVM-SVM-Fix-nested-msr-intercept-handling.patch
+0005-KVM-SVM-Don-t-sync-nested-cr8-to-lapic-and-back.patch
+0006-KVM-SVM-Fix-wrong-interrupt-injection-in-enable_irq_.patch
+0007-KVM-s390-Fix-possible-memory-leak-of-in-kvm_arch_vcp.patch
+0008-KVM-PPC-Do-not-create-debugfs-if-fail-to-create-vcpu.patch
+0009-KVM-x86-Add-callback-to-let-modules-decide-over-some.patch
+0010-KVM-SVM-Report-emulated-SVM-features-to-userspace.patch
+0011-x86-paravirt-Add-a-global-synchronization-point-for-.patch
+0012-KVM-Don-t-allow-lmsw-to-clear-cr0.pe.patch
+0013-KVM-x86-Check-LMA-bit-before-set_efer.patch
+0014-KVM-MMU-Segregate-shadow-pages-with-different-cr0.wp.patch
+0015-KVM-VMX-enable-VMXON-check-with-SMX-enabled-Intel-TX.patch
+0016-KVM-MMU-Don-t-read-pdptrs-with-mmu-spinlock-held-in-.patch
+0017-KVM-Fix-wallclock-version-writing-race.patch
+0018-KVM-PPC-Add-missing-vcpu_load-vcpu_put-in-vcpu-ioctl.patch
+0019-KVM-x86-Add-missing-locking-to-arch-specific-vcpu-io.patch
+0020-KVM-x86-Inject-GP-with-the-right-rip-on-efer-writes.patch
+0021-KVM-SVM-Don-t-allow-nested-guest-to-VMMCALL-into-hos.patch
+drm-i915-don-t-touch-port_hotplug_en-in-intel_dp_detect.patch
+parisc-clear-floating-point-exception-flag-on-sigfpe-signal.patch
diff --git a/queue-2.6.34/v4l-dvb-uvcvideo-prevent-division-by-0-when-control-step-value-is-0.patch b/queue-2.6.34/v4l-dvb-uvcvideo-prevent-division-by-0-when-control-step-value-is-0.patch
new file mode 100644 (file)
index 0000000..0e1c330
--- /dev/null
@@ -0,0 +1,33 @@
+From cf7a50eeb6f462a0b7d1619fcb27a727a2981769 Mon Sep 17 00:00:00 2001
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Date: Sun, 25 Apr 2010 16:27:14 -0300
+Subject: V4L/DVB: uvcvideo: Prevent division by 0 when control step value is 0
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+commit cf7a50eeb6f462a0b7d1619fcb27a727a2981769 upstream.
+
+The control step values reported by the device are used as a divisor
+unchecked, which can result in a division by zero.
+
+Check the step value and make it 1 when null.
+
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/uvc/uvc_ctrl.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/media/video/uvc/uvc_ctrl.c
++++ b/drivers/media/video/uvc/uvc_ctrl.c
+@@ -1047,6 +1047,8 @@ int uvc_ctrl_set(struct uvc_video_chain
+                                  uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+               step = mapping->get(mapping, UVC_GET_RES,
+                                   uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
++              if (step == 0)
++                      step = 1;
+               xctrl->value = min + (xctrl->value - min + step/2) / step * step;
+               xctrl->value = clamp(xctrl->value, min, max);