git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
ima and kvm patches for .30
author    Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 29 Jun 2009 18:41:12 +0000 (11:41 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 29 Jun 2009 18:41:12 +0000 (11:41 -0700)
queue-2.6.30/ima-handle-dentry_open-failures.patch [new file with mode: 0644]
queue-2.6.30/ima-open-all-files-o_largefile.patch [new file with mode: 0644]
queue-2.6.30/ima-use-current_cred-instead-of-current-cred.patch [new file with mode: 0644]
queue-2.6.30/kvm-add-vt-x-machine-check-support.patch [new file with mode: 0644]
queue-2.6.30/kvm-disable-large-pages-on-misaligned-memory-slots.patch [new file with mode: 0644]
queue-2.6.30/kvm-fix-dirty-bit-tracking-for-slots-with-large-pages.patch [new file with mode: 0644]
queue-2.6.30/kvm-prevent-overflow-in-largepages-calculation.patch [new file with mode: 0644]
queue-2.6.30/kvm-protect-concurrent-make_all_cpus_request.patch [new file with mode: 0644]
queue-2.6.30/kvm-vmx-handle-vmx-instruction-vmexits.patch [new file with mode: 0644]
queue-2.6.30/kvm-x86-check-for-cr3-validity-in-ioctl_set_sregs.patch [new file with mode: 0644]
queue-2.6.30/series

diff --git a/queue-2.6.30/ima-handle-dentry_open-failures.patch b/queue-2.6.30/ima-handle-dentry_open-failures.patch
new file mode 100644 (file)
index 0000000..6bab5f5
--- /dev/null
@@ -0,0 +1,48 @@
+From f06dd16a03f6f7f72fab4db03be36e28c28c6fd6 Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Mon, 11 May 2009 13:59:16 -0400
+Subject: IMA: Handle dentry_open failures
+
+From: Eric Paris <eparis@redhat.com>
+
+commit f06dd16a03f6f7f72fab4db03be36e28c28c6fd6 upstream.
+
+Currently IMA does not handle failures from dentry_open().  This means that we
+leave a pointer set to ERR_PTR(errno) and then try to use it just a few lines
+later in fput().  Oops.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Acked-by: Mimi Zohar <zohar@us.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/integrity/ima/ima_main.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -128,10 +128,6 @@ static int get_path_measurement(struct i
+ {
+       int rc = 0;
+-      if (IS_ERR(file)) {
+-              pr_info("%s dentry_open failed\n", filename);
+-              return rc;
+-      }
+       iint->opencount++;
+       iint->readcount++;
+@@ -197,6 +193,12 @@ int ima_path_check(struct path *path, in
+               struct vfsmount *mnt = mntget(path->mnt);
+               file = dentry_open(dentry, mnt, O_RDONLY, current_cred());
++              if (IS_ERR(file)) {
++                      pr_info("%s dentry_open failed\n", dentry->d_name.name);
++                      rc = PTR_ERR(file);
++                      file = NULL;
++                      goto out;
++              }
+               rc = get_path_measurement(iint, file, dentry->d_name.name);
+       }
+ out:
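
For readers unfamiliar with the convention the fix above relies on: kernel
functions such as dentry_open() return either a valid pointer or an errno
value encoded into the pointer with ERR_PTR(), and callers must test the
result with IS_ERR() and recover the code with PTR_ERR() instead of using it.
Below is a minimal userspace sketch of that pattern, assuming simplified
stand-ins for the macros in include/linux/err.h; open_thing() is a
hypothetical placeholder for dentry_open().

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers
 * from include/linux/err.h. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical opener standing in for dentry_open(): returns a valid
 * pointer on success, or -errno encoded into the pointer on failure. */
static void *open_thing(int fail)
{
	static int obj;

	return fail ? ERR_PTR(-EACCES) : (void *)&obj;
}

int main(void)
{
	void *file = open_thing(1);

	if (IS_ERR(file)) {
		/* Like the patched ima_path_check(): record the error and
		 * never hand the bogus pointer on to fput(). */
		printf("open failed: %ld\n", PTR_ERR(file));
		return 1;
	}
	printf("open succeeded\n");
	return 0;
}
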
diff --git a/queue-2.6.30/ima-open-all-files-o_largefile.patch b/queue-2.6.30/ima-open-all-files-o_largefile.patch
new file mode 100644 (file)
index 0000000..f73b3ed
--- /dev/null
@@ -0,0 +1,34 @@
+From 1a62e958fa4aaeeb752311b4f5e16b2a86737b23 Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Mon, 11 May 2009 13:59:22 -0400
+Subject: IMA: open all files O_LARGEFILE
+
+From: Eric Paris <eparis@redhat.com>
+
+commit 1a62e958fa4aaeeb752311b4f5e16b2a86737b23 upstream.
+
+If IMA tried to measure a file which was larger than 4G dentry_open would fail
+with -EOVERFLOW since IMA wasn't passing O_LARGEFILE.  This patch passes
+O_LARGEFILE to all IMA opens to avoid this problem.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Acked-by: Mimi Zohar <zohar@us.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/integrity/ima/ima_main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -192,7 +192,8 @@ int ima_path_check(struct path *path, in
+               struct dentry *dentry = dget(path->dentry);
+               struct vfsmount *mnt = mntget(path->mnt);
+-              file = dentry_open(dentry, mnt, O_RDONLY, current_cred());
++              file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
++                                 current_cred());
+               if (IS_ERR(file)) {
+                       pr_info("%s dentry_open failed\n", dentry->d_name.name);
+                       rc = PTR_ERR(file);
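
As a userspace illustration of the failure mode described in the changelog:
with a 32-bit off_t, open(2) on a file larger than 4G fails with EOVERFLOW
unless O_LARGEFILE is passed; on a 64-bit off_t build the flag is a no-op.
This is only an analogue of the in-kernel dentry_open() behaviour, and
"huge.img" is a placeholder path.

#define _GNU_SOURCE		/* exposes O_LARGEFILE on glibc */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Mirror the fix: always pass O_LARGEFILE so a file larger than 4G
	 * does not fail with EOVERFLOW on a 32-bit off_t build. */
	int fd = open("huge.img", O_RDONLY | O_LARGEFILE);

	if (fd < 0) {
		printf("open failed: %s\n", strerror(errno));
		return 1;
	}
	printf("open succeeded\n");
	close(fd);
	return 0;
}
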
diff --git a/queue-2.6.30/ima-use-current_cred-instead-of-current-cred.patch b/queue-2.6.30/ima-use-current_cred-instead-of-current-cred.patch
new file mode 100644 (file)
index 0000000..36d3784
--- /dev/null
@@ -0,0 +1,44 @@
+From 37bcbf13d32e4e453e9def79ee72bd953b88302f Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Mon, 11 May 2009 13:59:10 -0400
+Subject: IMA: use current_cred() instead of current->cred
+
+From: Eric Paris <eparis@redhat.com>
+
+commit 37bcbf13d32e4e453e9def79ee72bd953b88302f upstream.
+
+Proper invocation of the current credentials is to use current_cred() not
+current->cred.  This patch makes IMA use the new method.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Acked-by: Mimi Zohar <zohar@us.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/integrity/ima/ima_audit.c |    2 +-
+ security/integrity/ima/ima_main.c  |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/security/integrity/ima/ima_audit.c
++++ b/security/integrity/ima/ima_audit.c
+@@ -50,7 +50,7 @@ void integrity_audit_msg(int audit_msgno
+       ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
+       audit_log_format(ab, "integrity: pid=%d uid=%u auid=%u ses=%u",
+-                       current->pid, current->cred->uid,
++                       current->pid, current_cred()->uid,
+                        audit_get_loginuid(current),
+                        audit_get_sessionid(current));
+       audit_log_task_context(ab);
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -196,7 +196,7 @@ int ima_path_check(struct path *path, in
+               struct dentry *dentry = dget(path->dentry);
+               struct vfsmount *mnt = mntget(path->mnt);
+-              file = dentry_open(dentry, mnt, O_RDONLY, current->cred);
++              file = dentry_open(dentry, mnt, O_RDONLY, current_cred());
+               rc = get_path_measurement(iint, file, dentry->d_name.name);
+       }
+ out:
diff --git a/queue-2.6.30/kvm-add-vt-x-machine-check-support.patch b/queue-2.6.30/kvm-add-vt-x-machine-check-support.patch
new file mode 100644 (file)
index 0000000..366d5f0
--- /dev/null
@@ -0,0 +1,168 @@
+From a0861c02a981c943573478ea13b29b1fb958ee5b Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Mon, 8 Jun 2009 17:37:09 +0800
+Subject: KVM: Add VT-x machine check support
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit a0861c02a981c943573478ea13b29b1fb958ee5b upstream.
+
+VT-x needs an explicit MC vector intercept to handle machine checks in the
+hypervisor.
+
+It also has a special option to catch machine checks that happen
+during VT entry.
+
+Do these interceptions and forward them to the Linux machine check
+handler. Make it always look like user space is interrupted because
+the machine check handler treats kernel/user space differently.
+
+Thanks to Jiang Yunhong for help and testing.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Huang Ying <ying.huang@intel.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/vmx.h          |    1 
+ arch/x86/kernel/cpu/mcheck/mce_64.c |    1 
+ arch/x86/kvm/vmx.c                  |   50 ++++++++++++++++++++++++++++++++++--
+ 3 files changed, 50 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -247,6 +247,7 @@ enum vmcs_field {
+ #define EXIT_REASON_MSR_READ            31
+ #define EXIT_REASON_MSR_WRITE           32
+ #define EXIT_REASON_MWAIT_INSTRUCTION   36
++#define EXIT_REASON_MCE_DURING_VMENTRY         41
+ #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+ #define EXIT_REASON_APIC_ACCESS         44
+ #define EXIT_REASON_EPT_VIOLATION       48
+--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
+@@ -420,6 +420,7 @@ void do_machine_check(struct pt_regs * r
+  out2:
+       atomic_dec(&mce_entry);
+ }
++EXPORT_SYMBOL_GPL(do_machine_check);
+ #ifdef CONFIG_X86_MCE_INTEL
+ /***
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -32,6 +32,7 @@
+ #include <asm/desc.h>
+ #include <asm/vmx.h>
+ #include <asm/virtext.h>
++#include <asm/mce.h>
+ #define __ex(x) __kvm_handle_fault_on_reboot(x)
+@@ -97,6 +98,7 @@ struct vcpu_vmx {
+       int soft_vnmi_blocked;
+       ktime_t entry_time;
+       s64 vnmi_blocked_time;
++      u32 exit_reason;
+ };
+ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+@@ -213,6 +215,13 @@ static inline int is_external_interrupt(
+               == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+ }
++static inline int is_machine_check(u32 intr_info)
++{
++      return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
++                           INTR_INFO_VALID_MASK)) ==
++              (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
++}
++
+ static inline int cpu_has_vmx_msr_bitmap(void)
+ {
+       return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+@@ -478,7 +487,7 @@ static void update_exception_bitmap(stru
+ {
+       u32 eb;
+-      eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
++      eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
+       if (!vcpu->fpu_active)
+               eb |= 1u << NM_VECTOR;
+       if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+@@ -2585,6 +2594,31 @@ static int handle_rmode_exception(struct
+       return 0;
+ }
++/*
++ * Trigger machine check on the host. We assume all the MSRs are already set up
++ * by the CPU and that we still run on the same CPU as the MCE occurred on.
++ * We pass a fake environment to the machine check handler because we want
++ * the guest to be always treated like user space, no matter what context
++ * it used internally.
++ */
++static void kvm_machine_check(void)
++{
++#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
++      struct pt_regs regs = {
++              .cs = 3, /* Fake ring 3 no matter what the guest ran on */
++              .flags = X86_EFLAGS_IF,
++      };
++
++      do_machine_check(&regs, 0);
++#endif
++}
++
++static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++      /* already handled by vcpu_run */
++      return 1;
++}
++
+ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -2596,6 +2630,9 @@ static int handle_exception(struct kvm_v
+       vect_info = vmx->idt_vectoring_info;
+       intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
++      if (is_machine_check(intr_info))
++              return handle_machine_check(vcpu, kvm_run);
++
+       if ((vect_info & VECTORING_INFO_VALID_MASK) &&
+                                               !is_page_fault(intr_info))
+               printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
+@@ -3150,6 +3187,7 @@ static int (*kvm_vmx_exit_handlers[])(st
+       [EXIT_REASON_WBINVD]                  = handle_wbinvd,
+       [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
+       [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
++      [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
+ };
+ static const int kvm_vmx_max_exit_handlers =
+@@ -3161,8 +3199,8 @@ static const int kvm_vmx_max_exit_handle
+  */
+ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+ {
+-      u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
++      u32 exit_reason = vmx->exit_reason;
+       u32 vectoring_info = vmx->idt_vectoring_info;
+       KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+@@ -3512,6 +3550,14 @@ static void vmx_vcpu_run(struct kvm_vcpu
+       intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
++      vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
++
++      /* Handle machine checks before interrupts are enabled */
++      if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
++          || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
++              && is_machine_check(exit_intr_info)))
++              kvm_machine_check();
++
+       /* We need to handle NMIs before interrupts are enabled */
+       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
+           (intr_info & INTR_INFO_VALID_MASK)) {
diff --git a/queue-2.6.30/kvm-disable-large-pages-on-misaligned-memory-slots.patch b/queue-2.6.30/kvm-disable-large-pages-on-misaligned-memory-slots.patch
new file mode 100644 (file)
index 0000000..86316a2
--- /dev/null
@@ -0,0 +1,47 @@
+From ac04527f7947020c5890090b2ac87af4e98d977e Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Mon, 8 Jun 2009 15:52:39 +0300
+Subject: KVM: Disable large pages on misaligned memory slots
+
+From: Avi Kivity <avi@redhat.com>
+
+commit ac04527f7947020c5890090b2ac87af4e98d977e upstream.
+
+If a slot's guest physical address and host virtual address are unequal (mod
+large page size), then we would erroneously try to back guest large pages
+with host large pages.  Detect this misalignment and disable large page
+support for the troubled slot.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ virt/kvm/kvm_main.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -919,7 +919,7 @@ int __kvm_set_memory_region(struct kvm *
+ {
+       int r;
+       gfn_t base_gfn;
+-      unsigned long npages;
++      unsigned long npages, ugfn;
+       int largepages;
+       unsigned long i;
+       struct kvm_memory_slot *memslot;
+@@ -1010,6 +1010,14 @@ int __kvm_set_memory_region(struct kvm *
+                       new.lpage_info[0].write_count = 1;
+               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
+                       new.lpage_info[largepages-1].write_count = 1;
++              ugfn = new.userspace_addr >> PAGE_SHIFT;
++              /*
++               * If the gfn and userspace address are not aligned wrt each
++               * other, disable large page support for this slot
++               */
++              if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
++                      for (i = 0; i < largepages; ++i)
++                              new.lpage_info[i].write_count = 1;
+       }
+       /* Allocate page dirty bitmap if needed */
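
The key test in the patch above is the XOR: guest frames and host userspace
frames can only share large-page mappings if they are congruent modulo the
number of small pages per huge page. The standalone sketch below shows that
arithmetic, assuming x86 with 4K pages and 2M huge pages (512 small pages per
huge page); the values fed to misaligned() are made-up examples.

#include <stdio.h>

#define PAGES_PER_HPAGE 512UL	/* 2M huge page / 4K page, an assumption */

/* Same test as the patch: the low bits of the guest frame number and of
 * the host userspace frame number must agree, otherwise no guest large
 * page can line up with a host large page. */
static int misaligned(unsigned long base_gfn, unsigned long ugfn)
{
	return ((base_gfn ^ ugfn) & (PAGES_PER_HPAGE - 1)) != 0;
}

int main(void)
{
	unsigned long ugfn = 0x7f0000000000UL >> 12;	/* example host mapping */

	printf("%d\n", misaligned(0x100000UL, ugfn));	/* 0: aligned            */
	printf("%d\n", misaligned(0x100001UL, ugfn));	/* 1: off by one 4K page */
	return 0;
}
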
diff --git a/queue-2.6.30/kvm-fix-dirty-bit-tracking-for-slots-with-large-pages.patch b/queue-2.6.30/kvm-fix-dirty-bit-tracking-for-slots-with-large-pages.patch
new file mode 100644 (file)
index 0000000..9314613
--- /dev/null
@@ -0,0 +1,34 @@
+From e244584fe3a5c20deddeca246548ac86dbc6e1d1 Mon Sep 17 00:00:00 2001
+From: Izik Eidus <ieidus@redhat.com>
+Date: Wed, 10 Jun 2009 19:23:24 +0300
+Subject: KVM: Fix dirty bit tracking for slots with large pages
+
+From: Izik Eidus <ieidus@redhat.com>
+
+commit e244584fe3a5c20deddeca246548ac86dbc6e1d1 upstream.
+
+When a slot is already allocated and is being asked to be tracked, we need
+to break up its large pages.
+
+This code flushes the mmu when someone asks a slot to start dirty bit
+tracking.
+
+Signed-off-by: Izik Eidus <ieidus@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ virt/kvm/kvm_main.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1030,6 +1030,8 @@ int __kvm_set_memory_region(struct kvm *
+               if (!new.dirty_bitmap)
+                       goto out_free;
+               memset(new.dirty_bitmap, 0, dirty_bytes);
++              if (old.npages)
++                      kvm_arch_flush_shadow(kvm);
+       }
+ #endif /* not defined CONFIG_S390 */
diff --git a/queue-2.6.30/kvm-prevent-overflow-in-largepages-calculation.patch b/queue-2.6.30/kvm-prevent-overflow-in-largepages-calculation.patch
new file mode 100644 (file)
index 0000000..3a8d95d
--- /dev/null
@@ -0,0 +1,31 @@
+From 09f8ca74ae6c2d78b2c7f6c0751ed0cbe815a3d9 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Mon, 8 Jun 2009 15:55:21 +0300
+Subject: KVM: Prevent overflow in largepages calculation
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 09f8ca74ae6c2d78b2c7f6c0751ed0cbe815a3d9 upstream.
+
+If userspace specifies a memory slot that is larger than 8 petabytes, it
+could overflow the largepages variable.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ virt/kvm/kvm_main.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -920,8 +920,7 @@ int __kvm_set_memory_region(struct kvm *
+       int r;
+       gfn_t base_gfn;
+       unsigned long npages, ugfn;
+-      int largepages;
+-      unsigned long i;
++      unsigned long largepages, i;
+       struct kvm_memory_slot *memslot;
+       struct kvm_memory_slot old, new;
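
A quick worked example of the overflow being closed above, under the
assumption of 4K pages and 512 small pages per huge page: an 8 PiB slot is
2^41 small pages and therefore 2^32 huge pages, which does not fit in the
32-bit int that largepages used to be. The snippet just prints the
arithmetic; truncating 2^32 to int is implementation-defined and typically
yields 0.

#include <stdio.h>

int main(void)
{
	unsigned long bytes      = 1UL << 53;	 /* 8 PiB slot (64-bit host assumed) */
	unsigned long npages     = bytes >> 12;	 /* 2^41 4K pages                    */
	unsigned long largepages = npages / 512; /* 2^32 huge pages                  */
	int wrapped = (int)largepages;		 /* what the old int variable held   */

	printf("npages=%lu largepages=%lu as int=%d\n",
	       npages, largepages, wrapped);
	return 0;
}
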
diff --git a/queue-2.6.30/kvm-protect-concurrent-make_all_cpus_request.patch b/queue-2.6.30/kvm-protect-concurrent-make_all_cpus_request.patch
new file mode 100644 (file)
index 0000000..273cd0f
--- /dev/null
@@ -0,0 +1,67 @@
+From 84261923d3dddb766736023bead6fa07b7e218d5 Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Wed, 17 Jun 2009 10:53:47 -0300
+Subject: KVM: protect concurrent make_all_cpus_request
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit 84261923d3dddb766736023bead6fa07b7e218d5 upstream.
+
+make_all_cpus_request contains a race condition which can
+trigger a false request-completed status, as follows:
+
+CPU0                                              CPU1
+
+if (test_and_set_bit(req,&vcpu->requests))
+   ....                                                   if (test_and_set_bit(req,&vcpu->requests))
+   ..                                                  return
+proceed to smp_call_function_many(wait=1)
+
+Use a spinlock to serialize concurrent CPUs.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/kvm_host.h |    1 +
+ virt/kvm/kvm_main.c      |    3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry {
+ struct kvm {
+       struct mutex lock; /* protects the vcpus array and APIC accesses */
+       spinlock_t mmu_lock;
++      spinlock_t requests_lock;
+       struct rw_semaphore slots_lock;
+       struct mm_struct *mm; /* userspace tied to this vm */
+       int nmemslots;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -581,6 +581,7 @@ static bool make_all_cpus_request(struct
+               cpumask_clear(cpus);
+       me = get_cpu();
++      spin_lock(&kvm->requests_lock);
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               vcpu = kvm->vcpus[i];
+               if (!vcpu)
+@@ -597,6 +598,7 @@ static bool make_all_cpus_request(struct
+               smp_call_function_many(cpus, ack_flush, NULL, 1);
+       else
+               called = false;
++      spin_unlock(&kvm->requests_lock);
+       put_cpu();
+       free_cpumask_var(cpus);
+       return called;
+@@ -817,6 +819,7 @@ static struct kvm *kvm_create_vm(void)
+       kvm->mm = current->mm;
+       atomic_inc(&kvm->mm->mm_count);
+       spin_lock_init(&kvm->mmu_lock);
++      spin_lock_init(&kvm->requests_lock);
+       kvm_io_bus_init(&kvm->pio_bus);
+       mutex_init(&kvm->lock);
+       kvm_io_bus_init(&kvm->mmio_bus);
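
The diagram in the changelog above is the whole story: a second requester can
find the bit already set and return while the first requester has not yet
sent the IPIs. Below is a userspace pthread analogue of the serialization the
patch adds; requests, delivered and make_all_cpus_request are illustrative
stand-ins rather than KVM's real structures, and usleep() stands in for
sending and waiting on the cross-CPU calls. Build with -pthread.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NVCPUS 4
#define REQ    0

static unsigned long requests[NVCPUS];
static int delivered[NVCPUS];
static pthread_mutex_t requests_lock = PTHREAD_MUTEX_INITIALIZER;

/* Serialized broadcast: holding requests_lock across "set the bits, then
 * deliver" means that whenever this returns, delivery has actually
 * happened, even for a caller that found the bits already set. */
static void make_all_cpus_request(void)
{
	pthread_mutex_lock(&requests_lock);
	for (int i = 0; i < NVCPUS; i++) {
		if (requests[i] & (1UL << REQ))	/* already requested: skip */
			continue;
		requests[i] |= 1UL << REQ;
	}
	usleep(1000);			/* stand-in for the IPIs (wait=1) */
	for (int i = 0; i < NVCPUS; i++)
		delivered[i] = 1;
	pthread_mutex_unlock(&requests_lock);
}

static void *caller(void *arg)
{
	(void)arg;
	make_all_cpus_request();
	/* With the lock, this holds for both callers. */
	pthread_mutex_lock(&requests_lock);
	for (int i = 0; i < NVCPUS; i++)
		assert(delivered[i]);
	pthread_mutex_unlock(&requests_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, caller, NULL);
	pthread_create(&t2, NULL, caller, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("both callers returned only after delivery");
	return 0;
}
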
diff --git a/queue-2.6.30/kvm-vmx-handle-vmx-instruction-vmexits.patch b/queue-2.6.30/kvm-vmx-handle-vmx-instruction-vmexits.patch
new file mode 100644 (file)
index 0000000..f8c32bc
--- /dev/null
@@ -0,0 +1,52 @@
+From e3c7cb6ad7191e92ba89d00a7ae5f5dd1ca0c214 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Tue, 16 Jun 2009 14:19:52 +0300
+Subject: KVM: VMX: Handle vmx instruction vmexits
+
+From: Avi Kivity <avi@redhat.com>
+
+commit e3c7cb6ad7191e92ba89d00a7ae5f5dd1ca0c214 upstream.
+
+If a guest tries to use vmx instructions, inject a #UD to let it know the
+instruction is not implemented, rather than crashing.
+
+This prevents guest userspace from crashing the guest kernel.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/vmx.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3015,6 +3015,12 @@ static int handle_vmcall(struct kvm_vcpu
+       return 1;
+ }
++static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++      kvm_queue_exception(vcpu, UD_VECTOR);
++      return 1;
++}
++
+ static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+       u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+@@ -3182,6 +3188,15 @@ static int (*kvm_vmx_exit_handlers[])(st
+       [EXIT_REASON_HLT]                     = handle_halt,
+       [EXIT_REASON_INVLPG]                  = handle_invlpg,
+       [EXIT_REASON_VMCALL]                  = handle_vmcall,
++      [EXIT_REASON_VMCLEAR]                 = handle_vmx_insn,
++      [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
++      [EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
++      [EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
++      [EXIT_REASON_VMREAD]                  = handle_vmx_insn,
++      [EXIT_REASON_VMRESUME]                = handle_vmx_insn,
++      [EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
++      [EXIT_REASON_VMOFF]                   = handle_vmx_insn,
++      [EXIT_REASON_VMON]                    = handle_vmx_insn,
+       [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
+       [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
+       [EXIT_REASON_WBINVD]                  = handle_wbinvd,
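
The hunk above slots the new handler into kvm_vmx_exit_handlers, a table of
function pointers indexed by exit reason. The sketch below shows that
dispatch-table pattern in isolation; the exit-reason numbers, names and
handlers here are invented for the demo, not KVM's.

#include <stdio.h>

enum { EXIT_HLT, EXIT_VMCALL, EXIT_VMCLEAR, EXIT_VMLAUNCH, NR_EXITS };

static int handle_hlt(void)      { puts("halt");       return 1; }
static int handle_vmcall(void)   { puts("hypercall");  return 1; }
static int handle_vmx_insn(void) { puts("inject #UD"); return 1; }

/* One handler per exit reason; every VMX instruction exit maps to the
 * "not implemented, inject #UD" handler instead of going unhandled. */
static int (*exit_handlers[NR_EXITS])(void) = {
	[EXIT_HLT]      = handle_hlt,
	[EXIT_VMCALL]   = handle_vmcall,
	[EXIT_VMCLEAR]  = handle_vmx_insn,
	[EXIT_VMLAUNCH] = handle_vmx_insn,
};

int main(void)
{
	/* A guest attempting VMCLEAR now reaches a real handler. */
	return exit_handlers[EXIT_VMCLEAR]() ? 0 : 1;
}
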
diff --git a/queue-2.6.30/kvm-x86-check-for-cr3-validity-in-ioctl_set_sregs.patch b/queue-2.6.30/kvm-x86-check-for-cr3-validity-in-ioctl_set_sregs.patch
new file mode 100644 (file)
index 0000000..b48a47d
--- /dev/null
@@ -0,0 +1,53 @@
+From 59839dfff5eabca01cc4e20b45797a60a80af8cb Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Thu, 16 Apr 2009 08:30:44 -0300
+Subject: KVM: x86: check for cr3 validity in ioctl_set_sregs
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit 59839dfff5eabca01cc4e20b45797a60a80af8cb upstream.
+
+Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity
+checking for the new cr3 value:
+
+"Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to
+the kernel. This will trigger a NULL pointer access in gfn_to_rmap()
+when userspace next tries to call KVM_RUN on the affected VCPU and kvm
+attempts to activate the new non-existent page table root.
+
+This happens since kvm only validates that cr3 points to a valid guest
+physical memory page when code *inside* the guest sets cr3. However, kvm
+currently trusts the userspace caller (e.g. QEMU) on the host machine to
+always supply a valid page table root, rather than properly validating
+it along with the rest of the reloaded guest state."
+
+http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599
+
+Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple
+fault in case of failure.
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/x86.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3934,7 +3934,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+       vcpu->arch.cr2 = sregs->cr2;
+       mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+-      vcpu->arch.cr3 = sregs->cr3;
++
++      down_read(&vcpu->kvm->slots_lock);
++      if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
++              vcpu->arch.cr3 = sregs->cr3;
++      else
++              set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
++      up_read(&vcpu->kvm->slots_lock);
+       kvm_set_cr8(vcpu, sregs->cr8);
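
For context, the acceptance test above boils down to: translate the proposed
cr3 to a guest frame number and accept it only if some registered memory slot
covers that frame, otherwise schedule a triple fault instead of letting
KVM_RUN dereference a non-existent page table root later. A simplified
standalone sketch follows; struct memslot, the slot table and the example cr3
values are invented for illustration and are not KVM's real types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct memslot {
	uint64_t base_gfn;	/* first guest frame covered by the slot */
	uint64_t npages;	/* number of 4K frames in the slot       */
};

/* Invented example layout: 16M of guest RAM at gfn 0, 4M more at gfn 0x2000. */
static const struct memslot slots[] = {
	{ .base_gfn = 0x0000, .npages = 0x1000 },
	{ .base_gfn = 0x2000, .npages = 0x0400 },
};

static bool gfn_backed(uint64_t gfn)
{
	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return true;
	return false;
}

static void set_cr3(uint64_t cr3)
{
	if (gfn_backed(cr3 >> 12))	/* PAGE_SHIFT == 12 */
		printf("cr3 %#llx accepted\n", (unsigned long long)cr3);
	else
		printf("cr3 %#llx rejected: request triple fault\n",
		       (unsigned long long)cr3);
}

int main(void)
{
	set_cr3(0x00123000);	/* inside the first slot      */
	set_cr3(0xdead0000000);	/* bogus value from userspace */
	return 0;
}
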
diff --git a/queue-2.6.30/series b/queue-2.6.30/series
index b5d5a5374ef8b86496a142e763b32d0efb6be0ab..7bd85c2f9f390abca7200bc5ba6c5dd48143356a 100644 (file)
@@ -1,3 +1,13 @@
 firmware_map-fix-hang-with-x86-32bit.patch
 fs-remove-incorrect-i_new-warnings.patch
 pci-disable-aspm-on-via-root-port-under-bridge-configurations.patch
+kvm-add-vt-x-machine-check-support.patch
+kvm-disable-large-pages-on-misaligned-memory-slots.patch
+kvm-prevent-overflow-in-largepages-calculation.patch
+kvm-x86-check-for-cr3-validity-in-ioctl_set_sregs.patch
+kvm-vmx-handle-vmx-instruction-vmexits.patch
+kvm-protect-concurrent-make_all_cpus_request.patch
+kvm-fix-dirty-bit-tracking-for-slots-with-large-pages.patch
+ima-use-current_cred-instead-of-current-cred.patch
+ima-handle-dentry_open-failures.patch
+ima-open-all-files-o_largefile.patch