2.6.32 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Fri, 29 Oct 2010 22:29:15 +0000 (15:29 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Fri, 29 Oct 2010 22:29:15 +0000 (15:29 -0700)
19 files changed:
queue-2.6.32/0001-KVM-SVM-Fix-wrong-intercept-masks-on-32-bit.patch [new file with mode: 0644]
queue-2.6.32/0002-KVM-MMU-fix-direct-sp-s-access-corrupted.patch [new file with mode: 0644]
queue-2.6.32/0003-KVM-MMU-fix-conflict-access-permissions-in-direct-sp.patch [new file with mode: 0644]
queue-2.6.32/0004-KVM-VMX-Fix-host-GDT.LIMIT-corruption.patch [new file with mode: 0644]
queue-2.6.32/0005-KVM-SVM-Adjust-tsc_offset-only-if-tsc_unstable.patch [new file with mode: 0644]
queue-2.6.32/0006-KVM-x86-Fix-SVM-VMCB-reset.patch [new file with mode: 0644]
queue-2.6.32/0007-KVM-x86-Move-TSC-reset-out-of-vmcb_init.patch [new file with mode: 0644]
queue-2.6.32/0008-KVM-Fix-fs-gs-reload-oops-with-invalid-ldt.patch [new file with mode: 0644]
queue-2.6.32/bluetooth-fix-missing-null-check.patch [new file with mode: 0644]
queue-2.6.32/futex-fix-errors-in-nested-key-ref-counting.patch [new file with mode: 0644]
queue-2.6.32/mm-x86-saving-vmcore-with-non-lazy-freeing-of-vmas.patch [new file with mode: 0644]
queue-2.6.32/pipe-fix-failure-to-return-error-code-on-confirm.patch [new file with mode: 0644]
queue-2.6.32/series
queue-2.6.32/x86-cpu-fix-renamed-not-yet-shipping-amd-cpuid-feature-bit.patch [new file with mode: 0644]
queue-2.6.32/x86-intr-remap-set-redirection-hint-in-the-irte.patch [new file with mode: 0644]
queue-2.6.32/x86-kdump-change-copy_oldmem_page-to-use-cached-addressing.patch [new file with mode: 0644]
queue-2.6.32/x86-kexec-make-sure-to-stop-all-cpus-before-exiting-the-kernel.patch [new file with mode: 0644]
queue-2.6.32/x86-mtrr-assume-sys_cfg-exists-on-all-future-amd-cpus.patch [new file with mode: 0644]
queue-2.6.32/x86-olpc-don-t-retry-ec-commands-forever.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/0001-KVM-SVM-Fix-wrong-intercept-masks-on-32-bit.patch b/queue-2.6.32/0001-KVM-SVM-Fix-wrong-intercept-masks-on-32-bit.patch
new file mode 100644 (file)
index 0000000..970d5d7
--- /dev/null
@@ -0,0 +1,50 @@
+From mtosatti@redhat.com  Fri Oct 29 15:18:43 2010
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 28 Oct 2010 16:48:09 -0200
+Subject: KVM: SVM: Fix wrong intercept masks on 32 bit
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Jan Kiszka <jan.kiszka@siemens.com>, Gleb Natapov <gleb@redhat.com>, Joerg Roedel <joerg.roedel@amd.com>
+Message-ID: <20101028185026.905098341@amt.cnet>
+Content-Disposition: inline; filename=0001-KVM-SVM-Fix-wrong-intercept-masks-on-32-bit.patch
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 061e2fd16863009c8005b4b5fdfb75c7215c0b99 upstream.
+
+This patch makes KVM with SVM work again on 32 bit by
+correcting the masks used for iret interception. With the
+wrong masks, the upper 32 bits of the intercepts are masked
+out, which leaves vmrun unintercepted. This is not legal on
+SVM, so the vmrun fails.
+The bug was introduced by commits 95ba827313 and 3cfc3092.
+
+Cc: Jan Kiszka <jan.kiszka@siemens.com>
+Cc: Gleb Natapov <gleb@redhat.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2111,7 +2111,7 @@ static int cpuid_interception(struct vcp
+ static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+ {
+       ++svm->vcpu.stat.nmi_window_exits;
+-      svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
++      svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+       svm->vcpu.arch.hflags |= HF_IRET_MASK;
+       return 1;
+ }
+@@ -2506,7 +2506,7 @@ static void svm_inject_nmi(struct kvm_vc
+       svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+       vcpu->arch.hflags |= HF_NMI_MASK;
+-      svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
++      svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+       ++vcpu->stat.nmi_injections;
+ }
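
The 1UL vs 1ULL distinction above is the whole bug: on a 32-bit build,
~(1UL << INTERCEPT_IRET) is computed at 32-bit width and then
zero-extended, so the AND wipes bits 32-63 of the 64-bit intercept word,
including the mandatory VMRUN intercept. A minimal standalone sketch of
the effect; the INTERCEPT_* bit positions are illustrative stand-ins for
the 2.6.32 SVM header values:

    #include <stdio.h>
    #include <stdint.h>

    #define INTERCEPT_IRET  20   /* assumed bit positions, for illustration */
    #define INTERCEPT_VMRUN 32

    int main(void)
    {
        uint64_t intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_IRET);

        /* Simulate the 32-bit bug: the mask is only 32 bits wide, so its
         * zero-extension clears every intercept bit above bit 31. */
        uint32_t bad_mask = (uint32_t)~(1UL << INTERCEPT_IRET);
        uint64_t buggy = intercept & bad_mask;

        /* The fix keeps the mask 64 bits wide. */
        uint64_t fixed = intercept & ~(1ULL << INTERCEPT_IRET);

        printf("buggy: VMRUN intercept %s\n",
               (buggy & (1ULL << INTERCEPT_VMRUN)) ? "kept" : "LOST");
        printf("fixed: VMRUN intercept %s\n",
               (fixed & (1ULL << INTERCEPT_VMRUN)) ? "kept" : "LOST");
        return 0;
    }
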
diff --git a/queue-2.6.32/0002-KVM-MMU-fix-direct-sp-s-access-corrupted.patch b/queue-2.6.32/0002-KVM-MMU-fix-direct-sp-s-access-corrupted.patch
new file mode 100644 (file)
index 0000000..3851f99
--- /dev/null
@@ -0,0 +1,67 @@
+From mtosatti@redhat.com  Fri Oct 29 15:19:23 2010
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Date: Thu, 28 Oct 2010 16:48:10 -0200
+Subject: KVM: MMU: fix direct sp's access corrupted
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>, Greg Kroah-Hartman <gregkh@suse.de>
+Message-ID: <20101028185027.102565890@amt.cnet>
+Content-Disposition: inline; filename=0002-KVM-MMU-fix-direct-sp-s-access-corrupted.patch
+
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+
+commit 9e7b0e7fba45ca3c6357aeb7091ebc281f1de365 upstream.
+
+If the mapping is writable but the dirty flag is not set, we will find
+the read-only direct sp and set up the mapping there. If a write #PF
+then occurs, we will mark this mapping writable in the read-only direct
+sp, and from then on other genuinely read-only mappings can happily
+write through it without a #PF.
+
+This can break the guest's COW.
+
+Fixed by re-installing the mapping when the write #PF occurs.
+
+Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/paging_tmpl.h |   28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -318,8 +318,32 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
+                       break;
+               }
+-              if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+-                      continue;
++              if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
++                      struct kvm_mmu_page *child;
++                      unsigned direct_access;
++
++                      if (level != gw->level)
++                              continue;
++
++                      /*
++                       * For the direct sp, if the guest pte's dirty bit
++                       * changed from clean to dirty, it will corrupt the
++                       * sp's access: allow writable in the read-only sp,
++                       * so we should update the spte at this point to get
++                       * a new sp with the correct access.
++                       */
++                      direct_access = gw->pt_access & gw->pte_access;
++                      if (!is_dirty_gpte(gw->ptes[gw->level - 1]))
++                              direct_access &= ~ACC_WRITE_MASK;
++
++                      child = page_header(*sptep & PT64_BASE_ADDR_MASK);
++                      if (child->role.access == direct_access)
++                              continue;
++
++                      mmu_page_remove_parent_pte(child, sptep);
++                      __set_spte(sptep, shadow_trap_nonpresent_pte);
++                      kvm_flush_remote_tlbs(vcpu->kvm);
++              }
+               if (is_large_pte(*sptep)) {
+                       rmap_remove(vcpu->kvm, sptep);
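
The hunk above boils down to recomputing what the direct sp's access
should be now that the guest pte's dirty bit may have flipped, and
dropping the cached link if it no longer matches. A hedged toy model of
that comparison; the ACC_* bit values are illustrative, not the kernel's:

    #include <stdio.h>

    #define ACC_EXEC_MASK  0x1   /* illustrative permission bits */
    #define ACC_WRITE_MASK 0x2
    #define ACC_USER_MASK  0x4

    int main(void)
    {
        unsigned pt_access  = ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK;
        unsigned pte_access = ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK;
        int gpte_dirty = 1;  /* the guest pte just went clean -> dirty */

        /* what the direct sp's access should be after the change */
        unsigned direct_access = pt_access & pte_access;
        if (!gpte_dirty)
            direct_access &= ~ACC_WRITE_MASK;

        /* the sp that was linked while the gpte was still clean */
        unsigned cached_access = (pt_access & pte_access) & ~ACC_WRITE_MASK;

        puts(cached_access == direct_access
             ? "cached sp still valid"
             : "access changed: unlink it and fetch a fresh sp");
        return 0;
    }
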
diff --git a/queue-2.6.32/0003-KVM-MMU-fix-conflict-access-permissions-in-direct-sp.patch b/queue-2.6.32/0003-KVM-MMU-fix-conflict-access-permissions-in-direct-sp.patch
new file mode 100644 (file)
index 0000000..7a1fe60
--- /dev/null
@@ -0,0 +1,57 @@
+From mtosatti@redhat.com  Fri Oct 29 15:19:51 2010
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Date: Thu, 28 Oct 2010 16:48:11 -0200
+Subject: KVM: MMU: fix conflict access permissions in direct sp
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Message-ID: <20101028185027.298066184@amt.cnet>
+Content-Disposition: inline; filename=0003-KVM-MMU-fix-conflict-access-permissions-in-direct-sp.patch
+
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+
+commit 5fd5387c89ec99ff6cb82d2477ffeb7211b781c2 upstream.
+
+In non-direct (shadow) mapping mode, we mark an sp as 'direct' when we
+map the guest's large page, but its access is encoded entirely from the
+upper page-structure entries and does not include the last mapping;
+this can cause an access conflict.
+
+For example, consider this mapping:
+        [W]
+      / PDE1 -> |---|
+  P[W]          |   | LPA
+      \ PDE2 -> |---|
+        [R]
+
+P has two children, PDE1 and PDE2, and both PDE1 and PDE2 map the
+same large page (LPA). P's access is WR, PDE1's access is WR, and
+PDE2's access is RO (considering only read-write permissions here).
+
+When the guest accesses through PDE1, we create a direct sp for LPA;
+the sp's access comes from P and is W, so we mark the ptes W in this sp.
+
+Then, when the guest accesses through PDE2, we find LPA's existing
+shadow page (the same one as PDE1's) and mark the ptes RO.
+
+So if the guest now accesses through PDE1 again, an incorrect #PF
+occurs.
+
+Fixed by encoding the last mapping's access into the direct shadow page.
+
+Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/paging_tmpl.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -360,6 +360,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
+                       /* advance table_gfn when emulating 1gb pages with 4k */
+                       if (delta == 0)
+                               table_gfn += PT_INDEX(addr, level);
++                      access &= gw->pte_access;
+               } else {
+                       direct = 0;
+                       table_gfn = gw->table_gfn[level - 2];
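
The one-line fix is a permission intersection: the direct sp's access
must include the permissions of the last-level guest entry, not just the
upper levels. A tiny standalone illustration (ACC_W is an invented
stand-in for the kernel's write-permission bit):

    #include <stdio.h>

    #define ACC_W 0x2   /* illustrative write-permission bit */

    int main(void)
    {
        unsigned upper_access = ACC_W;   /* P and PDE1 allow writes */
        unsigned pte2_access  = 0;       /* PDE2 is read-only       */

        unsigned before = upper_access;                 /* last level ignored   */
        unsigned after  = upper_access & pte2_access;   /* access &= pte_access */

        printf("sp reached via PDE2 writable? before: %s, after: %s\n",
               (before & ACC_W) ? "yes (conflict)" : "no",
               (after  & ACC_W) ? "yes (conflict)" : "no");
        return 0;
    }
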
diff --git a/queue-2.6.32/0004-KVM-VMX-Fix-host-GDT.LIMIT-corruption.patch b/queue-2.6.32/0004-KVM-VMX-Fix-host-GDT.LIMIT-corruption.patch
new file mode 100644 (file)
index 0000000..b770472
--- /dev/null
@@ -0,0 +1,53 @@
+From mtosatti@redhat.com  Fri Oct 29 15:20:19 2010
+From: Avi Kivity <avi@redhat.com>
+Date: Thu, 28 Oct 2010 16:48:12 -0200
+Subject: KVM: VMX: Fix host GDT.LIMIT corruption
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <20101028185027.500376357@amt.cnet>
+Content-Disposition: inline; filename=0004-KVM-VMX-Fix-host-GDT.LIMIT-corruption.patch
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 3444d7da1839b851eefedd372978d8a982316c36 upstream.
+
+vmx does not restore GDT.LIMIT to the host value; instead it sets it to 64KB.
+This means host userspace can learn a few bits of host memory.
+
+Fix by reloading GDTR when we load other host state.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/vmx.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -130,6 +130,7 @@ static u64 construct_eptp(unsigned long
+ static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
++static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
+ static unsigned long *vmx_io_bitmap_a;
+ static unsigned long *vmx_io_bitmap_b;
+@@ -690,6 +691,7 @@ static void __vmx_load_host_state(struct
+       save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+       load_msrs(vmx->host_msrs, vmx->save_nmsrs);
+       reload_host_efer(vmx);
++      load_gdt(&__get_cpu_var(host_gdt));
+ }
+ static void vmx_load_host_state(struct vcpu_vmx *vmx)
+@@ -1176,6 +1178,8 @@ static void hardware_enable(void *garbag
+       asm volatile (ASM_VMX_VMXON_RAX
+                     : : "a"(&phys_addr), "m"(phys_addr)
+                     : "memory", "cc");
++
++      store_gdt(&__get_cpu_var(host_gdt));
+ }
+ static void vmclear_local_vcpus(void)
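
The pairing the patch establishes: sgdt captures the GDTR (base and
limit) once per cpu when VMX is enabled, and lgdt restores it whenever
other host state is reloaded, because a VM exit restores the GDT base
but forces GDT.LIMIT to 0xffff. A simplified kernel-context sketch of
what the desc.h helpers used above do (not the kernel's exact
definitions):

    struct desc_ptr {
            unsigned short size;      /* GDT.LIMIT */
            unsigned long  address;   /* GDT.BASE  */
    } __attribute__((packed));

    static inline void store_gdt(struct desc_ptr *dtr)
    {
            asm volatile("sgdt %0" : "=m" (*dtr));   /* called from hardware_enable() */
    }

    static inline void load_gdt(const struct desc_ptr *dtr)
    {
            asm volatile("lgdt %0" : : "m" (*dtr));  /* called from __vmx_load_host_state() */
    }

With the true limit saved per cpu and reloaded after each exit, host
userspace can no longer probe the up-to-64KB window that the inflated
limit exposed.
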
diff --git a/queue-2.6.32/0005-KVM-SVM-Adjust-tsc_offset-only-if-tsc_unstable.patch b/queue-2.6.32/0005-KVM-SVM-Adjust-tsc_offset-only-if-tsc_unstable.patch
new file mode 100644 (file)
index 0000000..3f3e904
--- /dev/null
@@ -0,0 +1,60 @@
+From mtosatti@redhat.com  Fri Oct 29 15:20:39 2010
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 28 Oct 2010 16:48:13 -0200
+Subject: KVM: SVM: Adjust tsc_offset only if tsc_unstable
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Joerg Roedel <joerg.roedel@amd.com>
+Message-ID: <20101028185027.705918116@amt.cnet>
+Content-Disposition: inline; filename=0005-KVM-SVM-Adjust-tsc_offset-only-if-tsc_unstable.patch
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 953899b659adce62cbe83d6a7527550ab8797c48 upstream.
+
+The tsc_offset adjustment in svm_vcpu_load is executed
+unconditionally, even if Linux considers the host tsc stable.
+This causes a Linux guest to detect an unstable tsc in any case.
+
+This patch removes the tsc_offset adjustment when the host tsc is
+stable, so the guest now gets the benefit of a stable tsc too.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c |   21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -795,17 +795,18 @@ static void svm_vcpu_load(struct kvm_vcp
+       int i;
+       if (unlikely(cpu != vcpu->cpu)) {
+-              u64 tsc_this, delta;
++              u64 delta;
+-              /*
+-               * Make sure that the guest sees a monotonically
+-               * increasing TSC.
+-               */
+-              rdtscll(tsc_this);
+-              delta = vcpu->arch.host_tsc - tsc_this;
+-              svm->vmcb->control.tsc_offset += delta;
+-              if (is_nested(svm))
+-                      svm->nested.hsave->control.tsc_offset += delta;
++              if (check_tsc_unstable()) {
++                      /*
++                       * Make sure that the guest sees a monotonically
++                       * increasing TSC.
++                       */
++                      delta = vcpu->arch.host_tsc - native_read_tsc();
++                      svm->vmcb->control.tsc_offset += delta;
++                      if (is_nested(svm))
++                              svm->nested.hsave->control.tsc_offset += delta;
++              }
+               vcpu->cpu = cpu;
+               kvm_migrate_timers(vcpu);
+               svm->asid_generation = 0;
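
SVM computes the guest-visible TSC as host_tsc + tsc_offset, so moving a
vcpu to a cpu whose TSC lags behind would make the guest clock jump
backwards unless the offset absorbs the difference. A small runnable
illustration with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tsc_cpu0 = 1000000;   /* host TSC snapshot when leaving cpu 0 */
        uint64_t tsc_cpu1 =  400000;   /* cpu 1's TSC is behind cpu 0's */
        uint64_t tsc_offset = 0;

        /* without this, the guest would read 400000 after reading 1000000 */
        tsc_offset += tsc_cpu0 - tsc_cpu1;

        printf("guest TSC on cpu 1: %llu (still monotonic)\n",
               (unsigned long long)(tsc_cpu1 + tsc_offset));
        return 0;
    }

When the host TSC is synchronized across cpus the delta is (near) zero,
and after this patch the offset is left untouched, so the guest sees the
stable host TSC directly.
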
diff --git a/queue-2.6.32/0006-KVM-x86-Fix-SVM-VMCB-reset.patch b/queue-2.6.32/0006-KVM-x86-Fix-SVM-VMCB-reset.patch
new file mode 100644 (file)
index 0000000..2b87293
--- /dev/null
@@ -0,0 +1,35 @@
+From mtosatti@redhat.com  Fri Oct 29 15:21:03 2010
+From: Zachary Amsden <zamsden@redhat.com>
+Date: Thu, 28 Oct 2010 16:48:14 -0200
+Subject: KVM: x86: Fix SVM VMCB reset
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Zachary Amsden <zamsden@redhat.com>
+Message-ID: <20101028185027.916574115@amt.cnet>
+Content-Disposition: inline; filename=0006-KVM-x86-Fix-SVM-VMCB-reset.patch
+
+From: Zachary Amsden <zamsden@redhat.com>
+
+commit 58877679fd393d3ef71aa383031ac7817561463d upstream.
+
+On reset, the VMCB TSC should be set to zero.  Instead, the code was
+setting tsc_offset to zero, which passes through the underlying TSC.
+
+Signed-off-by: Zachary Amsden <zamsden@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -621,7 +621,7 @@ static void init_vmcb(struct vcpu_svm *s
+       control->iopm_base_pa = iopm_base;
+       control->msrpm_base_pa = __pa(svm->msrpm);
+-      control->tsc_offset = 0;
++      control->tsc_offset = 0-native_read_tsc();
+       control->int_ctl = V_INTR_MASKING_MASK;
+       init_seg(&save->es);
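
The same guest_tsc = host_tsc + tsc_offset relation explains the fix: an
offset of zero passes the raw host TSC through to a freshly reset guest,
while an offset of 0 - host_tsc makes the guest TSC start at zero. A toy
model of the before/after values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t host_tsc = 123456789;          /* stand-in for native_read_tsc() */

        uint64_t old_offset = 0;                /* before: guest sees the host TSC */
        uint64_t new_offset = 0 - host_tsc;     /* after: guest TSC reads as zero */

        printf("guest TSC at reset, before: %llu\n",
               (unsigned long long)(host_tsc + old_offset));
        printf("guest TSC at reset, after:  %llu\n",
               (unsigned long long)(host_tsc + new_offset));
        return 0;
    }
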
diff --git a/queue-2.6.32/0007-KVM-x86-Move-TSC-reset-out-of-vmcb_init.patch b/queue-2.6.32/0007-KVM-x86-Move-TSC-reset-out-of-vmcb_init.patch
new file mode 100644 (file)
index 0000000..ece32b7
--- /dev/null
@@ -0,0 +1,45 @@
+From mtosatti@redhat.com  Fri Oct 29 15:21:28 2010
+From: Zachary Amsden <zamsden@redhat.com>
+Date: Thu, 28 Oct 2010 16:48:15 -0200
+Subject: KVM: x86: Move TSC reset out of vmcb_init
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org, Zachary Amsden <zamsden@redhat.com>
+Message-ID: <20101028185028.133694655@amt.cnet>
+Content-Disposition: inline; filename=0007-KVM-x86-Move-TSC-reset-out-of-vmcb_init.patch
+
+From: Zachary Amsden <zamsden@redhat.com>
+
+commit 47008cd887c1836bcadda123ba73e1863de7a6c4 upstream.
+
+The VMCB is reset whenever we receive a startup IPI, so Linux setting the
+TSC back to zero happens very late in the boot process, destabilizing
+the TSC.  Instead, just set the TSC to zero once at VCPU creation time.
+
+Why the separate patch?  So git-bisect is your friend.
+
+Signed-off-by: Zachary Amsden <zamsden@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -621,7 +621,6 @@ static void init_vmcb(struct vcpu_svm *s
+       control->iopm_base_pa = iopm_base;
+       control->msrpm_base_pa = __pa(svm->msrpm);
+-      control->tsc_offset = 0-native_read_tsc();
+       control->int_ctl = V_INTR_MASKING_MASK;
+       init_seg(&save->es);
+@@ -754,6 +753,7 @@ static struct kvm_vcpu *svm_create_vcpu(
+       svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+       svm->asid_generation = 0;
+       init_vmcb(svm);
++      svm->vmcb->control.tsc_offset = 0-native_read_tsc();
+       fx_init(&svm->vcpu);
+       svm->vcpu.fpu_active = 1;
diff --git a/queue-2.6.32/0008-KVM-Fix-fs-gs-reload-oops-with-invalid-ldt.patch b/queue-2.6.32/0008-KVM-Fix-fs-gs-reload-oops-with-invalid-ldt.patch
new file mode 100644 (file)
index 0000000..726f1b2
--- /dev/null
@@ -0,0 +1,168 @@
+From mtosatti@redhat.com  Fri Oct 29 15:21:49 2010
+From: Avi Kivity <avi@redhat.com>
+Date: Thu, 28 Oct 2010 16:48:16 -0200
+Subject: KVM: Fix fs/gs reload oops with invalid ldt
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <20101028185028.315840662@amt.cnet>
+Content-Disposition: inline; filename=0008-KVM-Fix-fs-gs-reload-oops-with-invalid-ldt.patch
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 9581d442b9058d3699b4be568b6e5eae38a41493 upstream.
+
+kvm reloads the host's fs and gs blindly; however, the underlying segment
+descriptors may be invalid because the user may have modified the ldt after
+they were loaded.
+
+Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
+of home-grown unsafe versions.
+
+This is CVE-2010-3698.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/kvm_host.h |   24 ------------------------
+ arch/x86/kvm/svm.c              |   15 ++++++++++-----
+ arch/x86/kvm/vmx.c              |   24 +++++++++---------------
+ 3 files changed, 19 insertions(+), 44 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -674,20 +674,6 @@ static inline struct kvm_mmu_page *page_
+       return (struct kvm_mmu_page *)page_private(page);
+ }
+-static inline u16 kvm_read_fs(void)
+-{
+-      u16 seg;
+-      asm("mov %%fs, %0" : "=g"(seg));
+-      return seg;
+-}
+-
+-static inline u16 kvm_read_gs(void)
+-{
+-      u16 seg;
+-      asm("mov %%gs, %0" : "=g"(seg));
+-      return seg;
+-}
+-
+ static inline u16 kvm_read_ldt(void)
+ {
+       u16 ldt;
+@@ -695,16 +681,6 @@ static inline u16 kvm_read_ldt(void)
+       return ldt;
+ }
+-static inline void kvm_load_fs(u16 sel)
+-{
+-      asm("mov %0, %%fs" : : "rm"(sel));
+-}
+-
+-static inline void kvm_load_gs(u16 sel)
+-{
+-      asm("mov %0, %%gs" : : "rm"(sel));
+-}
+-
+ static inline void kvm_load_ldt(u16 sel)
+ {
+       asm("lldt %0" : : "rm"(sel));
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2698,8 +2698,8 @@ static void svm_vcpu_run(struct kvm_vcpu
+       sync_lapic_to_cr8(vcpu);
+       save_host_msrs(vcpu);
+-      fs_selector = kvm_read_fs();
+-      gs_selector = kvm_read_gs();
++      savesegment(fs, fs_selector);
++      savesegment(gs, gs_selector);
+       ldt_selector = kvm_read_ldt();
+       svm->vmcb->save.cr2 = vcpu->arch.cr2;
+       /* required for live migration with NPT */
+@@ -2786,10 +2786,15 @@ static void svm_vcpu_run(struct kvm_vcpu
+       vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+       vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+-      kvm_load_fs(fs_selector);
+-      kvm_load_gs(gs_selector);
+-      kvm_load_ldt(ldt_selector);
+       load_host_msrs(vcpu);
++      loadsegment(fs, fs_selector);
++#ifdef CONFIG_X86_64
++      load_gs_index(gs_selector);
++      wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
++#else
++      loadsegment(gs, gs_selector);
++#endif
++      kvm_load_ldt(ldt_selector);
+       reload_tss(vcpu);
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -629,7 +629,7 @@ static void vmx_save_host_state(struct k
+        */
+       vmx->host_state.ldt_sel = kvm_read_ldt();
+       vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+-      vmx->host_state.fs_sel = kvm_read_fs();
++      savesegment(fs, vmx->host_state.fs_sel);
+       if (!(vmx->host_state.fs_sel & 7)) {
+               vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
+               vmx->host_state.fs_reload_needed = 0;
+@@ -637,7 +637,7 @@ static void vmx_save_host_state(struct k
+               vmcs_write16(HOST_FS_SELECTOR, 0);
+               vmx->host_state.fs_reload_needed = 1;
+       }
+-      vmx->host_state.gs_sel = kvm_read_gs();
++      savesegment(gs, vmx->host_state.gs_sel);
+       if (!(vmx->host_state.gs_sel & 7))
+               vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
+       else {
+@@ -665,27 +665,21 @@ static void vmx_save_host_state(struct k
+ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
+ {
+-      unsigned long flags;
+-
+       if (!vmx->host_state.loaded)
+               return;
+       ++vmx->vcpu.stat.host_state_reload;
+       vmx->host_state.loaded = 0;
+       if (vmx->host_state.fs_reload_needed)
+-              kvm_load_fs(vmx->host_state.fs_sel);
++              loadsegment(fs, vmx->host_state.fs_sel);
+       if (vmx->host_state.gs_ldt_reload_needed) {
+               kvm_load_ldt(vmx->host_state.ldt_sel);
+-              /*
+-               * If we have to reload gs, we must take care to
+-               * preserve our gs base.
+-               */
+-              local_irq_save(flags);
+-              kvm_load_gs(vmx->host_state.gs_sel);
+ #ifdef CONFIG_X86_64
+-              wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
++              load_gs_index(vmx->host_state.gs_sel);
++              wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
++#else
++              loadsegment(gs, vmx->host_state.gs_sel);
+ #endif
+-              local_irq_restore(flags);
+       }
+       reload_tss();
+       save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+@@ -2342,8 +2336,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
+       vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+       vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+       vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+-      vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
+-      vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
++      vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
++      vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
+       vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+       rdmsrl(MSR_FS_BASE, a);
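
The underlying hazard: loading a segment register faults immediately if
the selector references an invalid descriptor, so a raw mov in the
vcpu-run path oopses when userspace has replaced the LDT. loadsegment()
wraps the mov in an exception fixup that falls back to the null
selector. A hedged userspace demonstration that the load itself is what
faults (an LDT-relative selector in a process that never created an LDT;
on Linux the resulting #GP typically arrives as SIGSEGV):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void on_fault(int sig)
    {
        printf("segment load faulted (signal %d), as expected\n", sig);
        exit(0);
    }

    int main(void)
    {
        signal(SIGSEGV, on_fault);
        signal(SIGBUS, on_fault);

        /* index 1, TI=1 (LDT), RPL=3 -- but this process has no LDT */
        unsigned short bogus = (1 << 3) | 0x4 | 0x3;

        asm volatile("mov %0, %%fs" : : "rm"(bogus));
        printf("unexpected: bogus selector loaded cleanly\n");
        return 0;
    }
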
diff --git a/queue-2.6.32/bluetooth-fix-missing-null-check.patch b/queue-2.6.32/bluetooth-fix-missing-null-check.patch
new file mode 100644 (file)
index 0000000..a023a79
--- /dev/null
@@ -0,0 +1,40 @@
+From c19483cc5e56ac5e22dd19cf25ba210ab1537773 Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@linux.intel.com>
+Date: Fri, 22 Oct 2010 14:11:26 +0100
+Subject: bluetooth: Fix missing NULL check
+
+From: Alan Cox <alan@linux.intel.com>
+
+commit c19483cc5e56ac5e22dd19cf25ba210ab1537773 upstream.
+
+Fortunately this is only exploitable on very unusual hardware.
+
+[Reported a while ago but nothing happened so just fixing it]
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/bluetooth/hci_ldisc.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -258,9 +258,16 @@ static int hci_uart_tty_open(struct tty_
+       BT_DBG("tty %p", tty);
++      /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
++         the pointer */
+       if (hu)
+               return -EEXIST;
++      /* Error if the tty has no write op instead of leaving an exploitable
++         hole */
++      if (tty->ops->write == NULL)
++              return -EOPNOTSUPP;
++
+       if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
+               BT_ERR("Can't allocate control structure");
+               return -ENFILE;
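
The fix is an instance of a general defensive rule: when a struct of
operations allows NULL members, every entry point that will later
dereference one must validate it up front. A minimal runnable sketch of
the pattern (names invented for illustration):

    #include <stdio.h>

    #define EOPNOTSUPP 95   /* illustrative errno value */

    struct tty_ops { int (*write)(const char *buf, int len); };

    static int ldisc_open(const struct tty_ops *ops)
    {
        if (ops->write == NULL)     /* the previously missing check */
            return -EOPNOTSUPP;     /* fail early instead of oopsing later */
        return 0;
    }

    int main(void)
    {
        struct tty_ops no_write = { .write = NULL };
        printf("ldisc_open: %d\n", ldisc_open(&no_write));
        return 0;
    }
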
diff --git a/queue-2.6.32/futex-fix-errors-in-nested-key-ref-counting.patch b/queue-2.6.32/futex-fix-errors-in-nested-key-ref-counting.patch
new file mode 100644 (file)
index 0000000..62e9bc4
--- /dev/null
@@ -0,0 +1,142 @@
+From 7ada876a8703f23befbb20a7465a702ee39b1704 Mon Sep 17 00:00:00 2001
+From: Darren Hart <dvhart@linux.intel.com>
+Date: Sun, 17 Oct 2010 08:35:04 -0700
+Subject: futex: Fix errors in nested key ref-counting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Darren Hart <dvhart@linux.intel.com>
+
+commit 7ada876a8703f23befbb20a7465a702ee39b1704 upstream.
+
+futex_wait() is leaking key references due to futex_wait_setup()
+acquiring an additional reference via the queue_lock() routine. The
+nested key ref-counting has been masking bugs and complicating code
+analysis. queue_lock() is only called with a previously ref-counted
+key, so remove the additional ref-counting from the queue_(un)lock()
+functions.
+
+Also futex_wait_requeue_pi() drops one key reference too many in
+unqueue_me_pi(). Remove the key reference handling from
+unqueue_me_pi(). This was paired with a queue_lock() in
+futex_lock_pi(), so the count remains unchanged.
+
+Document remaining nested key ref-counting sites.
+
+Signed-off-by: Darren Hart <dvhart@linux.intel.com>
+Reported-and-tested-by: Matthieu Fertré <matthieu.fertre@kerlabs.com>
+Reported-by: Louis Rilling <louis.rilling@kerlabs.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+LKML-Reference: <4CBB17A8.70401@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/futex.c |   31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1363,7 +1363,6 @@ static inline struct futex_hash_bucket *
+ {
+       struct futex_hash_bucket *hb;
+-      get_futex_key_refs(&q->key);
+       hb = hash_futex(&q->key);
+       q->lock_ptr = &hb->lock;
+@@ -1375,7 +1374,6 @@ static inline void
+ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+ {
+       spin_unlock(&hb->lock);
+-      drop_futex_key_refs(&q->key);
+ }
+ /**
+@@ -1480,8 +1478,6 @@ static void unqueue_me_pi(struct futex_q
+       q->pi_state = NULL;
+       spin_unlock(q->lock_ptr);
+-
+-      drop_futex_key_refs(&q->key);
+ }
+ /*
+@@ -1812,7 +1808,10 @@ static int futex_wait(u32 __user *uaddr,
+       }
+ retry:
+-      /* Prepare to wait on uaddr. */
++      /*
++       * Prepare to wait on uaddr. On success, holds hb lock and increments
++       * q.key refs.
++       */
+       ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+       if (ret)
+               goto out;
+@@ -1822,24 +1821,23 @@ retry:
+       /* If we were woken (and unqueued), we succeeded, whatever. */
+       ret = 0;
++      /* unqueue_me() drops q.key ref */
+       if (!unqueue_me(&q))
+-              goto out_put_key;
++              goto out;
+       ret = -ETIMEDOUT;
+       if (to && !to->task)
+-              goto out_put_key;
++              goto out;
+       /*
+        * We expect signal_pending(current), but we might be the
+        * victim of a spurious wakeup as well.
+        */
+-      if (!signal_pending(current)) {
+-              put_futex_key(fshared, &q.key);
++      if (!signal_pending(current))
+               goto retry;
+-      }
+       ret = -ERESTARTSYS;
+       if (!abs_time)
+-              goto out_put_key;
++              goto out;
+       restart = &current_thread_info()->restart_block;
+       restart->fn = futex_wait_restart;
+@@ -1856,8 +1854,6 @@ retry:
+       ret = -ERESTART_RESTARTBLOCK;
+-out_put_key:
+-      put_futex_key(fshared, &q.key);
+ out:
+       if (to) {
+               hrtimer_cancel(&to->timer);
+@@ -2236,7 +2232,10 @@ static int futex_wait_requeue_pi(u32 __u
+       q.rt_waiter = &rt_waiter;
+       q.requeue_pi_key = &key2;
+-      /* Prepare to wait on uaddr. */
++      /*
++       * Prepare to wait on uaddr. On success, increments q.key (key1) ref
++       * count.
++       */
+       ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+       if (ret)
+               goto out_key2;
+@@ -2254,7 +2253,9 @@ static int futex_wait_requeue_pi(u32 __u
+        * In order for us to be here, we know our q.key == key2, and since
+        * we took the hb->lock above, we also know that futex_requeue() has
+        * completed and we no longer have to concern ourselves with a wakeup
+-       * race with the atomic proxy lock acquition by the requeue code.
++       * race with the atomic proxy lock acquisition by the requeue code. The
++       * futex_requeue dropped our key1 reference and incremented our key2
++       * reference count.
+        */
+       /* Check if the requeue code acquired the second futex for us. */
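
The leak is easiest to see as a counting exercise: before the patch,
futex_wait() took one key reference in futex_wait_setup() and
queue_lock() silently took a second, but the common wakeup path dropped
only one. A toy model of the before/after balance (not the kernel code):

    #include <stdio.h>

    static int refs;
    static void get_ref(void)  { refs++; }
    static void drop_ref(void) { refs--; }

    static void futex_wait_before(void)
    {
        get_ref();      /* get_futex_key() */
        get_ref();      /* hidden extra reference inside queue_lock() */
        drop_ref();     /* unqueue_me() */
        /* woken path returned here: one reference leaked */
    }

    static void futex_wait_after(void)
    {
        get_ref();      /* get_futex_key(): the only reference taken */
        drop_ref();     /* unqueue_me() drops it */
    }

    int main(void)
    {
        refs = 0; futex_wait_before(); printf("before: %d ref(s) leaked\n", refs);
        refs = 0; futex_wait_after();  printf("after:  %d ref(s) leaked\n", refs);
        return 0;
    }
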
diff --git a/queue-2.6.32/mm-x86-saving-vmcore-with-non-lazy-freeing-of-vmas.patch b/queue-2.6.32/mm-x86-saving-vmcore-with-non-lazy-freeing-of-vmas.patch
new file mode 100644 (file)
index 0000000..cc94c87
--- /dev/null
@@ -0,0 +1,72 @@
+From 3ee48b6af49cf534ca2f481ecc484b156a41451d Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Thu, 16 Sep 2010 11:44:02 -0500
+Subject: mm, x86: Saving vmcore with non-lazy freeing of vmas
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 3ee48b6af49cf534ca2f481ecc484b156a41451d upstream.
+
+While reading /proc/vmcore the kernel does ioremap()/iounmap()
+repeatedly, and the buildup of un-flushed vm_area_structs causes
+a great deal of overhead (rb_next() chews up most of that time).
+
+The solution is to provide a function, set_iounmap_nonlazy(), that
+causes a subsequent call to iounmap() to purge the vma area
+immediately (with try_purge_vmap_area_lazy()).
+
+With this patch we have seen the time for writing a 250MB
+compressed dump drop from 71 seconds to 44 seconds.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: kexec@lists.infradead.org
+LKML-Reference: <E1OwHZ4-0005WK-Tw@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/io.h       |    1 +
+ arch/x86/kernel/crash_dump_64.c |    1 +
+ mm/vmalloc.c                    |    9 +++++++++
+ 3 files changed, 11 insertions(+)
+
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -172,6 +172,7 @@ static inline void __iomem *ioremap(reso
+ extern void iounmap(volatile void __iomem *addr);
++extern void set_iounmap_nonlazy(void);
+ #ifdef CONFIG_X86_32
+ # include "io_32.h"
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long p
+       } else
+               memcpy(buf, vaddr + offset, csize);
++      set_iounmap_nonlazy();
+       iounmap(vaddr);
+       return csize;
+ }
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -513,6 +513,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_IN
+ static void purge_fragmented_blocks_allcpus(void);
+ /*
++ * called before a call to iounmap() if the caller wants vm_area_struct's
++ * immediately freed.
++ */
++void set_iounmap_nonlazy(void)
++{
++      atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
++}
++
++/*
+  * Purges all lazily-freed vmap areas.
+  *
+  * If sync is 0 then don't purge if there is already a purge in progress.
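
The trick in set_iounmap_nonlazy() is to pre-charge the lazy-unmap
counter past its threshold so the very next iounmap() purges instead of
deferring. A toy model of that threshold logic (the names mirror the
patch, the bodies are invented stand-ins):

    #include <stdio.h>

    static unsigned long lazy_max_pages(void) { return 32; }   /* stand-in */
    static unsigned long vmap_lazy_nr;

    /* pretend the lazy pool is already over its limit */
    static void set_iounmap_nonlazy(void) { vmap_lazy_nr = lazy_max_pages() + 1; }

    static void iounmap_toy(void)
    {
        if (vmap_lazy_nr > lazy_max_pages())
            puts("purging vmap areas immediately");
        else
            puts("deferring purge (lazy)");
    }

    int main(void)
    {
        iounmap_toy();             /* default: lazy */
        set_iounmap_nonlazy();
        iounmap_toy();             /* now purges right away */
        return 0;
    }
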
diff --git a/queue-2.6.32/pipe-fix-failure-to-return-error-code-on-confirm.patch b/queue-2.6.32/pipe-fix-failure-to-return-error-code-on-confirm.patch
new file mode 100644 (file)
index 0000000..192292f
--- /dev/null
@@ -0,0 +1,31 @@
+From e5953cbdff26f7cbae7eff30cd9b18c4e19b7594 Mon Sep 17 00:00:00 2001
+From: Nicolas Kaiser <nikai@nikai.net>
+Date: Thu, 21 Oct 2010 14:56:00 +0200
+Subject: pipe: fix failure to return error code on ->confirm()
+
+From: Nicolas Kaiser <nikai@nikai.net>
+
+commit e5953cbdff26f7cbae7eff30cd9b18c4e19b7594 upstream.
+
+The arguments were transposed; we want to assign the error code to
+'ret', which is what gets returned.
+
+Signed-off-by: Nicolas Kaiser <nikai@nikai.net>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/pipe.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -363,7 +363,7 @@ pipe_read(struct kiocb *iocb, const stru
+                       error = ops->confirm(pipe, buf);
+                       if (error) {
+                               if (!ret)
+-                                      error = ret;
++                                      ret = error;
+                               break;
+                       }
diff --git a/queue-2.6.32/series b/queue-2.6.32/series
index 126bf49a844a44adcaa339c7a9eefd274e8922bd..98a954d7473cb44bbe2a225c2c793999b78e7d96 100644 (file)
@@ -3,3 +3,21 @@ staging-usbip-process-event-flags-without-delay.patch
 powerpc-perf-fix-sampling-enable-for-ppc970.patch
 pcmcia-synclink_cs-fix-information-leak-to-userland.patch
 sched-fix-string-comparison-in-proc-sched_features.patch
+bluetooth-fix-missing-null-check.patch
+futex-fix-errors-in-nested-key-ref-counting.patch
+mm-x86-saving-vmcore-with-non-lazy-freeing-of-vmas.patch
+x86-cpu-fix-renamed-not-yet-shipping-amd-cpuid-feature-bit.patch
+x86-kexec-make-sure-to-stop-all-cpus-before-exiting-the-kernel.patch
+x86-olpc-don-t-retry-ec-commands-forever.patch
+x86-mtrr-assume-sys_cfg-exists-on-all-future-amd-cpus.patch
+x86-intr-remap-set-redirection-hint-in-the-irte.patch
+x86-kdump-change-copy_oldmem_page-to-use-cached-addressing.patch
+0001-KVM-SVM-Fix-wrong-intercept-masks-on-32-bit.patch
+0002-KVM-MMU-fix-direct-sp-s-access-corrupted.patch
+0003-KVM-MMU-fix-conflict-access-permissions-in-direct-sp.patch
+0004-KVM-VMX-Fix-host-GDT.LIMIT-corruption.patch
+0005-KVM-SVM-Adjust-tsc_offset-only-if-tsc_unstable.patch
+0006-KVM-x86-Fix-SVM-VMCB-reset.patch
+0007-KVM-x86-Move-TSC-reset-out-of-vmcb_init.patch
+0008-KVM-Fix-fs-gs-reload-oops-with-invalid-ldt.patch
+pipe-fix-failure-to-return-error-code-on-confirm.patch
diff --git a/queue-2.6.32/x86-cpu-fix-renamed-not-yet-shipping-amd-cpuid-feature-bit.patch b/queue-2.6.32/x86-cpu-fix-renamed-not-yet-shipping-amd-cpuid-feature-bit.patch
new file mode 100644 (file)
index 0000000..ce1bb3d
--- /dev/null
@@ -0,0 +1,48 @@
+From 7ef8aa72ab176e0288f363d1247079732c5d5792 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@amd.com>
+Date: Mon, 6 Sep 2010 15:14:17 +0200
+Subject: x86, cpu: Fix renamed, not-yet-shipping AMD CPUID feature bit
+
+From: Andre Przywara <andre.przywara@amd.com>
+
+commit 7ef8aa72ab176e0288f363d1247079732c5d5792 upstream.
+
+The AMD SSE5 feature set has been replaced by some extensions
+to the AVX instruction set; thus the bit formerly advertised as SSE5
+is re-used for one of these extensions (XOP).
+Although this changes the /proc/cpuinfo output, it is not user visible, as
+there are no CPUs (yet) having this feature.
+To avoid confusion this should be added to the stable series, too.
+
+Signed-off-by: Andre Przywara <andre.przywara@amd.com>
+LKML-Reference: <1283778860-26843-2-git-send-email-andre.przywara@amd.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/cpufeature.h |    2 +-
+ arch/x86/kvm/x86.c                |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -150,7 +150,7 @@
+ #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
+ #define X86_FEATURE_OSVW      (6*32+ 9) /* OS Visible Workaround */
+ #define X86_FEATURE_IBS               (6*32+10) /* Instruction Based Sampling */
+-#define X86_FEATURE_SSE5      (6*32+11) /* SSE-5 */
++#define X86_FEATURE_XOP               (6*32+11) /* extended AVX instructions */
+ #define X86_FEATURE_SKINIT    (6*32+12) /* SKINIT/STGI instructions */
+ #define X86_FEATURE_WDT               (6*32+13) /* Watchdog timer */
+ #define X86_FEATURE_NODEID_MSR        (6*32+19) /* NodeId MSR */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1485,7 +1485,7 @@ static void do_cpuid_ent(struct kvm_cpui
+       const u32 kvm_supported_word6_x86_features =
+               F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+-              F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
++              F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT */ | 0 /* WDT */;
+       /* all calls to cpuid_count() should be made on the same cpu */
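
Whether a given CPU advertises the renamed bit can be checked directly:
it is bit 11 of ECX for CPUID leaf 0x80000001, which is what word 6,
bit 11 in cpufeature.h encodes. A small userspace check using the
GCC/Clang cpuid.h helper:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
            puts("extended CPUID leaf not available");
            return 1;
        }
        /* bit 11: formerly reported as SSE5, now defined by AMD as XOP */
        printf("XOP: %s\n", (ecx & (1u << 11)) ? "supported" : "not supported");
        return 0;
    }
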
diff --git a/queue-2.6.32/x86-intr-remap-set-redirection-hint-in-the-irte.patch b/queue-2.6.32/x86-intr-remap-set-redirection-hint-in-the-irte.patch
new file mode 100644 (file)
index 0000000..ee50e58
--- /dev/null
@@ -0,0 +1,55 @@
+From 75e3cfbed6f71a8f151dc6e413b6ce3c390030cb Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Fri, 27 Aug 2010 11:09:48 -0700
+Subject: x86, intr-remap: Set redirection hint in the IRTE
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit 75e3cfbed6f71a8f151dc6e413b6ce3c390030cb upstream.
+
+Currently the redirection hint in the interrupt-remapping table entry
+is set to 0, which means the remapped interrupt is directed to the
+processors listed in the destination. So in logical flat mode
+in the presence of intr-remapping, this results in a single
+interrupt being multicast to multiple cpus as specified by the destination
+bit mask. But what we really want is to send that interrupt to one of the
+cpus based on the lowest-priority delivery mode.
+
+Set the redirection hint in the IRTE to '1' to indicate that we want
+the remapped interrupt to be directed to only one of the processors
+listed in the destination.
+
+This fixes the issue of the same interrupt getting delivered to multiple
+cpus in logical flat mode in the presence of interrupt-remapping. While
+no functional issue has been observed with this behavior, it does
+impact performance of such configurations (<=8 cpus using logical flat
+mode in the presence of interrupt-remapping).
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+LKML-Reference: <20100827181049.013051492@sbsiddha-MOBL3.sc.intel.com>
+Cc: Weidong Han <weidong.han@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/io_apic.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1410,6 +1410,7 @@ int setup_ioapic_entry(int apic_id, int
+               irte.dlvry_mode = apic->irq_delivery_mode;
+               irte.vector = vector;
+               irte.dest_id = IRTE_DEST(destination);
++              irte.redir_hint = 1;
+               /* Set source-id of interrupt request */
+               set_ioapic_sid(&irte, apic_id);
+@@ -3289,6 +3290,7 @@ static int msi_compose_msg(struct pci_de
+               irte.dlvry_mode = apic->irq_delivery_mode;
+               irte.vector = cfg->vector;
+               irte.dest_id = IRTE_DEST(dest);
++              irte.redir_hint = 1;
+               /* Set source-id of interrupt request */
+               set_msi_sid(&irte, pdev);
diff --git a/queue-2.6.32/x86-kdump-change-copy_oldmem_page-to-use-cached-addressing.patch b/queue-2.6.32/x86-kdump-change-copy_oldmem_page-to-use-cached-addressing.patch
new file mode 100644 (file)
index 0000000..4081fe4
--- /dev/null
@@ -0,0 +1,39 @@
+From 37a2f9f30a360fb03522d15c85c78265ccd80287 Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Wed, 8 Sep 2010 10:14:27 -0500
+Subject: x86, kdump: Change copy_oldmem_page() to use cached addressing
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 37a2f9f30a360fb03522d15c85c78265ccd80287 upstream.
+
+The copy of /proc/vmcore to a user buffer proceeds much faster
+if the kernel addresses memory as cached.
+
+With this patch we have seen an increase in transfer rate from
+less than 15MB/s to 80-460MB/s, depending on the size of the
+transfer. This makes a big difference in the time needed to save a
+system dump.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: kexec@lists.infradead.org
+LKML-Reference: <E1OtMLz-0001yp-Ia@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/crash_dump_64.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long p
+       if (!csize)
+               return 0;
+-      vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++      vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+       if (!vaddr)
+               return -ENOMEM;
diff --git a/queue-2.6.32/x86-kexec-make-sure-to-stop-all-cpus-before-exiting-the-kernel.patch b/queue-2.6.32/x86-kexec-make-sure-to-stop-all-cpus-before-exiting-the-kernel.patch
new file mode 100644 (file)
index 0000000..45cc35b
--- /dev/null
@@ -0,0 +1,145 @@
+From 76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce Mon Sep 17 00:00:00 2001
+From: Alok Kataria <akataria@vmware.com>
+Date: Mon, 11 Oct 2010 14:37:08 -0700
+Subject: x86, kexec: Make sure to stop all CPUs before exiting the kernel
+
+From: Alok Kataria <akataria@vmware.com>
+
+commit 76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce upstream.
+
+x86 smp_ops now has a new op, stop_other_cpus, which takes a parameter
+"wait"; this allows the caller to specify whether it wants to wait until
+all the cpus have processed the stop IPI.  This is required specifically
+for the kexec case, where we should wait for all the cpus to be stopped
+before starting the new kernel.  We now wait for the cpus to stop in
+all cases except for panic/kdump, where we expect things to be broken
+and we are doing our best to make things work anyway.
+
+This patch fixes a legitimate regression introduced during 2.6.30 by
+commit 4ef702c10b5df18ab04921fc252c26421d4d6c75.
+
+Signed-off-by: Alok N Kataria <akataria@vmware.com>
+LKML-Reference: <1286833028.1372.20.camel@ank32.eng.vmware.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/smp.h |    9 +++++++--
+ arch/x86/kernel/reboot.c   |    2 +-
+ arch/x86/kernel/smp.c      |   15 +++++++++------
+ arch/x86/xen/enlighten.c   |    2 +-
+ arch/x86/xen/smp.c         |    6 +++---
+ 5 files changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops {
+       void (*smp_prepare_cpus)(unsigned max_cpus);
+       void (*smp_cpus_done)(unsigned max_cpus);
+-      void (*smp_send_stop)(void);
++      void (*stop_other_cpus)(int wait);
+       void (*smp_send_reschedule)(int cpu);
+       int (*cpu_up)(unsigned cpu);
+@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
+ static inline void smp_send_stop(void)
+ {
+-      smp_ops.smp_send_stop();
++      smp_ops.stop_other_cpus(0);
++}
++
++static inline void stop_other_cpus(void)
++{
++      smp_ops.stop_other_cpus(1);
+ }
+ static inline void smp_prepare_boot_cpu(void)
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -633,7 +633,7 @@ void native_machine_shutdown(void)
+       /* O.K Now that I'm on the appropriate processor,
+        * stop all of the others.
+        */
+-      smp_send_stop();
++      stop_other_cpus();
+ #endif
+       lapic_shutdown();
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -158,10 +158,10 @@ asmlinkage void smp_reboot_interrupt(voi
+       irq_exit();
+ }
+-static void native_smp_send_stop(void)
++static void native_stop_other_cpus(int wait)
+ {
+       unsigned long flags;
+-      unsigned long wait;
++      unsigned long timeout;
+       if (reboot_force)
+               return;
+@@ -178,9 +178,12 @@ static void native_smp_send_stop(void)
+       if (num_online_cpus() > 1) {
+               apic->send_IPI_allbutself(REBOOT_VECTOR);
+-              /* Don't wait longer than a second */
+-              wait = USEC_PER_SEC;
+-              while (num_online_cpus() > 1 && wait--)
++              /*
++               * Don't wait longer than a second if the caller
++               * didn't ask us to wait.
++               */
++              timeout = USEC_PER_SEC;
++              while (num_online_cpus() > 1 && (wait || timeout--))
+                       udelay(1);
+       }
+@@ -226,7 +229,7 @@ struct smp_ops smp_ops = {
+       .smp_prepare_cpus       = native_smp_prepare_cpus,
+       .smp_cpus_done          = native_smp_cpus_done,
+-      .smp_send_stop          = native_smp_send_stop,
++      .stop_other_cpus        = native_stop_other_cpus,
+       .smp_send_reschedule    = native_smp_send_reschedule,
+       .cpu_up                 = native_cpu_up,
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -998,7 +998,7 @@ static void xen_reboot(int reason)
+       struct sched_shutdown r = { .reason = reason };
+ #ifdef CONFIG_SMP
+-      smp_send_stop();
++      stop_other_cpus();
+ #endif
+       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -396,9 +396,9 @@ static void stop_self(void *v)
+       BUG();
+ }
+-static void xen_smp_send_stop(void)
++static void xen_stop_other_cpus(int wait)
+ {
+-      smp_call_function(stop_self, NULL, 0);
++      smp_call_function(stop_self, NULL, wait);
+ }
+ static void xen_smp_send_reschedule(int cpu)
+@@ -466,7 +466,7 @@ static const struct smp_ops xen_smp_ops
+       .cpu_disable = xen_cpu_disable,
+       .play_dead = xen_play_dead,
+-      .smp_send_stop = xen_smp_send_stop,
++      .stop_other_cpus = xen_stop_other_cpus,
+       .smp_send_reschedule = xen_smp_send_reschedule,
+       .send_call_func_ipi = xen_smp_send_call_function_ipi,
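
The behavioral difference is carried entirely by the loop condition
"wait || timeout--": with wait set it never becomes false, so the loop
spins until the other cpus are actually offline; with wait clear it
gives up after roughly a second. A toy model of the two modes:

    #include <stdio.h>

    int main(void)
    {
        int wait;

        for (wait = 0; wait <= 1; wait++) {
            unsigned long timeout = 5;   /* tiny stand-in for USEC_PER_SEC */
            int cpus_online = 100;       /* cpus that are slow to stop */
            int spins = 0;

            while (cpus_online > 1 && (wait || timeout--)) {
                cpus_online--;           /* stand-in for udelay(1) + progress */
                spins++;
            }
            printf("wait=%d: %d spin(s), %d cpu(s) still online\n",
                   wait, spins, cpus_online);
        }
        return 0;
    }

kexec uses the waiting mode so no cpu is still executing when the new
kernel takes over; panic/kdump keeps the old bounded wait.
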
diff --git a/queue-2.6.32/x86-mtrr-assume-sys_cfg-exists-on-all-future-amd-cpus.patch b/queue-2.6.32/x86-mtrr-assume-sys_cfg-exists-on-all-future-amd-cpus.patch
new file mode 100644 (file)
index 0000000..ba64ab1
--- /dev/null
@@ -0,0 +1,42 @@
+From 3fdbf004c1706480a7c7fac3c9d836fa6df20d7d Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Thu, 30 Sep 2010 14:32:35 +0200
+Subject: x86, mtrr: Assume SYS_CFG[Tom2ForceMemTypeWB] exists on all future AMD CPUs
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit 3fdbf004c1706480a7c7fac3c9d836fa6df20d7d upstream.
+
+Instead of adapting the CPU family check in amd_special_default_mtrr()
+for each new CPU family, assume that all new AMD CPUs support the
+necessary bits in the SYS_CFG MSR.
+
+Tom2Enabled is architectural (defined in APM Vol.2).
+Tom2ForceMemTypeWB is defined in all BKDGs starting with K8 NPT.
+In pre K8-NPT BKDG this bit is reserved (read as zero).
+
+Without this adaptation Linux would unnecessarily complain about bad MTRR
+settings on every new AMD CPU family, e.g.
+
+[    0.000000] WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing 4863MB of RAM.
+
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+LKML-Reference: <20100930123235.GB20545@loge.amd.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/mtrr/cleanup.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
++++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
+@@ -948,7 +948,7 @@ int __init amd_special_default_mtrr(void
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return 0;
+-      if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
++      if (boot_cpu_data.x86 < 0xf)
+               return 0;
+       /* In case some hypervisor doesn't pass SYSCFG through: */
+       if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/queue-2.6.32/x86-olpc-don-t-retry-ec-commands-forever.patch b/queue-2.6.32/x86-olpc-don-t-retry-ec-commands-forever.patch
new file mode 100644 (file)
index 0000000..7b184d2
--- /dev/null
@@ -0,0 +1,44 @@
+From 286e5b97eb22baab9d9a41ca76c6b933a484252c Mon Sep 17 00:00:00 2001
+From: Paul Fox <pgf@laptop.org>
+Date: Fri, 1 Oct 2010 18:17:19 +0100
+Subject: x86, olpc: Don't retry EC commands forever
+
+From: Paul Fox <pgf@laptop.org>
+
+commit 286e5b97eb22baab9d9a41ca76c6b933a484252c upstream.
+
+Avoids a potential infinite loop.
+
+It was observed once, during an EC hacking/debugging
+session - not in regular operation.
+
+Signed-off-by: Daniel Drake <dsd@laptop.org>
+Cc: dilinger@queued.net
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/olpc.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/olpc.c
++++ b/arch/x86/kernel/olpc.c
+@@ -115,6 +115,7 @@ int olpc_ec_cmd(unsigned char cmd, unsig
+       unsigned long flags;
+       int ret = -EIO;
+       int i;
++      int restarts = 0;
+       spin_lock_irqsave(&ec_lock, flags);
+@@ -171,7 +172,9 @@ restart:
+                       if (wait_on_obf(0x6c, 1)) {
+                               printk(KERN_ERR "olpc-ec:  timeout waiting for"
+                                               " EC to provide data!\n");
+-                              goto restart;
++                              if (restarts++ < 10)
++                                      goto restart;
++                              goto err;
+                       }
+                       outbuf[i] = inb(0x68);
+                       printk(KERN_DEBUG "olpc-ec:  received 0x%x\n",
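
The change converts an unbounded "goto restart" into the standard
bounded-retry idiom. A standalone sketch of that idiom, with the EC
transaction stubbed out as always timing out:

    #include <stdio.h>

    #define MAX_RESTARTS 10

    /* stub: an EC transaction that never makes progress */
    static int wait_on_obf(void) { return 1; /* nonzero == timed out */ }

    static int ec_cmd(void)
    {
        int restarts = 0;
    restart:
        if (wait_on_obf()) {
            if (restarts++ < MAX_RESTARTS)
                goto restart;   /* bounded, instead of looping forever */
            return -1;          /* the kernel returns -EIO here */
        }
        return 0;
    }

    int main(void)
    {
        printf("ec_cmd() = %d after bounded retries\n", ec_cmd());
        return 0;
    }
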