--- /dev/null
+From 02a46bf0f13d244e8128f6587023dd9bb9183f32 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Tue, 9 Mar 2010 14:37:53 +0800
+Subject: KVM: s390: Fix possible memory leak in kvm_arch_vcpu_create()
+
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+
+This patch fixes a possible memory leak in kvm_arch_vcpu_create()
+on s390, which would happen when kvm_arch_vcpu_create() fails.
+
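+The allocations have to be unwound in reverse order: once the sie block
+page has been allocated, a failing kvm_vcpu_init() must free that page
+before the vcpu structure itself is freed.  Roughly, as a sketch of the
+unwind shape (the allocation calls shown here are illustrative, not the
+exact code):
+
+	vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);	/* undone at out_free_cpu */
+	vcpu->arch.sie_block = (void *)get_zeroed_page(GFP_KERNEL);
+	rc = kvm_vcpu_init(vcpu, kvm, id);
+	if (rc)
+		goto out_free_sie_block;	/* free_page() first, then kfree() */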
+Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+Acked-by: Carsten Otte <cotte@de.ibm.com>
+Cc: stable@kernel.org
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 7b06bf2ffa15e119c7439ed0b024d44f66d7b605)
+---
+ arch/s390/kvm/kvm-s390.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -338,11 +338,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+- goto out_free_cpu;
++ goto out_free_sie_block;
+ VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+ vcpu->arch.sie_block);
+
+ return vcpu;
++out_free_sie_block:
++ free_page((unsigned long)(vcpu->arch.sie_block));
+ out_free_cpu:
+ kfree(vcpu);
+ out_nomem:
--- /dev/null
+From ed4ac632676d697550dd7b649650948ec9e08349 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Tue, 9 Mar 2010 14:13:43 +0800
+Subject: KVM: PPC: Do not create debugfs if we fail to create vcpu
+
+From: Wei Yongjun <yjwei@cn.fujitsu.com>
+
+If we fail to create the vcpu, we should not create the debugfs
+entry for it.
+
+Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+Acked-by: Alexander Graf <agraf@suse.de>
+Cc: stable@kernel.org
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 06056bfb944a0302a8f22eb45f09123de7fb417b)
+---
+ arch/powerpc/kvm/powerpc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -176,7 +176,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+ {
+ struct kvm_vcpu *vcpu;
+ vcpu = kvmppc_core_vcpu_create(kvm, id);
+- kvmppc_create_vcpu_debugfs(vcpu, id);
++ if (!IS_ERR(vcpu))
++ kvmppc_create_vcpu_debugfs(vcpu, id);
+ return vcpu;
+ }
+
--- /dev/null
+From 9247fa63fb87cb96531da28bcec9261513279aa0 Mon Sep 17 00:00:00 2001
+From: Glauber Costa <glommer@redhat.com>
+Date: Tue, 11 May 2010 12:17:40 -0400
+Subject: x86, paravirt: Add a global synchronization point for pvclock
+
+From: Glauber Costa <glommer@redhat.com>
+
+In recent stress tests, it was found that pvclock-based systems
+could seriously warp on smp systems. Using ingo's time-warp-test.c,
+I could trigger a scenario as bad as 1.5 million warps a minute on some
+systems (to be fair, it wasn't that bad on most of them). Investigating
+further, I found out that such warps were caused by the very offset-based
+calculation pvclock is based on.
+
+This happens even on some machines that report constant_tsc in their tsc flags,
+especially on multi-socket ones.
+
+Two reads of the same kernel timestamp at approximately the same time will
+likely have been tsc-timestamped on different occasions too. This means the
+delta we calculate is unpredictable at best, and can probably be smaller on a
+cpu that is legitimately reading the clock at a later point.
+
+Some adjustments on the host could make this window less likely to happen,
+but still, it is pretty much an intrinsic problem of the mechanism.
+
+A while ago, I thought about using a shared variable anyway, to hold the
+clock's last state, but gave up due to the high contention that locking was
+likely to introduce, possibly rendering the thing useless on big machines. I
+argue, however, that locking is not necessary.
+
+We do a read-and-return sequence in pvclock, and between read and return,
+the global value can have changed. However, it can only have changed
+by means of an addition of a positive value. So if we detect that our
+clock timestamp is less than the current global, we know that we need to
+return a higher one, even though it is not exactly the one we compared to.
+
+OTOH, if we detect we're greater than the current time source, we atomically
+replace the value with our new reading. This does cause contention on big
+boxes (but big here means *BIG*), but it seems like a good trade-off, since
+it provides us with a time source guaranteed to be stable wrt time warps.
+
+After this patch is applied, I don't see a single warp in time during 5 days
+of execution on any of the machines where I saw them before.
+
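+The clamp itself can be written as a small lock-free helper; a minimal
+sketch of the idea (illustrative names, not the exact hunk below):
+
+	static atomic64_t last_value = ATOMIC64_INIT(0);
+
+	static u64 clamp_monotonic(u64 now)
+	{
+		u64 last = atomic64_read(&last_value);
+
+		for (;;) {
+			u64 old;
+
+			if (now <= last)
+				return last;	/* someone already returned a later time */
+			old = atomic64_cmpxchg(&last_value, last, now);
+			if (old == last)
+				return now;	/* we published the newer reading */
+			last = old;	/* lost the race, re-check against the winner */
+		}
+	}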
+Signed-off-by: Glauber Costa <glommer@redhat.com>
+Acked-by: Zachary Amsden <zamsden@redhat.com>
+CC: Jeremy Fitzhardinge <jeremy@goop.org>
+CC: Avi Kivity <avi@redhat.com>
+CC: Marcelo Tosatti <mtosatti@redhat.com>
+CC: Zachary Amsden <zamsden@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 489fb490dbf8dab0249ad82b56688ae3842a79e8)
+---
+ arch/x86/kernel/pvclock.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvc
+ return pv_tsc_khz;
+ }
+
++static atomic64_t last_value = ATOMIC64_INIT(0);
++
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+ {
+ struct pvclock_shadow_time shadow;
+ unsigned version;
+ cycle_t ret, offset;
++ u64 last;
+
+ do {
+ version = pvclock_get_time_values(&shadow, src);
+@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct
+ barrier();
+ } while (version != src->version);
+
++ /*
++ * Assumption here is that last_value, a global accumulator, always goes
++ * forward. If we are less than that, we should not be much smaller.
++ * We assume there is an error margin we're inside, so the correction
++ * does not sacrifice accuracy.
++ *
++ * For reads: global may have changed between test and return,
++ * but this means someone else poked the clock at a later time.
++ * We just need to make sure we are not seeing a backwards event.
++ *
++ * For updates: last_value = ret is not enough, since two vcpus could be
++ * updating at the same time, and one of them could be slightly behind,
++ * making the assumption that last_value always goes forward fail to hold.
++ */
++ last = atomic64_read(&last_value);
++ do {
++ if (ret < last)
++ return last;
++ last = atomic64_cmpxchg(&last_value, last, ret);
++ } while (unlikely(last != ret));
++
+ return ret;
+ }
+
--- /dev/null
+From b4123a844843cf71c0c729250f33a40f8525a376 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Wed, 12 May 2010 00:28:44 +0300
+Subject: KVM: Don't allow lmsw to clear cr0.pe
+
+From: Avi Kivity <avi@redhat.com>
+
+The current lmsw implementation allows the guest to clear cr0.pe, contrary
+to the manual, which breaks EMM386.EXE.
+
+Fix by ORing the old cr0.pe with lmsw's operand.
+
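+A worked example with illustrative values: take cr0 = 0x11 (PE set) and an
+lmsw operand of 0x0.  With the old mask, (0x11 & ~0x0f) | (0x0 & 0x0f) = 0x10,
+i.e. PE is cleared.  With the new mask, (0x11 & ~0x0e) | (0x0 & 0x0f) = 0x11,
+so PE survives, while an operand with bit 0 set can still turn PE on.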
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit f78e917688edbf1f14c318d2e50dc8e7dad20445)
+---
+ arch/x86/kvm/x86.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -345,7 +345,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
+
+ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+ {
+- kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
++ kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0eul) | (msw & 0x0f));
+ }
+ EXPORT_SYMBOL_GPL(kvm_lmsw);
+
--- /dev/null
+From 39a83d0c5579b897c8b72cd7a15320baca3df6ab Mon Sep 17 00:00:00 2001
+From: Sheng Yang <sheng@linux.intel.com>
+Date: Wed, 12 May 2010 16:40:40 +0800
+Subject: KVM: x86: Check LMA bit before set_efer
+
+From: Sheng Yang <sheng@linux.intel.com>
+
+kvm_x86_ops->set_efer() would execute vcpu->arch.efer = efer, so the
+LMA bit fixup afterwards didn't take effect.
+
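+In other words, the old ordering was effectively (a sketch, not the literal code):
+
+	kvm_x86_ops->set_efer(vcpu, efer);	/* backend has already consumed efer */
+	efer &= ~EFER_LMA;
+	efer |= vcpu->arch.shadow_efer & EFER_LMA;	/* fixup arrives too late */
+
+Moving the LMA fixup above the callback lets the backend see the preserved bit.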
+Signed-off-by: Sheng Yang <sheng@linux.intel.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit a3d204e28579427609c3d15d2310127ebaa47d94)
+---
+ arch/x86/kvm/x86.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -516,11 +516,11 @@ static void set_efer(struct kvm_vcpu *vc
+ }
+ }
+
+- kvm_x86_ops->set_efer(vcpu, efer);
+-
+ efer &= ~EFER_LMA;
+ efer |= vcpu->arch.shadow_efer & EFER_LMA;
+
++ kvm_x86_ops->set_efer(vcpu, efer);
++
+ vcpu->arch.shadow_efer = efer;
+
+ vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
--- /dev/null
+From b2cd13b5a7be66d7c72a21f81e4ca19d69288031 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Wed, 12 May 2010 11:48:18 +0300
+Subject: KVM: MMU: Segregate shadow pages with different cr0.wp
+
+From: Avi Kivity <avi@redhat.com>
+
+When cr0.wp=0, we may shadow a gpte having u/s=1 and r/w=0 with an spte
+having u/s=0 and r/w=1. This allows excessive access if the guest sets
+cr0.wp=1 and accesses through this spte.
+
+Fix by making cr0.wp part of the base role; we'll have different sptes for
+the two cases and the problem disappears.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 3dbe141595faa48a067add3e47bba3205b79d33c)
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/mmu.c | 3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -193,6 +193,7 @@ union kvm_mmu_page_role {
+ unsigned invalid:1;
+ unsigned cr4_pge:1;
+ unsigned nxe:1;
++ unsigned cr0_wp:1;
+ };
+ };
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -221,7 +221,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
+-static int is_write_protection(struct kvm_vcpu *vcpu)
++static bool is_write_protection(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.cr0 & X86_CR0_WP;
+ }
+@@ -2445,6 +2445,7 @@ static int init_kvm_softmmu(struct kvm_v
+ r = paging32_init_context(vcpu);
+
+ vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
++ vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+
+ return r;
+ }
--- /dev/null
+From f1d2b76046a92122d4fa78394c3a0bb61d39d754 Mon Sep 17 00:00:00 2001
+From: Shane Wang <shane.wang@intel.com>
+Date: Thu, 29 Apr 2010 12:09:01 -0400
+Subject: KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
+
+From: Shane Wang <shane.wang@intel.com>
+
+Per the documentation, for the feature control MSR:
+
+ Bit 1 enables VMXON in SMX operation. If the bit is clear, execution
+ of VMXON in SMX operation causes a general-protection exception.
+ Bit 2 enables VMXON outside SMX operation. If the bit is clear, execution
+ of VMXON outside SMX operation causes a general-protection exception.
+
+This patch enables this kind of check, taking SMX into account, for VMXON in KVM.
+
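+With the lock bit set, the bits that must additionally be set for VMXON to be
+usable therefore depend on how we booted; as a sketch of the required bits:
+
+	tboot (SMX) launch:	FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX
+	normal boot:		FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX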
+Signed-off-by: Shane Wang <shane.wang@intel.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit cafd66595d92591e4bd25c3904e004fc6f897e2d)
+---
+ arch/x86/include/asm/msr-index.h | 5 +++--
+ arch/x86/kernel/tboot.c | 1 +
+ arch/x86/kvm/vmx.c | 32 +++++++++++++++++++++-----------
+ include/linux/tboot.h | 1 +
+ 4 files changed, 26 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -198,8 +198,9 @@
+ #define MSR_IA32_EBL_CR_POWERON 0x0000002a
+ #define MSR_IA32_FEATURE_CONTROL 0x0000003a
+
+-#define FEATURE_CONTROL_LOCKED (1<<0)
+-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
++#define FEATURE_CONTROL_LOCKED (1<<0)
++#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
++#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+
+ #define MSR_IA32_APICBASE 0x0000001b
+ #define MSR_IA32_APICBASE_BSP (1<<8)
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -46,6 +46,7 @@
+
+ /* Global pointer to shared data; NULL means no measured launch. */
+ struct tboot *tboot __read_mostly;
++EXPORT_SYMBOL(tboot);
+
+ /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
+ #define AP_WAIT_TIMEOUT 1
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -26,6 +26,7 @@
+ #include <linux/sched.h>
+ #include <linux/moduleparam.h>
+ #include <linux/ftrace_event.h>
++#include <linux/tboot.h>
+ #include "kvm_cache_regs.h"
+ #include "x86.h"
+
+@@ -1140,9 +1141,16 @@ static __init int vmx_disabled_by_bios(v
+ u64 msr;
+
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+- return (msr & (FEATURE_CONTROL_LOCKED |
+- FEATURE_CONTROL_VMXON_ENABLED))
+- == FEATURE_CONTROL_LOCKED;
++ if (msr & FEATURE_CONTROL_LOCKED) {
++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
++ && tboot_enabled())
++ return 1;
++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
++ && !tboot_enabled())
++ return 1;
++ }
++
++ return 0;
+ /* locked but not enabled */
+ }
+
+@@ -1150,18 +1158,20 @@ static void hardware_enable(void *garbag
+ {
+ int cpu = raw_smp_processor_id();
+ u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+- u64 old;
++ u64 old, test_bits;
+
+ INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+- if ((old & (FEATURE_CONTROL_LOCKED |
+- FEATURE_CONTROL_VMXON_ENABLED))
+- != (FEATURE_CONTROL_LOCKED |
+- FEATURE_CONTROL_VMXON_ENABLED))
++
++ test_bits = FEATURE_CONTROL_LOCKED;
++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
++ if (tboot_enabled())
++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
++
++ if ((old & test_bits) != test_bits) {
+ /* enable and lock */
+- wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
+- FEATURE_CONTROL_LOCKED |
+- FEATURE_CONTROL_VMXON_ENABLED);
++ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
++ }
+ write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+ asm volatile (ASM_VMX_VMXON_RAX
+ : : "a"(&phys_addr), "m"(phys_addr)
+--- a/include/linux/tboot.h
++++ b/include/linux/tboot.h
+@@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);
+
+ #else
+
++#define tboot_enabled() 0
+ #define tboot_probe() do { } while (0)
+ #define tboot_shutdown(shutdown_type) do { } while (0)
+ #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
--- /dev/null
+From f4cee755ff0b1fbf9d918939f50f7a61ec6d9454 Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Tue, 4 May 2010 12:58:32 +0300
+Subject: KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
+
+From: Avi Kivity <avi@redhat.com>
+
+On svm, kvm_read_pdptr() may require reading guest memory, which can sleep.
+
+Push the spinlock into mmu_alloc_roots(), and only take it after we've read
+the pdptr.
+
+Tested-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 8facbbff071ff2b19268d3732e31badc60471e21)
+---
+ arch/x86/kvm/mmu.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2091,11 +2091,13 @@ static int mmu_alloc_roots(struct kvm_vc
+ direct = 1;
+ if (mmu_check_root(vcpu, root_gfn))
+ return 1;
++ spin_lock(&vcpu->kvm->mmu_lock);
+ sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
+ PT64_ROOT_LEVEL, direct,
+ ACC_ALL, NULL);
+ root = __pa(sp->spt);
+ ++sp->root_count;
++ spin_unlock(&vcpu->kvm->mmu_lock);
+ vcpu->arch.mmu.root_hpa = root;
+ return 0;
+ }
+@@ -2117,11 +2119,14 @@ static int mmu_alloc_roots(struct kvm_vc
+ root_gfn = 0;
+ if (mmu_check_root(vcpu, root_gfn))
+ return 1;
++ spin_lock(&vcpu->kvm->mmu_lock);
+ sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+ PT32_ROOT_LEVEL, direct,
+ ACC_ALL, NULL);
+ root = __pa(sp->spt);
+ ++sp->root_count;
++ spin_unlock(&vcpu->kvm->mmu_lock);
++
+ vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+ }
+ vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+@@ -2485,7 +2490,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
+ goto out;
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_free_some_pages(vcpu);
++ spin_unlock(&vcpu->kvm->mmu_lock);
+ r = mmu_alloc_roots(vcpu);
++ spin_lock(&vcpu->kvm->mmu_lock);
+ mmu_sync_roots(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ if (r)
--- /dev/null
+From ff40c74018d869be15569ad0788d64b2e8fa079b Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Tue, 4 May 2010 15:00:37 +0300
+Subject: KVM: Fix wallclock version writing race
+
+From: Avi Kivity <avi@redhat.com>
+
+Wallclock writing uses an unprotected global variable to hold the version;
+this can cause one guest to interfere with another if both write their
+wallclock at the same time.
+
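+For reference, the guest consumes this with the usual even/odd seqcount
+convention (an odd version means an update is in flight); roughly, as a
+sketch of the reader side:
+
+	do {
+		version = wall_clock->version;
+		rmb();
+		sec  = wall_clock->sec;
+		nsec = wall_clock->nsec;
+		rmb();
+	} while ((wall_clock->version & 1) || (wall_clock->version != version));
+
+which is why the writer below derives the next version from the value already
+in guest memory rather than from a host-global counter shared by all guests.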
+Acked-by: Glauber Costa <glommer@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 9ed3c444ab8987c7b219173a2f7807e3f71e234e)
+---
+ arch/x86/kvm/x86.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -554,14 +554,22 @@ static int do_set_msr(struct kvm_vcpu *v
+
+ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+ {
+- static int version;
++ int version;
++ int r;
+ struct pvclock_wall_clock wc;
+ struct timespec boot;
+
+ if (!wall_clock)
+ return;
+
+- version++;
++ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
++ if (r)
++ return;
++
++ if (version & 1)
++ ++version; /* first time write, random junk */
++
++ ++version;
+
+ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+
--- /dev/null
+From 0aa11389f97acb26ca9e368531859c045526611c Mon Sep 17 00:00:00 2001
+From: Avi Kivity <avi@redhat.com>
+Date: Thu, 13 May 2010 11:50:19 +0300
+Subject: KVM: x86: Add missing locking to arch specific vcpu ioctls
+
+From: Avi Kivity <avi@redhat.com>
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 8fbf065d625617bbbf6b72d5f78f84ad13c8b547)
+---
+ arch/x86/kvm/x86.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1420,6 +1420,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
+ {
+ int r;
+
++ vcpu_load(vcpu);
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+@@ -1431,6 +1432,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
+
+ out:
+ cpuid->nent = vcpu->arch.cpuid_nent;
++ vcpu_put(vcpu);
+ return r;
+ }
+
+@@ -1680,6 +1682,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
+ int r;
+ unsigned bank_num = mcg_cap & 0xff, bank;
+
++ vcpu_load(vcpu);
+ r = -EINVAL;
+ if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+ goto out;
+@@ -1694,6 +1697,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
+ for (bank = 0; bank < bank_num; bank++)
+ vcpu->arch.mce_banks[bank*4] = ~(u64)0;
+ out:
++ vcpu_put(vcpu);
+ return r;
+ }
+
+@@ -1896,7 +1900,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ r = -EFAULT;
+ if (copy_from_user(&mce, argp, sizeof mce))
+ goto out;
++ vcpu_load(vcpu);
+ r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
++ vcpu_put(vcpu);
+ break;
+ }
+ default:
--- /dev/null
+From 8ff9213b241f4893b36d0cad55825b7da9bda076 Mon Sep 17 00:00:00 2001
+From: Roedel, Joerg <Joerg.Roedel@amd.com>
+Date: Thu, 6 May 2010 11:38:43 +0200
+Subject: KVM: x86: Inject #GP with the right rip on efer writes
+
+From: Roedel, Joerg <Joerg.Roedel@amd.com>
+
+This patch fixes a bug in the KVM efer-msr write path. If a
+guest writes to a reserved efer bit, the set_efer function
+injects the #GP directly. The architecture-dependent wrmsr
+function does not see this, assumes success and advances the
+rip. This results in a #GP in the guest with the wrong rip.
+This patch fixes this by reporting efer write errors back to
+the architecture-dependent wrmsr function.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit b69e8caef5b190af48c525f6d715e7b7728a77f6)
+---
+ arch/x86/kvm/x86.c | 31 ++++++++++++-------------------
+ 1 file changed, 12 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -483,37 +483,29 @@ static u32 emulated_msrs[] = {
+ MSR_IA32_MISC_ENABLE,
+ };
+
+-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
++static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+- if (efer & efer_reserved_bits) {
+- kvm_inject_gp(vcpu, 0);
+- return;
+- }
++ if (efer & efer_reserved_bits)
++ return 1;
+
+ if (is_paging(vcpu)
+- && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+- kvm_inject_gp(vcpu, 0);
+- return;
+- }
++ && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME))
++ return 1;
+
+ if (efer & EFER_FFXSR) {
+ struct kvm_cpuid_entry2 *feat;
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+- if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
+- kvm_inject_gp(vcpu, 0);
+- return;
+- }
++ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
++ return 1;
+ }
+
+ if (efer & EFER_SVME) {
+ struct kvm_cpuid_entry2 *feat;
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+- if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
+- kvm_inject_gp(vcpu, 0);
+- return;
+- }
++ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
++ return 1;
+ }
+
+ efer &= ~EFER_LMA;
+@@ -525,6 +517,8 @@ static void set_efer(struct kvm_vcpu *vc
+
+ vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
+ kvm_mmu_reset_context(vcpu);
++
++ return 0;
+ }
+
+ void kvm_enable_efer_bits(u64 mask)
+@@ -827,8 +821,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ {
+ switch (msr) {
+ case MSR_EFER:
+- set_efer(vcpu, data);
+- break;
++ return set_efer(vcpu, data);
+ case MSR_K7_HWCR:
+ data &= ~(u64)0x40; /* ignore flush filter disable */
+ if (data != 0) {
--- /dev/null
+From 2f26afba46f0ebf155cf9be746496a0304a5b7cf Mon Sep 17 00:00:00 2001
+From: Shi Weihua <shiwh@cn.fujitsu.com>
+Date: Tue, 18 May 2010 00:50:32 +0000
+Subject: Btrfs: should add a permission check for setfacl
+
+From: Shi Weihua <shiwh@cn.fujitsu.com>
+
+commit 2f26afba46f0ebf155cf9be746496a0304a5b7cf upstream.
+
+On btrfs, do the following
+------------------
+# su user1
+# cd btrfs-part/
+# touch aaa
+# getfacl aaa
+ # file: aaa
+ # owner: user1
+ # group: user1
+ user::rw-
+ group::rw-
+ other::r--
+# su user2
+# cd btrfs-part/
+# setfacl -m u::rwx aaa
+# getfacl aaa
+ # file: aaa
+ # owner: user1
+ # group: user1
+ user::rwx <- setfacl succeeded
+ group::rw-
+ other::r--
+------------------
+but we should prohibit user2 from changing user1's acl.
+In fact, on ext3 and other filesystems, setfacl fails with:
+ setfacl: aaa: Operation not permitted
+
+This patch fixes it by adding the same owner-or-CAP_FOWNER check
+(is_owner_or_cap()) that other filesystems apply.
+
+Signed-off-by: Shi Weihua <shiwh@cn.fujitsu.com>
+Signed-off-by: Chris Mason <chris.mason@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/btrfs/acl.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -157,6 +157,9 @@ static int btrfs_xattr_set_acl(struct in
+ int ret = 0;
+ struct posix_acl *acl = NULL;
+
++ if (!is_owner_or_cap(dentry->d_inode))
++ return -EPERM;
++
+ if (value) {
+ acl = posix_acl_from_xattr(value, size);
+ if (acl == NULL) {
--- /dev/null
+From fa588e0c57048b3d4bfcd772d80dc0615f83fd35 Mon Sep 17 00:00:00 2001
+From: Steve French <sfrench@us.ibm.com>
+Date: Thu, 22 Apr 2010 19:21:55 +0000
+Subject: CIFS: Allow null nd (as nfs server uses) on create
+
+From: Steve French <sfrench@us.ibm.com>
+
+commit fa588e0c57048b3d4bfcd772d80dc0615f83fd35 upstream.
+
+When creating a file on a server which supports unix extensions, such
+as Samba, if the file is being created without nameidata being supplied
+(i.e. nd is null, as happens when the nfs server is the caller), the
+cifs client can oops when calling cifs_posix_open.
+
+Signed-off-by: Shirish Pargaonkar <shirishp@us.ibm.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifsproto.h | 6 ++++--
+ fs/cifs/dir.c | 20 ++++++++++++--------
+ fs/cifs/file.c | 11 +++++++----
+ 3 files changed, 23 insertions(+), 14 deletions(-)
+
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fil
+ __u16 fileHandle, struct file *file,
+ struct vfsmount *mnt, unsigned int oflags);
+ extern int cifs_posix_open(char *full_path, struct inode **pinode,
+- struct vfsmount *mnt, int mode, int oflags,
+- __u32 *poplock, __u16 *pnetfid, int xid);
++ struct vfsmount *mnt,
++ struct super_block *sb,
++ int mode, int oflags,
++ __u32 *poplock, __u16 *pnetfid, int xid);
+ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+ FILE_UNIX_BASIC_INFO *info,
+ struct cifs_sb_info *cifs_sb);
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode
+ }
+
+ int cifs_posix_open(char *full_path, struct inode **pinode,
+- struct vfsmount *mnt, int mode, int oflags,
+- __u32 *poplock, __u16 *pnetfid, int xid)
++ struct vfsmount *mnt, struct super_block *sb,
++ int mode, int oflags,
++ __u32 *poplock, __u16 *pnetfid, int xid)
+ {
+ int rc;
+ FILE_UNIX_BASIC_INFO *presp_data;
+ __u32 posix_flags = 0;
+- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_fattr fattr;
+
+ cFYI(1, ("posix open %s", full_path));
+@@ -241,7 +242,7 @@ int cifs_posix_open(char *full_path, str
+
+ /* get new inode and set it up */
+ if (*pinode == NULL) {
+- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
++ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode) {
+ rc = -ENOMEM;
+ goto posix_open_ret;
+@@ -250,7 +251,8 @@ int cifs_posix_open(char *full_path, str
+ cifs_fattr_to_inode(*pinode, &fattr);
+ }
+
+- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
++ if (mnt)
++ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+
+ posix_open_ret:
+ kfree(presp_data);
+@@ -314,13 +316,14 @@ cifs_create(struct inode *inode, struct
+ if (nd && (nd->flags & LOOKUP_OPEN))
+ oflags = nd->intent.open.flags;
+ else
+- oflags = FMODE_READ;
++ oflags = FMODE_READ | SMB_O_CREAT;
+
+ if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
+- mode, oflags, &oplock, &fileHandle, xid);
++ rc = cifs_posix_open(full_path, &newinode,
++ nd ? nd->path.mnt : NULL,
++ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
+ /* EIO could indicate that (posix open) operation is not
+ supported, despite what server claimed in capability
+ negotation. EREMOTE indicates DFS junction, which is not
+@@ -677,6 +680,7 @@ cifs_lookup(struct inode *parent_dir_ino
+ (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
+ (nd->intent.open.flags & O_CREAT)) {
+ rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
++ parent_dir_inode->i_sb,
+ nd->intent.open.create_mode,
+ nd->intent.open.flags, &oplock,
+ &fileHandle, xid);
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -295,10 +295,12 @@ int cifs_open(struct inode *inode, struc
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ int oflags = (int) cifs_posix_convert_flags(file->f_flags);
++ oflags |= SMB_O_CREAT;
+ /* can not refresh inode info since size could be stale */
+ rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
+- cifs_sb->mnt_file_mode /* ignored */,
+- oflags, &oplock, &netfid, xid);
++ inode->i_sb,
++ cifs_sb->mnt_file_mode /* ignored */,
++ oflags, &oplock, &netfid, xid);
+ if (rc == 0) {
+ cFYI(1, ("posix open succeeded"));
+ /* no need for special case handling of setting mode
+@@ -510,8 +512,9 @@ reopen_error_exit:
+ int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ /* can not refresh inode info since size could be stale */
+ rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
+- cifs_sb->mnt_file_mode /* ignored */,
+- oflags, &oplock, &netfid, xid);
++ inode->i_sb,
++ cifs_sb->mnt_file_mode /* ignored */,
++ oflags, &oplock, &netfid, xid);
+ if (rc == 0) {
+ cFYI(1, ("posix reopen succeeded"));
+ goto reopen_success;
--- /dev/null
+From 102c6ddb1d081a6a1fede38c43a42c9811313ec7 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 10 Dec 2009 23:52:08 +0000
+Subject: dm snapshot: simplify sector_to_chunk expression
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 102c6ddb1d081a6a1fede38c43a42c9811313ec7 upstream.
+
+Removed unnecessary 'and' masking: The right shift discards the lower
+bits so there is no need to clear them.
+
+(A later patch needs this change to support a 32-bit chunk_mask.)
+
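+Since chunk_size is a power of two and chunk_mask is chunk_size - 1, the bits
+cleared by "& ~chunk_mask" are exactly the bits discarded by ">> chunk_shift".
+For example, with chunk_shift = 3: (0x2f & ~0x7) >> 3 = 0x28 >> 3 = 5, which is
+the same as 0x2f >> 3.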
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Mike Snitzer <snitzer@redhat.com>
+Reviewed-by: Jonathan Brassow <jbrassow@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-exception-store.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -162,7 +162,7 @@ static inline sector_t get_dev_size(stru
+ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
+ sector_t sector)
+ {
+- return (sector & ~store->chunk_mask) >> store->chunk_shift;
++ return sector >> store->chunk_shift;
+ }
+
+ int dm_exception_store_type_register(struct dm_exception_store_type *type);
--- /dev/null
+From 42007efd569f1cf3bfb9a61da60ef6c2179508ca Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Sun, 16 May 2010 01:00:00 -0400
+Subject: ext4: check s_log_groups_per_flex in online resize code
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit 42007efd569f1cf3bfb9a61da60ef6c2179508ca upstream.
+
+If groups_per_flex < 2, sbi->s_flex_groups[] doesn't get filled out,
+and every other access to this first tests s_log_groups_per_flex;
+the same test needs to happen in resize or we'll wander off into
+a null pointer when doing an online resize of the file system.
+
+Thanks to Christoph Biedl, who came up with the trivial testcase:
+
+# truncate --size 128M fsfile
+# mkfs.ext3 -F fsfile
+# tune2fs -O extents,uninit_bg,dir_index,flex_bg,huge_file,dir_nlink,extra_isize fsfile
+# e2fsck -yDf -C0 fsfile
+# truncate --size 132M fsfile
+# losetup /dev/loop0 fsfile
+# mount /dev/loop0 mnt
+# resize2fs -p /dev/loop0
+
+ https://bugzilla.kernel.org/show_bug.cgi?id=13549
+
+Reported-by: Alessandro Polverini <alex@nibbles.it>
+Test-case-by: Christoph Biedl <bugzilla.kernel.bpeb@manchmal.in-ulm.de>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/resize.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -930,7 +930,8 @@ int ext4_group_add(struct super_block *s
+ percpu_counter_add(&sbi->s_freeinodes_counter,
+ EXT4_INODES_PER_GROUP(sb));
+
+- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
++ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
++ sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group;
+ flex_group = ext4_flex_group(sbi, input->group);
+ atomic_add(input->free_blocks_count,
--- /dev/null
+From 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Wed, 2 Jun 2010 22:04:39 -0400
+Subject: ext4: Make sure the MOVE_EXT ioctl can't overwrite append-only files
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 upstream.
+
+Dan Rosenberg has reported a problem with the MOVE_EXT ioctl. If the
+donor file is an append-only file, we should not allow the operation
+to proceed, lest we end up overwriting the contents of an append-only
+file.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/move_extent.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -964,6 +964,9 @@ mext_check_arguments(struct inode *orig_
+ return -EINVAL;
+ }
+
++ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
++ return -EPERM;
++
+ /* Ext4 move extent does not support swapfile */
+ if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should "
--- /dev/null
+From 7df0e0397b9a18358573274db9fdab991941062f Mon Sep 17 00:00:00 2001
+From: Steven Whitehouse <swhiteho@redhat.com>
+Date: Mon, 24 May 2010 14:36:48 +0100
+Subject: GFS2: Fix permissions checking for setflags ioctl()
+
+From: Steven Whitehouse <swhiteho@redhat.com>
+
+commit 7df0e0397b9a18358573274db9fdab991941062f upstream.
+
+We should be checking for the ownership of the file for which
+flags are being set, rather than just for write access.
+
+Reported-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/gfs2/file.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file
+ if (error)
+ goto out_drop_write;
+
++ error = -EACCES;
++ if (!is_owner_or_cap(inode))
++ goto out;
++
++ error = 0;
+ flags = ip->i_diskflags;
+ new_flags = (flags & ~mask) | (reqflags & mask);
+ if ((new_flags ^ flags) == 0)
+@@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *f
+ {
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ u32 fsflags, gfsflags;
++
+ if (get_user(fsflags, ptr))
+ return -EFAULT;
++
+ gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
+ if (!S_ISDIR(inode->i_mode)) {
+ if (gfsflags & GFS2_DIF_INHERIT_JDATA)
--- /dev/null
+From ef110b24e28f36620f63dab94708a17c7e267358 Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Thu, 13 May 2010 00:42:23 -0700
+Subject: Input: psmouse - reset all types of mice before reconnecting
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit ef110b24e28f36620f63dab94708a17c7e267358 upstream.
+
+Synaptics hardware requires resetting the device after suspend to RAM
+in order for the device to be operational. The reset lives in the
+synaptics-specific reconnect handler, but it is not being invoked
+if synaptics support is disabled and the device is handled as a
+standard PS/2 device (bare or IntelliMouse protocol).
+
+Let's add the reset to the generic reconnect handler as well.
+
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Cc: Tim Gardner <tim.gardner@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/input/mouse/psmouse-base.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1349,6 +1349,7 @@ static int psmouse_reconnect(struct seri
+ struct psmouse *psmouse = serio_get_drvdata(serio);
+ struct psmouse *parent = NULL;
+ struct serio_driver *drv = serio->drv;
++ unsigned char type;
+ int rc = -1;
+
+ if (!drv || !psmouse) {
+@@ -1368,10 +1369,15 @@ static int psmouse_reconnect(struct seri
+ if (psmouse->reconnect) {
+ if (psmouse->reconnect(psmouse))
+ goto out;
+- } else if (psmouse_probe(psmouse) < 0 ||
+- psmouse->type != psmouse_extensions(psmouse,
+- psmouse_max_proto, false)) {
+- goto out;
++ } else {
++ psmouse_reset(psmouse);
++
++ if (psmouse_probe(psmouse) < 0)
++ goto out;
++
++ type = psmouse_extensions(psmouse, psmouse_max_proto, false);
++ if (psmouse->type != type)
++ goto out;
+ }
+
+ /* ok, the device type (and capabilities) match the old one,
--- /dev/null
+From 765f8361902d015c864d5e62019b2f139452d7ef Mon Sep 17 00:00:00 2001
+From: Yin Kangkai <kangkai.yin@intel.com>
+Date: Tue, 15 Dec 2009 14:48:25 -0800
+Subject: jbd: jbd-debug and jbd2-debug should be writable
+
+From: Yin Kangkai <kangkai.yin@intel.com>
+
+commit 765f8361902d015c864d5e62019b2f139452d7ef upstream.
+
+jbd-debug and jbd2-debug are currently read-only (S_IRUGO), which is not
+correct. Make them writable so that we can start debugging.
+
+Signed-off-by: Yin Kangkai <kangkai.yin@intel.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/jbd/journal.c | 2 +-
+ fs/jbd2/journal.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd/journal.c
++++ b/fs/jbd/journal.c
+@@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_en
+ {
+ jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
+ if (jbd_debugfs_dir)
+- jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO,
++ jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
+ jbd_debugfs_dir,
+ &journal_enable_debug);
+ }
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_e
+ {
+ jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
+ if (jbd2_debugfs_dir)
+- jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
++ jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
++ S_IRUGO | S_IWUSR,
+ jbd2_debugfs_dir,
+ &jbd2_journal_enable_debug);
+ }
--- /dev/null
+From cea7daa3589d6b550546a8c8963599f7c1a3ae5c Mon Sep 17 00:00:00 2001
+From: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
+Date: Fri, 30 Apr 2010 14:32:13 +0100
+Subject: KEYS: find_keyring_by_name() can gain access to a freed keyring
+
+From: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
+
+commit cea7daa3589d6b550546a8c8963599f7c1a3ae5c upstream.
+
+find_keyring_by_name() can gain access to a keyring that has had its reference
+count reduced to zero, and is thus ready to be freed. This then allows the
+dead keyring to be brought back into use whilst it is being destroyed.
+
+The following timeline illustrates the process:
+
+|(cleaner) (user)
+|
+| free_user(user) sys_keyctl()
+| | |
+| key_put(user->session_keyring) keyctl_get_keyring_ID()
+| || //=> keyring->usage = 0 |
+| |schedule_work(&key_cleanup_task) lookup_user_key()
+| || |
+| kmem_cache_free(,user) |
+| . |[KEY_SPEC_USER_KEYRING]
+| . install_user_keyrings()
+| . ||
+| key_cleanup() [<= worker_thread()] ||
+| | ||
+| [spin_lock(&key_serial_lock)] |[mutex_lock(&key_user_keyr..mutex)]
+| | ||
+| atomic_read() == 0 ||
+| |{ rb_ease(&key->serial_node,) } ||
+| | ||
+| [spin_unlock(&key_serial_lock)] |find_keyring_by_name()
+| | |||
+| keyring_destroy(keyring) ||[read_lock(&keyring_name_lock)]
+| || |||
+| |[write_lock(&keyring_name_lock)] ||atomic_inc(&keyring->usage)
+| |. ||| *** GET freeing keyring ***
+| |. ||[read_unlock(&keyring_name_lock)]
+| || ||
+| |list_del() |[mutex_unlock(&key_user_k..mutex)]
+| || |
+| |[write_unlock(&keyring_name_lock)] ** INVALID keyring is returned **
+| | .
+| kmem_cache_free(,keyring) .
+| .
+| atomic_dec(&keyring->usage)
+v *** DESTROYED ***
+TIME
+
+If CONFIG_SLUB_DEBUG=y then we may see the following message generated:
+
+ =============================================================================
+ BUG key_jar: Poison overwritten
+ -----------------------------------------------------------------------------
+
+ INFO: 0xffff880197a7e200-0xffff880197a7e200. First byte 0x6a instead of 0x6b
+ INFO: Allocated in key_alloc+0x10b/0x35f age=25 cpu=1 pid=5086
+ INFO: Freed in key_cleanup+0xd0/0xd5 age=12 cpu=1 pid=10
+ INFO: Slab 0xffffea000592cb90 objects=16 used=2 fp=0xffff880197a7e200 flags=0x200000000000c3
+ INFO: Object 0xffff880197a7e200 @offset=512 fp=0xffff880197a7e300
+
+ Bytes b4 0xffff880197a7e1f0: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
+ Object 0xffff880197a7e200: 6a 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b jkkkkkkkkkkkkkkk
+
+Alternatively, we may see a system panic happen, such as:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
+ IP: [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
+ PGD 6b2b4067 PUD 6a80d067 PMD 0
+ Oops: 0000 [#1] SMP
+ last sysfs file: /sys/kernel/kexec_crash_loaded
+ CPU 1
+ ...
+ Pid: 31245, comm: su Not tainted 2.6.34-rc5-nofixed-nodebug #2 D2089/PRIMERGY
+ RIP: 0010:[<ffffffff810e61a3>] [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
+ RSP: 0018:ffff88006af3bd98 EFLAGS: 00010002
+ RAX: 0000000000000000 RBX: 0000000000000001 RCX: ffff88007d19900b
+ RDX: 0000000100000000 RSI: 00000000000080d0 RDI: ffffffff81828430
+ RBP: ffffffff81828430 R08: ffff88000a293750 R09: 0000000000000000
+ R10: 0000000000000001 R11: 0000000000100000 R12: 00000000000080d0
+ R13: 00000000000080d0 R14: 0000000000000296 R15: ffffffff810f20ce
+ FS: 00007f97116bc700(0000) GS:ffff88000a280000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000001 CR3: 000000006a91c000 CR4: 00000000000006e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+ Process su (pid: 31245, threadinfo ffff88006af3a000, task ffff8800374414c0)
+ Stack:
+ 0000000512e0958e 0000000000008000 ffff880037f8d180 0000000000000001
+ 0000000000000000 0000000000008001 ffff88007d199000 ffffffff810f20ce
+ 0000000000008000 ffff88006af3be48 0000000000000024 ffffffff810face3
+ Call Trace:
+ [<ffffffff810f20ce>] ? get_empty_filp+0x70/0x12f
+ [<ffffffff810face3>] ? do_filp_open+0x145/0x590
+ [<ffffffff810ce208>] ? tlb_finish_mmu+0x2a/0x33
+ [<ffffffff810ce43c>] ? unmap_region+0xd3/0xe2
+ [<ffffffff810e4393>] ? virt_to_head_page+0x9/0x2d
+ [<ffffffff81103916>] ? alloc_fd+0x69/0x10e
+ [<ffffffff810ef4ed>] ? do_sys_open+0x56/0xfc
+ [<ffffffff81008a02>] ? system_call_fastpath+0x16/0x1b
+ Code: 0f 1f 44 00 00 49 89 c6 fa 66 0f 1f 44 00 00 65 4c 8b 04 25 60 e8 00 00 48 8b 45 00 49 01 c0 49 8b 18 48 85 db 74 0d 48 63 45 18 <48> 8b 04 03 49 89 00 eb 14 4c 89 f9 83 ca ff 44 89 e6 48 89 ef
+ RIP [<ffffffff810e61a3>] kmem_cache_alloc+0x5b/0xe9
+
+The problem is that find_keyring_by_name() does not confirm that the keyring
+is still valid before accepting it.
+
+Skipping keyrings that have been reduced to a zero count seems the way to go.
+To this end, use atomic_inc_not_zero() to increment the usage count and skip
+the candidate keyring if that returns false.
+
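+The rule the lookup now follows, as a sketch:
+
+	if (!atomic_inc_not_zero(&keyring->usage))
+		continue;	/* usage already hit 0: key_cleanup() owns it */
+
+A plain atomic_inc() would happily take a dead keyring's count from 0 back to
+1 and "resurrect" it; atomic_inc_not_zero() only takes a reference while the
+count is still positive, so a keyring queued for destruction is skipped.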
+The following script _may_ cause the bug to happen, but there's no guarantee
+as the window of opportunity is small:
+
+ #!/bin/sh
+ LOOP=100000
+ USER=dummy_user
+ /bin/su -c "exit;" $USER || { /usr/sbin/adduser -m $USER; add=1; }
+ for ((i=0; i<LOOP; i++))
+ do
+ /bin/su -c "echo '$i' > /dev/null" $USER
+ done
+ (( add == 1 )) && /usr/sbin/userdel -r $USER
+ exit
+
+Note that the nominated user must not be in use.
+
+An alternative way of testing this may be:
+
+ for ((i=0; i<100000; i++))
+ do
+ keyctl session foo /bin/true || break
+ done >&/dev/null
+
+as that uses a keyring named "foo" rather than relying on the user and
+user-session named keyrings.
+
+Reported-by: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Tested-by: Toshiyuki Okajima <toshi.okajima@jp.fujitsu.com>
+Acked-by: Serge Hallyn <serue@us.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/keyring.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -524,9 +524,8 @@ struct key *find_keyring_by_name(const c
+ struct key *keyring;
+ int bucket;
+
+- keyring = ERR_PTR(-EINVAL);
+ if (!name)
+- goto error;
++ return ERR_PTR(-EINVAL);
+
+ bucket = keyring_hash(name);
+
+@@ -553,17 +552,18 @@ struct key *find_keyring_by_name(const c
+ KEY_SEARCH) < 0)
+ continue;
+
+- /* we've got a match */
+- atomic_inc(&keyring->usage);
+- read_unlock(&keyring_name_lock);
+- goto error;
++ /* we've got a match but we might end up racing with
++ * key_cleanup() if the keyring is currently 'dead'
++ * (ie. it has a zero usage count) */
++ if (!atomic_inc_not_zero(&keyring->usage))
++ continue;
++ goto out;
+ }
+ }
+
+- read_unlock(&keyring_name_lock);
+ keyring = ERR_PTR(-ENOKEY);
+-
+- error:
++out:
++ read_unlock(&keyring_name_lock);
+ return keyring;
+
+ } /* end find_keyring_by_name() */
--- /dev/null
+From 4d09ec0f705cf88a12add029c058b53f288cfaa2 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Mon, 17 May 2010 14:42:35 +0100
+Subject: KEYS: Return more accurate error codes
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit 4d09ec0f705cf88a12add029c058b53f288cfaa2 upstream.
+
+We were using the wrong variable here, so the error codes weren't being
+returned properly. The original code returns -ENOKEY.
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/process_keys.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -509,7 +509,7 @@ try_again:
+
+ ret = install_thread_keyring();
+ if (ret < 0) {
+- key = ERR_PTR(ret);
++ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+@@ -527,7 +527,7 @@ try_again:
+
+ ret = install_process_keyring();
+ if (ret < 0) {
+- key = ERR_PTR(ret);
++ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+@@ -586,7 +586,7 @@ try_again:
+
+ case KEY_SPEC_GROUP_KEYRING:
+ /* group keyrings are not yet supported */
+- key = ERR_PTR(-EINVAL);
++ key_ref = ERR_PTR(-EINVAL);
+ goto error;
+
+ case KEY_SPEC_REQKEY_AUTH_KEY:
--- /dev/null
+From 3feec9095d12e311b7d4eb7fe7e5dfa75d4a72a5 Mon Sep 17 00:00:00 2001
+From: James Chapman <jchapman@katalix.com>
+Date: Tue, 16 Mar 2010 06:46:31 +0000
+Subject: l2tp: Fix oops in pppol2tp_xmit
+
+From: James Chapman <jchapman@katalix.com>
+
+commit 3feec9095d12e311b7d4eb7fe7e5dfa75d4a72a5 upstream.
+
+When transmitting L2TP frames, we derive the outgoing interface's UDP
+checksum hardware assist capabilities from the tunnel dst dev. This
+can sometimes be NULL, especially when routing protocols are used and
+routing changes occur. This patch just checks for NULL dst or dev
+pointers when checking for netdev hardware assist features.
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000c
+IP: [<f89d074c>] pppol2tp_xmit+0x341/0x4da [pppol2tp]
+*pde = 00000000
+Oops: 0000 [#1] SMP
+last sysfs file: /sys/class/net/lo/operstate
+Modules linked in: pppol2tp pppox ppp_generic slhc ipv6 dummy loop snd_hda_codec_atihdmi snd_hda_intel snd_hda_codec snd_pcm snd_timer snd soundcore snd_page_alloc evdev psmouse serio_raw processor button i2c_piix4 i2c_core ati_agp agpgart pcspkr ext3 jbd mbcache sd_mod ide_pci_generic atiixp ide_core ahci ata_generic floppy ehci_hcd ohci_hcd libata e1000e scsi_mod usbcore nls_base thermal fan thermal_sys [last unloaded: scsi_wait_scan]
+
+Pid: 0, comm: swapper Not tainted (2.6.32.8 #1)
+EIP: 0060:[<f89d074c>] EFLAGS: 00010297 CPU: 3
+EIP is at pppol2tp_xmit+0x341/0x4da [pppol2tp]
+EAX: 00000000 EBX: f64d1680 ECX: 000005b9 EDX: 00000000
+ESI: f6b91850 EDI: f64d16ac EBP: f6a0c4c0 ESP: f70a9cac
+ DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
+Process swapper (pid: 0, ti=f70a8000 task=f70a31c0 task.ti=f70a8000)
+Stack:
+ 000005a9 000005b9 f734c400 f66652c0 f7352e00 f67dc800 00000000 f6b91800
+<0> 000005a3 f70ef6c4 f67dcda9 000005a3 f89b192e 00000246 000005a3 f64d1680
+<0> f63633e0 f6363320 f64d1680 f65a7320 f65a7364 f65856c0 f64d1680 f679f02f
+Call Trace:
+ [<f89b192e>] ? ppp_push+0x459/0x50e [ppp_generic]
+ [<f89b217f>] ? ppp_xmit_process+0x3b6/0x430 [ppp_generic]
+ [<f89b2306>] ? ppp_start_xmit+0x10d/0x120 [ppp_generic]
+ [<c11c15cb>] ? dev_hard_start_xmit+0x21f/0x2b2
+ [<c11d0947>] ? sch_direct_xmit+0x48/0x10e
+ [<c11c19a0>] ? dev_queue_xmit+0x263/0x3a6
+ [<c11e2a9f>] ? ip_finish_output+0x1f7/0x221
+ [<c11df682>] ? ip_forward_finish+0x2e/0x30
+ [<c11de645>] ? ip_rcv_finish+0x295/0x2a9
+ [<c11c0b19>] ? netif_receive_skb+0x3e9/0x404
+ [<f814b791>] ? e1000_clean_rx_irq+0x253/0x2fc [e1000e]
+ [<f814cb7a>] ? e1000_clean+0x63/0x1fc [e1000e]
+ [<c1047eff>] ? sched_clock_local+0x15/0x11b
+ [<c11c1095>] ? net_rx_action+0x96/0x195
+ [<c1035750>] ? __do_softirq+0xaa/0x151
+ [<c1035828>] ? do_softirq+0x31/0x3c
+ [<c10358fe>] ? irq_exit+0x26/0x58
+ [<c1004b21>] ? do_IRQ+0x78/0x89
+ [<c1003729>] ? common_interrupt+0x29/0x30
+ [<c101ac28>] ? native_safe_halt+0x2/0x3
+ [<c1008c54>] ? default_idle+0x55/0x75
+ [<c1009045>] ? c1e_idle+0xd2/0xd5
+ [<c100233c>] ? cpu_idle+0x46/0x62
+Code: 8d 45 08 f0 ff 45 08 89 6b 08 c7 43 68 7e fb 9c f8 8a 45 24 83 e0 0c 3c 04 75 09 80 63 64 f3 e9 b4 00 00 00 8b 43 18 8b 4c 24 04 <8b> 40 0c 8d 79 11 f6 40 44 0e 8a 43 64 75 51 6a 00 8b 4c 24 08
+EIP: [<f89d074c>] pppol2tp_xmit+0x341/0x4da [pppol2tp] SS:ESP 0068:f70a9cac
+CR2: 000000000000000c
+
+Signed-off-by: James Chapman <jchapman@katalix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/pppol2tp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/pppol2tp.c
++++ b/drivers/net/pppol2tp.c
+@@ -1178,7 +1178,8 @@ static int pppol2tp_xmit(struct ppp_chan
+ /* Calculate UDP checksum if configured to do so */
+ if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
+ skb->ip_summed = CHECKSUM_NONE;
+- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
++ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
++ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = skb_checksum(skb, 0, udp_len, 0);
+ uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
--- /dev/null
+From 550f0d922286556c7ea43974bb7921effb5a5278 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 3 May 2010 20:44:21 +0000
+Subject: parisc: clear floating point exception flag on SIGFPE signal
+
+From: Helge Deller <deller@gmx.de>
+
+commit 550f0d922286556c7ea43974bb7921effb5a5278 upstream.
+
+Clear the floating point exception flag before returning to
+user space. This is needed, else the libc trampoline handler
+may hit the same SIGFPE again while building up a trampoline
+to a signal handler.
+
+Fixes debian bug #559406.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/math-emu/decode_exc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/parisc/math-emu/decode_exc.c
++++ b/arch/parisc/math-emu/decode_exc.c
+@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[],
+ return SIGNALCODE(SIGFPE, FPE_FLTINV);
+ case DIVISIONBYZEROEXCEPTION:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
++ Clear_excp_register(exception_index);
+ return SIGNALCODE(SIGFPE, FPE_FLTDIV);
+ case INEXACTEXCEPTION:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
--- /dev/null
+From 6377a7ae1ab82859edccdbc8eaea63782efb134d Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Fri, 19 Mar 2010 16:59:19 -0700
+Subject: [SCSI] qla2xxx: Disable MSI on qla24xx chips other than QLA2432.
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 6377a7ae1ab82859edccdbc8eaea63782efb134d upstream.
+
+On specific platforms, MSI is unreliable on some of the QLA24xx chips, resulting
+in fatal I/O errors under load, as reported in <http://bugs.debian.org/572322>
+and by some RHEL customers.
+
+Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/qla2xxx/qla_isr.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2240,30 +2240,28 @@ qla2x00_request_irqs(struct qla_hw_data
+
+ /* If possible, enable MSI-X. */
+ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
+- !IS_QLA8432(ha) && !IS_QLA8001(ha))
+- goto skip_msix;
++ !IS_QLA8432(ha) && !IS_QLA8001(ha))
++ goto skip_msi;
++
++ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
++ (ha->pdev->subsystem_device == 0x7040 ||
++ ha->pdev->subsystem_device == 0x7041 ||
++ ha->pdev->subsystem_device == 0x1705)) {
++ DEBUG2(qla_printk(KERN_WARNING, ha,
++ "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
++ ha->pdev->subsystem_vendor,
++ ha->pdev->subsystem_device));
++ goto skip_msi;
++ }
+
+ if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
+ !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
+ ha->pdev->revision, ha->fw_attributes));
+-
+ goto skip_msix;
+ }
+
+- if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+- (ha->pdev->subsystem_device == 0x7040 ||
+- ha->pdev->subsystem_device == 0x7041 ||
+- ha->pdev->subsystem_device == 0x1705)) {
+- DEBUG2(qla_printk(KERN_WARNING, ha,
+- "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
+- ha->pdev->subsystem_vendor,
+- ha->pdev->subsystem_device));
+-
+- goto skip_msi;
+- }
+-
+ ret = qla24xx_enable_msix(ha, rsp);
+ if (!ret) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
--- /dev/null
+From 5fa782c2f5ef6c2e4f04d3e228412c9b4a4c8809 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Wed, 28 Apr 2010 10:30:59 +0000
+Subject: sctp: Fix skb_over_panic resulting from multiple invalid parameter errors (CVE-2010-1173) (v4)
+
+From: Neil Horman <nhorman@tuxdriver.com>
+
+commit 5fa782c2f5ef6c2e4f04d3e228412c9b4a4c8809 upstream.
+
+Ok, version 4
+
+Change Notes:
+1) Minor cleanups, from Vlad's notes
+
+Summary:
+
+Hey-
+ Recently, it was reported to me that the kernel could oops in the
+following way:
+
+<5> kernel BUG at net/core/skbuff.c:91!
+<5> invalid operand: 0000 [#1]
+<5> Modules linked in: sctp netconsole nls_utf8 autofs4 sunrpc iptable_filter
+ip_tables cpufreq_powersave parport_pc lp parport vmblock(U) vsock(U) vmci(U)
+vmxnet(U) vmmemctl(U) vmhgfs(U) acpiphp dm_mirror dm_mod button battery ac md5
+ipv6 uhci_hcd ehci_hcd snd_ens1371 snd_rawmidi snd_seq_device snd_pcm_oss
+snd_mixer_oss snd_pcm snd_timer snd_page_alloc snd_ac97_codec snd soundcore
+pcnet32 mii floppy ext3 jbd ata_piix libata mptscsih mptsas mptspi mptscsi
+mptbase sd_mod scsi_mod
+<5> CPU: 0
+<5> EIP: 0060:[<c02bff27>] Not tainted VLI
+<5> EFLAGS: 00010216 (2.6.9-89.0.25.EL)
+<5> EIP is at skb_over_panic+0x1f/0x2d
+<5> eax: 0000002c ebx: c033f461 ecx: c0357d96 edx: c040fd44
+<5> esi: c033f461 edi: df653280 ebp: 00000000 esp: c040fd40
+<5> ds: 007b es: 007b ss: 0068
+<5> Process swapper (pid: 0, threadinfo=c040f000 task=c0370be0)
+<5> Stack: c0357d96 e0c29478 00000084 00000004 c033f461 df653280 d7883180
+e0c2947d
+<5> 00000000 00000080 df653490 00000004 de4f1ac0 de4f1ac0 00000004
+df653490
+<5> 00000001 e0c2877a 08000800 de4f1ac0 df653490 00000000 e0c29d2e
+00000004
+<5> Call Trace:
+<5> [<e0c29478>] sctp_addto_chunk+0xb0/0x128 [sctp]
+<5> [<e0c2947d>] sctp_addto_chunk+0xb5/0x128 [sctp]
+<5> [<e0c2877a>] sctp_init_cause+0x3f/0x47 [sctp]
+<5> [<e0c29d2e>] sctp_process_unk_param+0xac/0xb8 [sctp]
+<5> [<e0c29e90>] sctp_verify_init+0xcc/0x134 [sctp]
+<5> [<e0c20322>] sctp_sf_do_5_1B_init+0x83/0x28e [sctp]
+<5> [<e0c25333>] sctp_do_sm+0x41/0x77 [sctp]
+<5> [<c01555a4>] cache_grow+0x140/0x233
+<5> [<e0c26ba1>] sctp_endpoint_bh_rcv+0xc5/0x108 [sctp]
+<5> [<e0c2b863>] sctp_inq_push+0xe/0x10 [sctp]
+<5> [<e0c34600>] sctp_rcv+0x454/0x509 [sctp]
+<5> [<e084e017>] ipt_hook+0x17/0x1c [iptable_filter]
+<5> [<c02d005e>] nf_iterate+0x40/0x81
+<5> [<c02e0bb9>] ip_local_deliver_finish+0x0/0x151
+<5> [<c02e0c7f>] ip_local_deliver_finish+0xc6/0x151
+<5> [<c02d0362>] nf_hook_slow+0x83/0xb5
+<5> [<c02e0bb2>] ip_local_deliver+0x1a2/0x1a9
+<5> [<c02e0bb9>] ip_local_deliver_finish+0x0/0x151
+<5> [<c02e103e>] ip_rcv+0x334/0x3b4
+<5> [<c02c66fd>] netif_receive_skb+0x320/0x35b
+<5> [<e0a0928b>] init_stall_timer+0x67/0x6a [uhci_hcd]
+<5> [<c02c67a4>] process_backlog+0x6c/0xd9
+<5> [<c02c690f>] net_rx_action+0xfe/0x1f8
+<5> [<c012a7b1>] __do_softirq+0x35/0x79
+<5> [<c0107efb>] handle_IRQ_event+0x0/0x4f
+<5> [<c01094de>] do_softirq+0x46/0x4d
+
+It's an skb_over_panic BUG halt that results from processing an init chunk in
+which too many of its variable-length parameters are in some way malformed.
+
+The problem is in sctp_process_unk_param:
+	if (NULL == *errp)
+		*errp = sctp_make_op_error_space(asoc, chunk,
+					ntohs(chunk->chunk_hdr->length));
+
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+				WORD_ROUND(ntohs(param.p->length)));
+		sctp_addto_chunk(*errp,
+				WORD_ROUND(ntohs(param.p->length)),
+				param.v);
+
+When we allocate an error chunk, we assume that the worst-case scenario
+requires chunk_hdr->length bytes of allocated data, which would be correct
+nominally, given that we call sctp_addto_chunk for the violating parameter.
+Unfortunately, sctp_init_cause also inserts a sctp_errhdr_t structure into the
+error chunk, so the worst-case situation in which all parameters are in
+violation requires chunk_hdr->length+(sizeof(sctp_errhdr_t)*param_count) bytes.
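+
+As a rough illustration, with invented numbers (an INIT chunk carrying 100
+unknown 8-byte parameters; none of this is taken from the oops above), the
+shortfall works out like this:
+
+#include <stdio.h>
+
+#define ERRHDR_LEN   4u		/* sizeof(sctp_errhdr_t): __be16 cause + __be16 length */
+#define INIT_FIXED   20u	/* chunk header plus the fixed INIT fields */
+#define PARAM_LEN    8u		/* one (malformed) TLV parameter */
+#define PARAM_COUNT  100u	/* every parameter is unknown/invalid */
+
+int main(void)
+{
+	unsigned int chunk_len = INIT_FIXED + PARAM_COUNT * PARAM_LEN;
+	unsigned int allocated = chunk_len;				/* old code */
+	unsigned int needed = chunk_len + PARAM_COUNT * ERRHDR_LEN;	/* worst case */
+
+	printf("allocated %u bytes, worst case needs %u (short by %u)\n",
+	       allocated, needed, needed - allocated);
+	return 0;
+}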
+
+The result of this error is that a deliberately malformed packet sent to a
+listening host can cause a remote DoS, described in CVE-2010-1173:
+http://cve.mitre.org/cgi-bin/cvename.cgi?name=2010-1173
+
+I've tested the fix below and confirmed that it resolves the issue. We move to
+a strategy whereby we allocate a fixed-size error chunk and ignore errors we
+don't have space to report. Tested by me successfully.
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/sctp/structs.h | 1
+ net/sctp/sm_make_chunk.c | 62 +++++++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 58 insertions(+), 5 deletions(-)
+
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -772,6 +772,7 @@ int sctp_user_addto_chunk(struct sctp_ch
+ struct iovec *data);
+ void sctp_chunk_free(struct sctp_chunk *);
+ void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
++void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
+ struct sctp_chunk *sctp_chunkify(struct sk_buff *,
+ const struct sctp_association *,
+ struct sock *);
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp
+ cpu_to_be16(sizeof(struct sctp_paramhdr)),
+ };
+
+-/* A helper to initialize to initialize an op error inside a
++/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk.
+ */
+@@ -124,6 +124,29 @@ void sctp_init_cause(struct sctp_chunk
+ chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
+ }
+
++/* A helper to initialize an op error inside a
++ * provided chunk, as most cause codes will be embedded inside an
++ * abort chunk. Differs from sctp_init_cause in that it won't oops
++ * if there isn't enough space in the op error chunk
++ */
++int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
++ size_t paylen)
++{
++ sctp_errhdr_t err;
++ __u16 len;
++
++ /* Cause code constants are now defined in network order. */
++ err.cause = cause_code;
++ len = sizeof(sctp_errhdr_t) + paylen;
++ err.length = htons(len);
++
++ if (skb_tailroom(chunk->skb) > len)
++ return -ENOSPC;
++ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
++ sizeof(sctp_errhdr_t),
++ &err);
++ return 0;
++}
+ /* 3.3.2 Initiation (INIT) (1)
+ *
+ * This chunk is used to initiate a SCTP association between two
+@@ -1125,6 +1148,24 @@ nodata:
+ return retval;
+ }
+
++/* Create an Operation Error chunk of a fixed size,
++ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
++ * This is a helper function to allocate an error chunk for
++ * for those invalid parameter codes in which we may not want
++ * to report all the errors, if the incomming chunk is large
++ */
++static inline struct sctp_chunk *sctp_make_op_error_fixed(
++ const struct sctp_association *asoc,
++ const struct sctp_chunk *chunk)
++{
++ size_t size = asoc ? asoc->pathmtu : 0;
++
++ if (!size)
++ size = SCTP_DEFAULT_MAXSEGMENT;
++
++ return sctp_make_op_error_space(asoc, chunk, size);
++}
++
+ /* Create an Operation Error chunk. */
+ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+@@ -1365,6 +1406,18 @@ void *sctp_addto_chunk(struct sctp_chunk
+ return target;
+ }
+
++/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
++ * space in the chunk
++ */
++void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
++ int len, const void *data)
++{
++ if (skb_tailroom(chunk->skb) > len)
++ return sctp_addto_chunk(chunk, len, data);
++ else
++ return NULL;
++}
++
+ /* Append bytes from user space to the end of a chunk. Will panic if
+ * chunk is not big enough.
+ * Returns a kernel err value.
+@@ -1968,13 +2021,12 @@ static sctp_ierror_t sctp_process_unk_pa
+ * returning multiple unknown parameters.
+ */
+ if (NULL == *errp)
+- *errp = sctp_make_op_error_space(asoc, chunk,
+- ntohs(chunk->chunk_hdr->length));
++ *errp = sctp_make_op_error_fixed(asoc, chunk);
+
+ if (*errp) {
+- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
++ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ WORD_ROUND(ntohs(param.p->length)));
+- sctp_addto_chunk(*errp,
++ sctp_addto_chunk_fixed(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
+ } else {
iwlwifi-recalculate-average-tpt-if-not-current.patch
iwlwifi-update-supported-pci_id-list-for-5xx0-series.patch
wl1251-fix-a-memory-leak-in-probe.patch
+ext4-check-s_log_groups_per_flex-in-online-resize-code.patch
+ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
+gfs2-fix-permissions-checking-for-setflags-ioctl.patch
+sctp-fix-skb_over_panic-resulting-from-multiple-invalid-parameter-errors-cve-2010-1173-v4.patch
+cifs-allow-null-nd-as-nfs-server-uses-on-create.patch
+vfs-add-nofollow-flag-to-umount-2.patch
+l2tp-fix-oops-in-pppol2tp_xmit.patch
+btrfs-should-add-a-permission-check-for-setfacl.patch
+ucc_geth-fix-empty-tx-queue-processing.patch
+ucc_geth-fix-netdev-watchdog-triggering-on-link-changes.patch
+ucc_geth-fix-full-tx-queue-processing.patch
+tipc-fix-oops-on-send-prior-to-entering-networked-mode-v3.patch
+input-psmouse-reset-all-types-of-mice-before-reconnecting.patch
+0001-KVM-s390-Fix-possible-memory-leak-of-in-kvm_arch_vcp.patch
+0002-KVM-PPC-Do-not-create-debugfs-if-fail-to-create-vcpu.patch
+0003-x86-paravirt-Add-a-global-synchronization-point-for-.patch
+0004-KVM-Don-t-allow-lmsw-to-clear-cr0.pe.patch
+0005-KVM-x86-Check-LMA-bit-before-set_efer.patch
+0006-KVM-MMU-Segregate-shadow-pages-with-different-cr0.wp.patch
+0007-KVM-VMX-enable-VMXON-check-with-SMX-enabled-Intel-TX.patch
+0008-KVM-MMU-Don-t-read-pdptrs-with-mmu-spinlock-held-in-.patch
+0009-KVM-Fix-wallclock-version-writing-race.patch
+0010-KVM-x86-Add-missing-locking-to-arch-specific-vcpu-io.patch
+0011-KVM-x86-Inject-GP-with-the-right-rip-on-efer-writes.patch
+jbd-jbd-debug-and-jbd2-debug-should-be-writable.patch
+parisc-clear-floating-point-exception-flag-on-sigfpe-signal.patch
+dm-snapshot-simplify-sector_to_chunk-expression.patch
+keys-return-more-accurate-error-codes.patch
+keys-find_keyring_by_name-can-gain-access-to-a-freed-keyring.patch
+qla2xxx-disable-msi-on-qla24xx-chips-other-than-qla2432.patch
--- /dev/null
+From d0021b252eaf65ca07ed14f0d66425dd9ccab9a6 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Wed, 3 Mar 2010 08:31:23 +0000
+Subject: tipc: Fix oops on send prior to entering networked mode (v3)
+
+From: Neil Horman <nhorman@tuxdriver.com>
+
+commit d0021b252eaf65ca07ed14f0d66425dd9ccab9a6 upstream.
+
+Fix TIPC to disallow sending to remote addresses prior to entering NET_MODE
+
+User programs can oops the kernel by sending datagrams via AF_TIPC prior to
+entering networked mode. The following backtrace has been observed:
+
+ID: 13459 TASK: ffff810014640040 CPU: 0 COMMAND: "tipc-client"
+[exception RIP: tipc_node_select_next_hop+90]
+RIP: ffffffff8869d3c3 RSP: ffff81002d9a5ab8 RFLAGS: 00010202
+RAX: 0000000000000001 RBX: 0000000000000001 RCX: 0000000000000001
+RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000001001001
+RBP: 0000000001001001 R8: 0074736575716552 R9: 0000000000000000
+R10: ffff81003fbd0680 R11: 00000000000000c8 R12: 0000000000000008
+R13: 0000000000000001 R14: 0000000000000001 R15: ffff810015c6ca00
+ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
+RIP: 0000003cbd8d49a3 RSP: 00007fffc84e0be8 RFLAGS: 00010206
+RAX: 000000000000002c RBX: ffffffff8005d116 RCX: 0000000000000000
+RDX: 0000000000000008 RSI: 00007fffc84e0c00 RDI: 0000000000000003
+RBP: 0000000000000000 R8: 00007fffc84e0c10 R9: 0000000000000010
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007fffc84e0d10 R14: 0000000000000000 R15: 00007fffc84e0c30
+ORIG_RAX: 000000000000002c CS: 0033 SS: 002b
+
+What happens is that, when the tipc module is inserted, it enters a standalone
+node mode in which communication to its own address <0.0.0> is allowed, but
+not to other addresses, since the appropriate data structures have not been
+allocated yet (specifically the tipc_net pointer). There is nothing stopping a
+client from trying to send such a message, however, and if that happens, we
+attempt to dereference tipc_net.zones while the pointer is still NULL, and
+explode. The fix is pretty straightforward. Since these oopses all arise from
+the dereference of global pointers prior to their assignment to allocated
+values, and since these allocations are small (about 2k total), let's convert
+these pointers to static arrays of the appropriate size. All the accesses to
+these bits consider 0/NULL to be a non-match when searching, so all the
+lookups still work properly, and there is no longer a chance of a bad
+dereference anywhere. As a bonus, this lets us eliminate the setup/teardown
+routines for those pointers, and eliminates the need to perform any locking
+around them to prevent access while they're being allocated/freed.
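+
+A minimal user-space sketch of the resulting lookup pattern (the structure
+and names below are simplified stand-ins, not the real TIPC types):
+
+#include <stdio.h>
+#include <string.h>
+
+#define MAX_BEARERS 8
+
+struct bearer_stub {
+	char name[16];
+	int  active;
+};
+
+/* before: struct bearer_stub *table;  kcalloc'd in an init routine */
+static struct bearer_stub table[MAX_BEARERS];	/* after: always valid, zeroed */
+
+static struct bearer_stub *bearer_find(const char *name)
+{
+	int i;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		/* zeroed (never-registered) slots are simply "no match" */
+		if (table[i].active && strcmp(table[i].name, name) == 0)
+			return &table[i];
+	}
+	return NULL;	/* safe even before any setup has run */
+}
+
+int main(void)
+{
+	/* a lookup before anything is registered misses instead of oopsing */
+	printf("%s\n", bearer_find("eth:eth0") ? "found" : "not found");
+	return 0;
+}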
+
+I've updated the tipc_net structure to behave this way to fix the exact
+reported problem, and also fixed up the tipc_bearers and media_list arrays to
+fix an obvious, similar problem that arises from issuing tipc-config commands
+to manipulate bearers/links prior to entering networked mode.
+
+I've tested this for a few hours by running the sanity tests and stress test
+with the tipcutils suite, and nothing has fallen over. There have been a few
+lockdep warnings, but those were there before, and can be addressed later, as
+they didn't actually result in any deadlock.
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+CC: Allan Stephens <allan.stephens@windriver.com>
+CC: David S. Miller <davem@davemloft.net>
+CC: tipc-discussion@lists.sourceforge.net
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/tipc/bearer.c | 37 ++++++-------------------------------
+ net/tipc/bearer.h | 2 +-
+ net/tipc/net.c | 25 ++++---------------------
+ 3 files changed, 11 insertions(+), 53 deletions(-)
+
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -45,10 +45,10 @@
+
+ #define MAX_ADDR_STR 32
+
+-static struct media *media_list = NULL;
++static struct media media_list[MAX_MEDIA];
+ static u32 media_count = 0;
+
+-struct bearer *tipc_bearers = NULL;
++struct bearer tipc_bearers[MAX_BEARERS];
+
+ /**
+ * media_name_valid - validate media name
+@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
+ int res = -EINVAL;
+
+ write_lock_bh(&tipc_net_lock);
+- if (!media_list)
+- goto exit;
+
++ if (tipc_mode != TIPC_NET_MODE) {
++ warn("Media <%s> rejected, not in networked mode yet\n", name);
++ goto exit;
++ }
+ if (!media_name_valid(name)) {
+ warn("Media <%s> rejected, illegal name\n", name);
+ goto exit;
+@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name
+
+
+
+-int tipc_bearer_init(void)
+-{
+- int res;
+-
+- write_lock_bh(&tipc_net_lock);
+- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
+- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
+- if (tipc_bearers && media_list) {
+- res = 0;
+- } else {
+- kfree(tipc_bearers);
+- kfree(media_list);
+- tipc_bearers = NULL;
+- media_list = NULL;
+- res = -ENOMEM;
+- }
+- write_unlock_bh(&tipc_net_lock);
+- return res;
+-}
+-
+ void tipc_bearer_stop(void)
+ {
+ u32 i;
+
+- if (!tipc_bearers)
+- return;
+-
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (tipc_bearers[i].active)
+ tipc_bearers[i].publ.blocked = 1;
+@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
+ if (tipc_bearers[i].active)
+ bearer_disable(tipc_bearers[i].publ.name);
+ }
+- kfree(tipc_bearers);
+- kfree(media_list);
+- tipc_bearers = NULL;
+- media_list = NULL;
+ media_count = 0;
+ }
+
+--- a/net/tipc/bearer.h
++++ b/net/tipc/bearer.h
+@@ -114,7 +114,7 @@ struct bearer_name {
+
+ struct link;
+
+-extern struct bearer *tipc_bearers;
++extern struct bearer tipc_bearers[];
+
+ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+ struct sk_buff *tipc_media_get_names(void);
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -116,7 +116,8 @@
+ */
+
+ DEFINE_RWLOCK(tipc_net_lock);
+-struct network tipc_net = { NULL };
++struct _zone *tipc_zones[256] = { NULL, };
++struct network tipc_net = { tipc_zones };
+
+ struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
+ {
+@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 d
+ }
+ }
+
+-static int net_init(void)
+-{
+- memset(&tipc_net, 0, sizeof(tipc_net));
+- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
+- if (!tipc_net.zones) {
+- return -ENOMEM;
+- }
+- return 0;
+-}
+-
+ static void net_stop(void)
+ {
+ u32 z_num;
+
+- if (!tipc_net.zones)
+- return;
+-
+- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
++ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
+ tipc_zone_delete(tipc_net.zones[z_num]);
+- }
+- kfree(tipc_net.zones);
+- tipc_net.zones = NULL;
+ }
+
+ static void net_route_named_msg(struct sk_buff *buf)
+@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
+ tipc_named_reinit();
+ tipc_port_reinit();
+
+- if ((res = tipc_bearer_init()) ||
+- (res = net_init()) ||
+- (res = tipc_cltr_init()) ||
++ if ((res = tipc_cltr_init()) ||
+ (res = tipc_bclink_init())) {
+ return res;
+ }
--- /dev/null
+From 7583605b6d29f1f7f6fc505b883328089f3485ad Mon Sep 17 00:00:00 2001
+From: Anton Vorontsov <avorontsov@ru.mvista.com>
+Date: Thu, 24 Dec 2009 05:31:03 +0000
+Subject: ucc_geth: Fix empty TX queue processing
+
+From: Anton Vorontsov <avorontsov@ru.mvista.com>
+
+commit 7583605b6d29f1f7f6fc505b883328089f3485ad upstream.
+
+Following oops was seen with the ucc_geth driver:
+
+ Unable to handle kernel paging request for data at address 0x00000058
+ Faulting instruction address: 0xc024f2fc
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ [...]
+ NIP [c024f2fc] skb_recycle_check+0x14/0x100
+ LR [e30aa0a4] ucc_geth_poll+0xd8/0x4e0 [ucc_geth_driver]
+ Call Trace:
+ [df857d50] [c000b03c] __ipipe_grab_irq+0x3c/0xa4 (unreliable)
+ [df857d60] [e30aa0a4] ucc_geth_poll+0xd8/0x4e0 [ucc_geth_driver]
+ [df857dd0] [c0258cf8] net_rx_action+0xf8/0x1b8
+ [df857e10] [c0032a38] __do_softirq+0xb8/0x13c
+ [df857e60] [c00065cc] do_softirq+0xa0/0xac
+ [...]
+
+This is because ucc_geth_tx() tries to process an empty queue when
+queues are logically stopped. Stopping the queues doesn't disable
+polling, and since nowadays ucc_geth_tx() is actually called from
+the polling routine, the oops above might pop up.
+
+Fix this by removing the 'netif_queue_stopped() == 0' check.
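+
+A condensed sketch of why the old break condition goes wrong; the two
+booleans stand in for the real ring pointer comparison and queue state:
+
+#include <stdio.h>
+#include <stdbool.h>
+
+int main(void)
+{
+	bool ring_empty    = true;	/* bd == ugeth->txBd[txQ] */
+	bool queue_stopped = true;	/* the stack stopped the queue */
+
+	/* old test: emptiness only "counted" while the queue was running,
+	 * so a stopped, empty ring was processed anyway -> NULL skb oops */
+	if (!(ring_empty && !queue_stopped))
+		printf("old check: keeps going and touches a slot with no skb\n");
+
+	/* fixed test: emptiness is a property of the ring alone */
+	if (ring_empty)
+		printf("new check: breaks out cleanly\n");
+	return 0;
+}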
+
+Reported-by: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>
+Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
+Tested-by: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/ucc_geth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ucc_geth.c
++++ b/drivers/net/ucc_geth.c
+@@ -3273,7 +3273,7 @@ static int ucc_geth_tx(struct net_device
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
++ if (bd == ugeth->txBd[txQ]) /* queue empty? */
+ break;
+
+ dev->stats.tx_packets++;
--- /dev/null
+From 34692421bc7d6145ef383b014860f4fde10b7505 Mon Sep 17 00:00:00 2001
+From: Jiajun Wu <b06378@freescale.com>
+Date: Mon, 18 Jan 2010 05:47:50 +0000
+Subject: ucc_geth: Fix full TX queue processing
+
+From: Jiajun Wu <b06378@freescale.com>
+
+commit 34692421bc7d6145ef383b014860f4fde10b7505 upstream.
+
+commit 7583605b6d29f1f7f6fc505b883328089f3485ad ("ucc_geth: Fix empty
+TX queue processing") fixed empty TX queue mishandling, but didn't
+account another corner case: when TX queue becomes full.
+
+Without this patch the driver will stop transmitting when the TX queue
+becomes full, since 'bd == ugeth->txBd[txQ]' actually checks for two things:
+queue empty or full.
+
+Let's check for a NULL skb instead, which unambiguously signals an empty
+queue.
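+
+This is the classic circular-buffer ambiguity: producer index == consumer
+index can mean either "empty" or "full". A stand-alone sketch of the per-slot
+marker approach the patch switches to (names are illustrative, not the
+driver's real fields):
+
+#include <stdio.h>
+
+#define RING_LEN 4
+
+static void *tx_skb[RING_LEN];	/* per-slot marker: NULL means the slot is free */
+static int dirty_tx;		/* next slot to clean (consumer) */
+static int cur_tx;		/* next slot to fill (producer) */
+
+static void tx_complete(void)
+{
+	/* 'dirty_tx == cur_tx' is true both when the ring is empty and when
+	 * it is completely full, so it cannot serve as the loop guard; a
+	 * NULL skb in the slot is unambiguous. */
+	while (tx_skb[dirty_tx] != NULL) {
+		printf("reclaim slot %d\n", dirty_tx);
+		tx_skb[dirty_tx] = NULL;
+		dirty_tx = (dirty_tx + 1) % RING_LEN;
+	}
+}
+
+int main(void)
+{
+	static int dummy[RING_LEN];
+	int i;
+
+	for (i = 0; i < RING_LEN; i++) {	/* fill the ring completely */
+		tx_skb[cur_tx] = &dummy[cur_tx];
+		cur_tx = (cur_tx + 1) % RING_LEN;
+	}
+	/* here dirty_tx == cur_tx even though the ring is full */
+	tx_complete();				/* still reclaims all four slots */
+	return 0;
+}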
+
+Signed-off-by: Jiajun Wu <b06378@freescale.com>
+Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/ucc_geth.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ucc_geth.c
++++ b/drivers/net/ucc_geth.c
+@@ -3276,13 +3276,12 @@ static int ucc_geth_tx(struct net_device
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+- if (bd == ugeth->txBd[txQ]) /* queue empty? */
++ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
++ if (!skb)
+ break;
+
+ dev->stats.tx_packets++;
+
+- skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+-
+ if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
+ skb_recycle_check(skb,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
--- /dev/null
+From 08b5e1c91ce95793c59a59529a362a1bcc81faae Mon Sep 17 00:00:00 2001
+From: Anton Vorontsov <avorontsov@ru.mvista.com>
+Date: Thu, 24 Dec 2009 05:31:05 +0000
+Subject: ucc_geth: Fix netdev watchdog triggering on link changes
+
+From: Anton Vorontsov <avorontsov@ru.mvista.com>
+
+commit 08b5e1c91ce95793c59a59529a362a1bcc81faae upstream.
+
+Since commit 864fdf884e82bacbe8ca5e93bd43393a61d2e2b4 ("ucc_geth:
+Fix hangs after switching from full to half duplex") the ucc_geth driver
+disables the controller during MAC configuration changes. However,
+disabling the controller might take quite a while, and so the netdev
+watchdog might get upset:
+
+ NETDEV WATCHDOG: eth2 (ucc_geth): transmit queue 0 timed out
+ ------------[ cut here ]------------
+ Badness at c02729a8 [verbose debug info unavailable]
+ NIP: c02729a8 LR: c02729a8 CTR: c01b6088
+ REGS: c0451c40 TRAP: 0700 Not tainted (2.6.32-trunk-8360e)
+ [...]
+ NIP [c02729a8] dev_watchdog+0x280/0x290
+ LR [c02729a8] dev_watchdog+0x280/0x290
+ Call Trace:
+ [c0451cf0] [c02729a8] dev_watchdog+0x280/0x290 (unreliable)
+ [c0451d50] [c00377c4] run_timer_softirq+0x164/0x224
+ [c0451da0] [c0032a38] __do_softirq+0xb8/0x13c
+ [c0451df0] [c00065cc] do_softirq+0xa0/0xac
+ [c0451e00] [c003280c] irq_exit+0x7c/0x9c
+ [c0451e10] [c00640c4] __ipipe_sync_stage+0x248/0x24c
+ [...]
+
+This patch fixes the issue by detaching the netdev during the
+time we change the configuration.
+
+Reported-by: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>
+Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
+Tested-by: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/ucc_geth.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ucc_geth.c
++++ b/drivers/net/ucc_geth.c
+@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth
+
+ static void ugeth_quiesce(struct ucc_geth_private *ugeth)
+ {
+- /* Wait for and prevent any further xmits. */
++ /* Prevent any further xmits, plus detach the device. */
++ netif_device_detach(ugeth->ndev);
++
++ /* Wait for any current xmits to finish. */
+ netif_tx_disable(ugeth->ndev);
+
+ /* Disable the interrupt to avoid NAPI rescheduling. */
+@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_ge
+ {
+ napi_enable(&ugeth->napi);
+ enable_irq(ugeth->ug_info->uf_info.irq);
+- netif_tx_wake_all_queues(ugeth->ndev);
++ netif_device_attach(ugeth->ndev);
+ }
+
+ /* Called every time the controller might need to be made
--- /dev/null
+From db1f05bb85d7966b9176e293f3ceead1cb8b5d79 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Wed, 10 Feb 2010 12:15:53 +0100
+Subject: vfs: add NOFOLLOW flag to umount(2)
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit db1f05bb85d7966b9176e293f3ceead1cb8b5d79 upstream.
+
+Add a new UMOUNT_NOFOLLOW flag to umount(2). This is needed to prevent
+symlink attacks in unprivileged unmounts (fuse, samba, ncpfs).
+
+Additionally, return -EINVAL if an unknown flag is used (and specify
+an explicitly unused flag: UMOUNT_UNUSED). This makes it possible for
+the caller to determine if a flag is supported or not.
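+
+For reference, a user-space caller would use the new flag roughly like this
+(the fallback define matches the value added below; kernels predating this
+patch silently ignore the flag rather than reject it):
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/mount.h>
+
+#ifndef UMOUNT_NOFOLLOW
+#define UMOUNT_NOFOLLOW 0x00000008	/* don't follow a trailing symlink */
+#endif
+
+int main(int argc, char **argv)
+{
+	const char *target = argc > 1 ? argv[1] : "/mnt/test";
+
+	/* refuse to unmount through a symlink planted at 'target' */
+	if (umount2(target, UMOUNT_NOFOLLOW) != 0) {
+		fprintf(stderr, "umount2(%s): %s\n", target, strerror(errno));
+		return 1;
+	}
+	return 0;
+}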
+
+CC: Eugene Teo <eugene@redhat.com>
+CC: Michael Kerrisk <mtk.manpages@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/namespace.c | 9 ++++++++-
+ include/linux/fs.h | 2 ++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1119,8 +1119,15 @@ SYSCALL_DEFINE2(umount, char __user *, n
+ {
+ struct path path;
+ int retval;
++ int lookup_flags = 0;
+
+- retval = user_path(name, &path);
++ if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
++ return -EINVAL;
++
++ if (!(flags & UMOUNT_NOFOLLOW))
++ lookup_flags |= LOOKUP_FOLLOW;
++
++ retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
+ if (retval)
+ goto out;
+ retval = -EINVAL;
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1310,6 +1310,8 @@ extern int send_sigurg(struct fown_struc
+ #define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
+ #define MNT_DETACH 0x00000002 /* Just detach from the tree */
+ #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
++#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
++#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
+
+ extern struct list_head super_blocks;
+ extern spinlock_t sb_lock;