--- /dev/null
+From c19483cc5e56ac5e22dd19cf25ba210ab1537773 Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@linux.intel.com>
+Date: Fri, 22 Oct 2010 14:11:26 +0100
+Subject: bluetooth: Fix missing NULL check
+
+From: Alan Cox <alan@linux.intel.com>
+
+commit c19483cc5e56ac5e22dd19cf25ba210ab1537773 upstream.
+
+Fortunately this is only exploitable on very unusual hardware.
+
+[Reported a while ago but nothing happened so just fixing it]
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/bluetooth/hci_ldisc.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -258,9 +258,16 @@ static int hci_uart_tty_open(struct tty_
+
+ BT_DBG("tty %p", tty);
+
++ /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
++ the pointer */
+ if (hu)
+ return -EEXIST;
+
++ /* Error if the tty has no write op instead of leaving an exploitable
++ hole */
++ if (tty->ops->write == NULL)
++ return -EOPNOTSUPP;
++
+ if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
+ BT_ERR("Can't allocate control structure");
+ return -ENFILE;
--- /dev/null
+From aa91c7e4ab9b0842b7d7a7cbf8cca18b20df89b5 Mon Sep 17 00:00:00 2001
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Fri, 17 Sep 2010 18:56:39 +0530
+Subject: cifs: fix broken oplock handling
+
+From: Suresh Jayaraman <sjayaraman@suse.de>
+
+commit aa91c7e4ab9b0842b7d7a7cbf8cca18b20df89b5 upstream.
+
+cifs_new_fileinfo() does not use the 'oplock' value from the callers. Instead,
+it sets it to REQ_OPLOCK which seems wrong. We should be using the oplock value
+obtained from the Server to set the inode's clientCanCacheAll or
+clientCanCacheRead flags. Fix this by passing oplock from the callers to
+cifs_new_fileinfo().
+
+This change dates back to commit a6ce4932 (2.6.30-rc3). So, all the affected
+versions will need this fix. Please Cc stable once reviewed and accepted.
+
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifsproto.h | 3 ++-
+ fs/cifs/dir.c | 12 +++++-------
+ fs/cifs/file.c | 4 ++--
+ 3 files changed, 9 insertions(+), 10 deletions(-)
+
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -104,7 +104,8 @@ extern struct timespec cnvrtDosUnixTm(__
+
+ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
+ __u16 fileHandle, struct file *file,
+- struct vfsmount *mnt, unsigned int oflags);
++ struct vfsmount *mnt, unsigned int oflags,
++ __u32 oplock);
+ extern int cifs_posix_open(char *full_path, struct inode **pinode,
+ struct super_block *sb,
+ int mode, int oflags,
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -138,9 +138,9 @@ cifs_bp_rename_retry:
+ */
+ struct cifsFileInfo *
+ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
+- struct file *file, struct vfsmount *mnt, unsigned int oflags)
++ struct file *file, struct vfsmount *mnt, unsigned int oflags,
++ __u32 oplock)
+ {
+- int oplock = 0;
+ struct cifsFileInfo *pCifsFile;
+ struct cifsInodeInfo *pCifsInode;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
+@@ -149,9 +149,6 @@ cifs_new_fileinfo(struct inode *newinode
+ if (pCifsFile == NULL)
+ return pCifsFile;
+
+- if (oplockEnabled)
+- oplock = REQ_OPLOCK;
+-
+ pCifsFile->netfid = fileHandle;
+ pCifsFile->pid = current->tgid;
+ pCifsFile->pInode = igrab(newinode);
+@@ -476,7 +473,7 @@ cifs_create_set_dentry:
+ }
+
+ pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
+- nd->path.mnt, oflags);
++ nd->path.mnt, oflags, oplock);
+ if (pfile_info == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, tcon, fileHandle);
+@@ -738,7 +735,8 @@ cifs_lookup(struct inode *parent_dir_ino
+
+ cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
+ nd->path.mnt,
+- nd->intent.open.flags);
++ nd->intent.open.flags,
++ oplock);
+ if (cfile == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, pTcon, fileHandle);
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -277,7 +277,7 @@ int cifs_open(struct inode *inode, struc
+
+ pCifsFile = cifs_new_fileinfo(inode, netfid, file,
+ file->f_path.mnt,
+- oflags);
++ oflags, oplock);
+ if (pCifsFile == NULL) {
+ CIFSSMBClose(xid, tcon, netfid);
+ rc = -ENOMEM;
+@@ -367,7 +367,7 @@ int cifs_open(struct inode *inode, struc
+ goto out;
+
+ pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
+- file->f_flags);
++ file->f_flags, oplock);
+ if (pCifsFile == NULL) {
+ rc = -ENOMEM;
+ goto out;
--- /dev/null
+From 7ada876a8703f23befbb20a7465a702ee39b1704 Mon Sep 17 00:00:00 2001
+From: Darren Hart <dvhart@linux.intel.com>
+Date: Sun, 17 Oct 2010 08:35:04 -0700
+Subject: futex: Fix errors in nested key ref-counting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Darren Hart <dvhart@linux.intel.com>
+
+commit 7ada876a8703f23befbb20a7465a702ee39b1704 upstream.
+
+futex_wait() is leaking key references due to futex_wait_setup()
+acquiring an additional reference via the queue_lock() routine. The
+nested key ref-counting has been masking bugs and complicating code
+analysis. queue_lock() is only called with a previously ref-counted
+key, so remove the additional ref-counting from the queue_(un)lock()
+functions.
+
+Also futex_wait_requeue_pi() drops one key reference too many in
+unqueue_me_pi(). Remove the key reference handling from
+unqueue_me_pi(). This was paired with a queue_lock() in
+futex_lock_pi(), so the count remains unchanged.
+
+Document remaining nested key ref-counting sites.
+
+Signed-off-by: Darren Hart <dvhart@linux.intel.com>
+Reported-and-tested-by: Matthieu Fertré<matthieu.fertre@kerlabs.com>
+Reported-by: Louis Rilling<louis.rilling@kerlabs.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+LKML-Reference: <4CBB17A8.70401@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/futex.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1363,7 +1363,6 @@ static inline struct futex_hash_bucket *
+ {
+ struct futex_hash_bucket *hb;
+
+- get_futex_key_refs(&q->key);
+ hb = hash_futex(&q->key);
+ q->lock_ptr = &hb->lock;
+
+@@ -1375,7 +1374,6 @@ static inline void
+ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+ {
+ spin_unlock(&hb->lock);
+- drop_futex_key_refs(&q->key);
+ }
+
+ /**
+@@ -1480,8 +1478,6 @@ static void unqueue_me_pi(struct futex_q
+ q->pi_state = NULL;
+
+ spin_unlock(q->lock_ptr);
+-
+- drop_futex_key_refs(&q->key);
+ }
+
+ /*
+@@ -1812,7 +1808,10 @@ static int futex_wait(u32 __user *uaddr,
+ }
+
+ retry:
+- /* Prepare to wait on uaddr. */
++ /*
++ * Prepare to wait on uaddr. On success, holds hb lock and increments
++ * q.key refs.
++ */
+ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+ if (ret)
+ goto out;
+@@ -1822,24 +1821,23 @@ retry:
+
+ /* If we were woken (and unqueued), we succeeded, whatever. */
+ ret = 0;
++ /* unqueue_me() drops q.key ref */
+ if (!unqueue_me(&q))
+- goto out_put_key;
++ goto out;
+ ret = -ETIMEDOUT;
+ if (to && !to->task)
+- goto out_put_key;
++ goto out;
+
+ /*
+ * We expect signal_pending(current), but we might be the
+ * victim of a spurious wakeup as well.
+ */
+- if (!signal_pending(current)) {
+- put_futex_key(fshared, &q.key);
++ if (!signal_pending(current))
+ goto retry;
+- }
+
+ ret = -ERESTARTSYS;
+ if (!abs_time)
+- goto out_put_key;
++ goto out;
+
+	restart = &current_thread_info()->restart_block;
+ restart->fn = futex_wait_restart;
+@@ -1856,8 +1854,6 @@ retry:
+
+ ret = -ERESTART_RESTARTBLOCK;
+
+-out_put_key:
+- put_futex_key(fshared, &q.key);
+ out:
+ if (to) {
+ hrtimer_cancel(&to->timer);
+@@ -2236,7 +2232,10 @@ static int futex_wait_requeue_pi(u32 __u
+ q.rt_waiter = &rt_waiter;
+ q.requeue_pi_key = &key2;
+
+- /* Prepare to wait on uaddr. */
++ /*
++ * Prepare to wait on uaddr. On success, increments q.key (key1) ref
++ * count.
++ */
+ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+ if (ret)
+ goto out_key2;
+@@ -2254,7 +2253,9 @@ static int futex_wait_requeue_pi(u32 __u
+ * In order for us to be here, we know our q.key == key2, and since
+ * we took the hb->lock above, we also know that futex_requeue() has
+ * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquition by the requeue code.
++ * race with the atomic proxy lock acquisition by the requeue code. The
++ * futex_requeue dropped our key1 reference and incremented our key2
++ * reference count.
+ */
+
+ /* Check if the requeue code acquired the second futex for us. */
--- /dev/null
+From c25d29952b2a8c9aaf00e081c9162a0e383030cd Mon Sep 17 00:00:00 2001
+From: Len Brown <len.brown@intel.com>
+Date: Sat, 23 Oct 2010 23:25:53 -0400
+Subject: intel_idle: do not use the LAPIC timer for ATOM C2
+
+From: Len Brown <len.brown@intel.com>
+
+commit c25d29952b2a8c9aaf00e081c9162a0e383030cd upstream.
+
+If we use the LAPIC timer during ATOM C2 on
+some nvidia chipsets, the system stalls.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=21032
+
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Tom Gundersen <teg@jklm.no>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/idle/intel_idle.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -308,7 +308,7 @@ static int intel_idle_probe(void)
+ break;
+
+ case 0x1C: /* 28 - Atom Processor */
+- lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
++ lapic_timer_reliable_states = (1 << 1); /* C1 */
+ cpuidle_state_table = atom_cstates;
+ choose_substate = choose_zero_substate;
+ break;
--- /dev/null
+From cda0008299a06f0d7218c6037c3c02d7a865e954 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 2 Sep 2010 17:29:46 +0200
+Subject: KVM: SVM: Restore correct registers after sel_cr0 intercept emulation
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit cda0008299a06f0d7218c6037c3c02d7a865e954 upstream.
+
+This patch implements restoring of the correct rip, rsp, and
+rax after the svm emulation in KVM injected a selective_cr0
+write intercept into the guest hypervisor. The problem was
+that the vmexit is emulated in the instruction emulation
+which later commits the registers right after the write-cr0
+instruction. So the l1 guest will continue to run with the
+l2 rip, rsp and rax resulting in unpredictable behavior.
+
+This patch is not the final word, it is just an easy patch
+to fix the issue. The real fix will be done when the
+instruction emulator is made aware of nested virtualization.
+Until this is done this patch fixes the issue and provides
+an easy way to fix this in -stable too.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c | 33 +++++++++++++++++++++++++++++++--
+ 1 file changed, 31 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -87,6 +87,14 @@ struct nested_state {
+ /* A VMEXIT is required but not yet emulated */
+ bool exit_required;
+
++ /*
++ * If we vmexit during an instruction emulation we need this to restore
++ * the l1 guest rip after the emulation
++ */
++ unsigned long vmexit_rip;
++ unsigned long vmexit_rsp;
++ unsigned long vmexit_rax;
++
+ /* cache for intercepts of the guest */
+ u16 intercept_cr_read;
+ u16 intercept_cr_write;
+@@ -1201,8 +1209,12 @@ static void svm_set_cr0(struct kvm_vcpu
+ if (old == new) {
+ /* cr0 write with ts and mp unchanged */
+ svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+- if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
++ if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
++ svm->nested.vmexit_rip = kvm_rip_read(vcpu);
++ svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
++ svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ return;
++ }
+ }
+ }
+
+@@ -2398,6 +2410,23 @@ static int emulate_on_interception(struc
+ return 1;
+ }
+
++static int cr0_write_interception(struct vcpu_svm *svm)
++{
++ struct kvm_vcpu *vcpu = &svm->vcpu;
++ int r;
++
++ r = emulate_instruction(&svm->vcpu, 0, 0, 0);
++
++ if (svm->nested.vmexit_rip) {
++ kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
++ kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
++ kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
++ svm->nested.vmexit_rip = 0;
++ }
++
++ return r == EMULATE_DONE;
++}
++
+ static int cr8_write_interception(struct vcpu_svm *svm)
+ {
+ struct kvm_run *kvm_run = svm->vcpu.run;
+@@ -2671,7 +2700,7 @@ static int (*svm_exit_handlers[])(struct
+ [SVM_EXIT_READ_CR4] = emulate_on_interception,
+ [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
+- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
++ [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
+ [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
--- /dev/null
+From 4c62a2dc92518c5adf434df8e5c2283c6762672a Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 10 Sep 2010 17:31:06 +0200
+Subject: KVM: X86: Report SVM bit to userspace only when supported
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 4c62a2dc92518c5adf434df8e5c2283c6762672a upstream.
+
+This patch fixes a bug in KVM where it _always_ reports the
+support of the SVM feature to userspace. But KVM only
+supports SVM on AMD hardware and only when it is enabled in
+the kernel module. This patch fixes the wrong reporting.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c | 4 ++++
+ arch/x86/kvm/x86.c | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3253,6 +3253,10 @@ static void svm_cpuid_update(struct kvm_
+ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ {
+ switch (func) {
++ case 0x80000001:
++ if (nested)
++ entry->ecx |= (1 << 2); /* Set SVM bit */
++ break;
+ case 0x8000000A:
+ entry->eax = 1; /* SVM revision 1 */
+ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1910,7 +1910,7 @@ static void do_cpuid_ent(struct kvm_cpui
+ 0 /* Reserved, XSAVE, OSXSAVE */;
+ /* cpuid 0x80000001.ecx */
+ const u32 kvm_supported_word6_x86_features =
+- F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
++ F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+ F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+ F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+ 0 /* SKINIT */ | 0 /* WDT */;
--- /dev/null
+From 6ad601955315b010a117306b994f2204fae85fdc Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 15 Oct 2010 11:00:08 +0200
+Subject: libahci: fix result_tf handling after an ATA PIO data-in command
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 6ad601955315b010a117306b994f2204fae85fdc upstream.
+
+ATA devices don't send D2H Reg FIS after a successful ATA PIO data-in
+command. The host is supposed to take the TF and E_Status of the
+preceding PIO Setup FIS. Update ahci_qc_fill_rtf() such that it takes
+TF + E_Status from PIO Setup FIS after a successful ATA PIO data-in
+command.
+
+Without this patch, result_tf for such a command is filled with the
+content of the previous D2H Reg FIS which belongs to a previous
+command, which can make the command incorrectly seen as failed.
+
+* Patch updated to grab the whole TF + E_Status from PIO Setup FIS
+ instead of just E_Status as suggested by Robert Hancock.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Mark Lord <kernel@teksavvy.com>
+Cc: Robert Hancock <hancockrwd@gmail.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/ahci.h | 1 +
+ drivers/ata/libahci.c | 18 +++++++++++++++---
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -72,6 +72,7 @@ enum {
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
++ RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1824,12 +1824,24 @@ static unsigned int ahci_qc_issue(struct
+ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+ {
+ struct ahci_port_priv *pp = qc->ap->private_data;
+- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
++ u8 *rx_fis = pp->rx_fis;
+
+ if (pp->fbs_enabled)
+- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
++ rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
++
++ /*
++ * After a successful execution of an ATA PIO data-in command,
++ * the device doesn't send D2H Reg FIS to update the TF and
++ * the host should take TF and E_Status from the preceding PIO
++ * Setup FIS.
++ */
++ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
++ !(qc->flags & ATA_QCFLAG_FAILED)) {
++ ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
++ qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
++ } else
++ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+- ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+ }
+
--- /dev/null
+From 3ee48b6af49cf534ca2f481ecc484b156a41451d Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Thu, 16 Sep 2010 11:44:02 -0500
+Subject: mm, x86: Saving vmcore with non-lazy freeing of vmas
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 3ee48b6af49cf534ca2f481ecc484b156a41451d upstream.
+
+During the reading of /proc/vmcore the kernel is doing
+ioremap()/iounmap() repeatedly. And the buildup of un-flushed
+vm_area_struct's is causing a great deal of overhead. (rb_next()
+is chewing up most of that time).
+
+This solution is to provide function set_iounmap_nonlazy(). It
+causes a subsequent call to iounmap() to immediately purge the
+vma area (with try_purge_vmap_area_lazy()).
+
+With this patch we have seen the time for writing a 250MB
+compressed dump drop from 71 seconds to 44 seconds.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: kexec@lists.infradead.org
+LKML-Reference: <E1OwHZ4-0005WK-Tw@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/io.h | 1 +
+ arch/x86/kernel/crash_dump_64.c | 1 +
+ mm/vmalloc.c | 9 +++++++++
+ 3 files changed, 11 insertions(+)
+
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(reso
+
+ extern void iounmap(volatile void __iomem *addr);
+
++extern void set_iounmap_nonlazy(void);
+
+ #ifdef __KERNEL__
+
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long p
+ } else
+ memcpy(buf, vaddr + offset, csize);
+
++ set_iounmap_nonlazy();
+ iounmap(vaddr);
+ return csize;
+ }
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -513,6 +513,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_IN
+ static void purge_fragmented_blocks_allcpus(void);
+
+ /*
++ * called before a call to iounmap() if the caller wants vm_area_struct's
++ * immediately freed.
++ */
++void set_iounmap_nonlazy(void)
++{
++ atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
++}
++
++/*
+ * Purges all lazily-freed vmap areas.
+ *
+ * If sync is 0 then don't purge if there is already a purge in progress.
--- /dev/null
+From e5953cbdff26f7cbae7eff30cd9b18c4e19b7594 Mon Sep 17 00:00:00 2001
+From: Nicolas Kaiser <nikai@nikai.net>
+Date: Thu, 21 Oct 2010 14:56:00 +0200
+Subject: pipe: fix failure to return error code on ->confirm()
+
+From: Nicolas Kaiser <nikai@nikai.net>
+
+commit e5953cbdff26f7cbae7eff30cd9b18c4e19b7594 upstream.
+
+The arguments were transposed, we want to assign the error code to
+'ret', which is being returned.
+
+Signed-off-by: Nicolas Kaiser <nikai@nikai.net>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/pipe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const stru
+ error = ops->confirm(pipe, buf);
+ if (error) {
+ if (!ret)
+- error = ret;
++ ret = error;
+ break;
+ }
+
pcmcia-synclink_cs-fix-information-leak-to-userland.patch
sched-drop-all-load-weight-manipulation-for-rt-tasks.patch
sched-fix-string-comparison-in-proc-sched_features.patch
+bluetooth-fix-missing-null-check.patch
+futex-fix-errors-in-nested-key-ref-counting.patch
+cifs-fix-broken-oplock-handling.patch
+libahci-fix-result_tf-handling-after-an-ata-pio-data-in-command.patch
+intel_idle-do-not-use-the-lapic-timer-for-atom-c2.patch
+mm-x86-saving-vmcore-with-non-lazy-freeing-of-vmas.patch
+x86-cpu-fix-renamed-not-yet-shipping-amd-cpuid-feature-bit.patch
+x86-kexec-make-sure-to-stop-all-cpus-before-exiting-the-kernel.patch
+x86-olpc-don-t-retry-ec-commands-forever.patch
+x86-mtrr-assume-sys_cfg-exists-on-all-future-amd-cpus.patch
+x86-intr-remap-set-redirection-hint-in-the-irte.patch
+x86-kdump-change-copy_oldmem_page-to-use-cached-addressing.patch
+x86-vm86-fix-preemption-bug-for-int1-debug-and-int3-breakpoint-handlers.patch
+kvm-x86-report-svm-bit-to-userspace-only-when-supported.patch
+kvm-svm-restore-correct-registers-after-sel_cr0-intercept-emulation.patch
+usb-mct_u232-fix-broken-close.patch
+pipe-fix-failure-to-return-error-code-on-confirm.patch
--- /dev/null
+From 92ca0dc5ee022e4c0e488177e1d8865a0778c6c2 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <jhovold@gmail.com>
+Date: Thu, 21 Oct 2010 10:49:10 +0200
+Subject: USB: mct_u232: fix broken close
+
+From: Johan Hovold <jhovold@gmail.com>
+
+commit 92ca0dc5ee022e4c0e488177e1d8865a0778c6c2 upstream.
+
+Fix regression introduced by commit
+f26788da3b342099d2b02d99ba1cb7f154d6ef7b (USB: serial: refactor generic
+close) which broke driver close().
+
+This driver uses non-standard semantics for the read urb which makes the
+generic close function fail to kill it (the read urb is actually an
+interrupt urb and therefore bulk_in size is zero).
+
+Reported-by: Eric Shattow "Eprecocious" <lucent@gmail.com>
+Tested-by: Eric Shattow "Eprecocious" <lucent@gmail.com>
+Signed-off-by: Johan Hovold <jhovold@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/serial/mct_u232.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -549,9 +549,12 @@ static void mct_u232_close(struct usb_se
+ {
+ dbg("%s port %d", __func__, port->number);
+
+- usb_serial_generic_close(port);
+- if (port->serial->dev)
++ if (port->serial->dev) {
++ /* shutdown our urbs */
++ usb_kill_urb(port->write_urb);
++ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
++ }
+ } /* mct_u232_close */
+
+
--- /dev/null
+From 7ef8aa72ab176e0288f363d1247079732c5d5792 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@amd.com>
+Date: Mon, 6 Sep 2010 15:14:17 +0200
+Subject: x86, cpu: Fix renamed, not-yet-shipping AMD CPUID feature bit
+
+From: Andre Przywara <andre.przywara@amd.com>
+
+commit 7ef8aa72ab176e0288f363d1247079732c5d5792 upstream.
+
+The AMD SSE5 feature set as-it has been replaced by some extensions
+to the AVX instruction set. Thus the bit formerly advertised as SSE5
+is re-used for one of these extensions (XOP).
+Although this changes the /proc/cpuinfo output, it is not user visible, as
+there are no CPUs (yet) having this feature.
+To avoid confusion this should be added to the stable series, too.
+
+Signed-off-by: Andre Przywara <andre.przywara@amd.com>
+LKML-Reference: <1283778860-26843-2-git-send-email-andre.przywara@amd.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/cpufeature.h | 2 +-
+ arch/x86/kvm/x86.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -150,7 +150,7 @@
+ #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
+ #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
+ #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
+-#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
++#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
+ #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
+ #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
+ #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1912,7 +1912,7 @@ static void do_cpuid_ent(struct kvm_cpui
+ const u32 kvm_supported_word6_x86_features =
+ F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+ F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+- F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
++ F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+ 0 /* SKINIT */ | 0 /* WDT */;
+
+ /* all calls to cpuid_count() should be made on the same cpu */
--- /dev/null
+From 75e3cfbed6f71a8f151dc6e413b6ce3c390030cb Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Fri, 27 Aug 2010 11:09:48 -0700
+Subject: x86, intr-remap: Set redirection hint in the IRTE
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit 75e3cfbed6f71a8f151dc6e413b6ce3c390030cb upstream.
+
+Currently the redirection hint in the interrupt-remapping table entry
+is set to 0, which means the remapped interrupt is directed to the
+processors listed in the destination. So in logical flat mode
+in the presence of intr-remapping, this results in a single
+interrupt multi-casted to multiple cpu's as specified by the destination
+bit mask. But what we really want is to send that interrupt to one of the cpus
+based on the lowest priority delivery mode.
+
+Set the redirection hint in the IRTE to '1' to indicate that we want
+the remapped interrupt to be directed to only one of the processors
+listed in the destination.
+
+This fixes the issue of same interrupt getting delivered to multiple cpu's
+in the logical flat mode in the presence of interrupt-remapping. While
+there is no functional issue observed with this behavior, this will
+impact performance of such configurations (<=8 cpu's using logical flat
+mode in the presence of interrupt-remapping)
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+LKML-Reference: <20100827181049.013051492@sbsiddha-MOBL3.sc.intel.com>
+Cc: Weidong Han <weidong.han@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/io_apic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int
+ irte.dlvry_mode = apic->irq_delivery_mode;
+ irte.vector = vector;
+ irte.dest_id = IRTE_DEST(destination);
++ irte.redir_hint = 1;
+
+ /* Set source-id of interrupt request */
+ set_ioapic_sid(&irte, apic_id);
+@@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_de
+ irte.dlvry_mode = apic->irq_delivery_mode;
+ irte.vector = cfg->vector;
+ irte.dest_id = IRTE_DEST(dest);
++ irte.redir_hint = 1;
+
+ /* Set source-id of interrupt request */
+ if (pdev)
--- /dev/null
+From 37a2f9f30a360fb03522d15c85c78265ccd80287 Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Wed, 8 Sep 2010 10:14:27 -0500
+Subject: x86, kdump: Change copy_oldmem_page() to use cached addressing
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 37a2f9f30a360fb03522d15c85c78265ccd80287 upstream.
+
+The copy of /proc/vmcore to a user buffer proceeds much faster
+if the kernel addresses memory as cached.
+
+With this patch we have seen an increase in transfer rate from
+less than 15MB/s to 80-460MB/s, depending on size of the
+transfer. This makes a big difference in time needed to save a
+system dump.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: kexec@lists.infradead.org
+LKML-Reference: <E1OtMLz-0001yp-Ia@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/crash_dump_64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long p
+ if (!csize)
+ return 0;
+
+- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!vaddr)
+ return -ENOMEM;
+
--- /dev/null
+From 76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce Mon Sep 17 00:00:00 2001
+From: Alok Kataria <akataria@vmware.com>
+Date: Mon, 11 Oct 2010 14:37:08 -0700
+Subject: x86, kexec: Make sure to stop all CPUs before exiting the kernel
+
+From: Alok Kataria <akataria@vmware.com>
+
+commit 76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce upstream.
+
+x86 smp_ops now has a new op, stop_other_cpus which takes a parameter
+"wait" this allows the caller to specify if it wants to stop until all
+the cpus have processed the stop IPI. This is required specifically
+for the kexec case where we should wait for all the cpus to be stopped
+before starting the new kernel. We now wait for the cpus to stop in
+all cases except for panic/kdump where we expect things to be broken
+and we are doing our best to make things work anyway.
+
+This patch fixes a legitimate regression, which was introduced during
+2.6.30, by commit id 4ef702c10b5df18ab04921fc252c26421d4d6c75.
+
+Signed-off-by: Alok N Kataria <akataria@vmware.com>
+LKML-Reference: <1286833028.1372.20.camel@ank32.eng.vmware.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/smp.h | 9 +++++++--
+ arch/x86/kernel/reboot.c | 2 +-
+ arch/x86/kernel/smp.c | 15 +++++++++------
+ arch/x86/xen/enlighten.c | 2 +-
+ arch/x86/xen/smp.c | 6 +++---
+ 5 files changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops {
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+- void (*smp_send_stop)(void);
++ void (*stop_other_cpus)(int wait);
+ void (*smp_send_reschedule)(int cpu);
+
+ int (*cpu_up)(unsigned cpu);
+@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
+
+ static inline void smp_send_stop(void)
+ {
+- smp_ops.smp_send_stop();
++ smp_ops.stop_other_cpus(0);
++}
++
++static inline void stop_other_cpus(void)
++{
++ smp_ops.stop_other_cpus(1);
+ }
+
+ static inline void smp_prepare_boot_cpu(void)
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -641,7 +641,7 @@ void native_machine_shutdown(void)
+ /* O.K Now that I'm on the appropriate processor,
+ * stop all of the others.
+ */
+- smp_send_stop();
++ stop_other_cpus();
+ #endif
+
+ lapic_shutdown();
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(voi
+ irq_exit();
+ }
+
+-static void native_smp_send_stop(void)
++static void native_stop_other_cpus(int wait)
+ {
+ unsigned long flags;
+- unsigned long wait;
++ unsigned long timeout;
+
+ if (reboot_force)
+ return;
+@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
+ if (num_online_cpus() > 1) {
+ apic->send_IPI_allbutself(REBOOT_VECTOR);
+
+- /* Don't wait longer than a second */
+- wait = USEC_PER_SEC;
+- while (num_online_cpus() > 1 && wait--)
++ /*
++ * Don't wait longer than a second if the caller
++ * didn't ask us to wait.
++ */
++ timeout = USEC_PER_SEC;
++ while (num_online_cpus() > 1 && (wait || timeout--))
+ udelay(1);
+ }
+
+@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .smp_cpus_done = native_smp_cpus_done,
+
+- .smp_send_stop = native_smp_send_stop,
++ .stop_other_cpus = native_stop_other_cpus,
+ .smp_send_reschedule = native_smp_send_reschedule,
+
+ .cpu_up = native_cpu_up,
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1001,7 +1001,7 @@ static void xen_reboot(int reason)
+ struct sched_shutdown r = { .reason = reason };
+
+ #ifdef CONFIG_SMP
+- smp_send_stop();
++ stop_other_cpus();
+ #endif
+
+ if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -398,9 +398,9 @@ static void stop_self(void *v)
+ BUG();
+ }
+
+-static void xen_smp_send_stop(void)
++static void xen_stop_other_cpus(int wait)
+ {
+- smp_call_function(stop_self, NULL, 0);
++ smp_call_function(stop_self, NULL, wait);
+ }
+
+ static void xen_smp_send_reschedule(int cpu)
+@@ -468,7 +468,7 @@ static const struct smp_ops xen_smp_ops
+ .cpu_disable = xen_cpu_disable,
+ .play_dead = xen_play_dead,
+
+- .smp_send_stop = xen_smp_send_stop,
++ .stop_other_cpus = xen_stop_other_cpus,
+ .smp_send_reschedule = xen_smp_send_reschedule,
+
+ .send_call_func_ipi = xen_smp_send_call_function_ipi,
--- /dev/null
+From 3fdbf004c1706480a7c7fac3c9d836fa6df20d7d Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Thu, 30 Sep 2010 14:32:35 +0200
+Subject: x86, mtrr: Assume SYS_CFG[Tom2ForceMemTypeWB] exists on all future AMD CPUs
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit 3fdbf004c1706480a7c7fac3c9d836fa6df20d7d upstream.
+
+Instead of adapting the CPU family check in amd_special_default_mtrr()
+for each new CPU family assume that all new AMD CPUs support the
+necessary bits in SYS_CFG MSR.
+
+Tom2Enabled is architectural (defined in APM Vol.2).
+Tom2ForceMemTypeWB is defined in all BKDGs starting with K8 NPT.
+In pre K8-NPT BKDG this bit is reserved (read as zero).
+
+Without this adaptation Linux would unnecessarily complain about bad MTRR
+settings on every new AMD CPU family, e.g.
+
+[ 0.000000] WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing 4863MB of RAM.
+
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+LKML-Reference: <20100930123235.GB20545@loge.amd.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/mtrr/cleanup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
++++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
+@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return 0;
+- if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
++ if (boot_cpu_data.x86 < 0xf)
+ return 0;
+ /* In case some hypervisor doesn't pass SYSCFG through: */
+ if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
--- /dev/null
+From 286e5b97eb22baab9d9a41ca76c6b933a484252c Mon Sep 17 00:00:00 2001
+From: Paul Fox <pgf@laptop.org>
+Date: Fri, 1 Oct 2010 18:17:19 +0100
+Subject: x86, olpc: Don't retry EC commands forever
+
+From: Paul Fox <pgf@laptop.org>
+
+commit 286e5b97eb22baab9d9a41ca76c6b933a484252c upstream.
+
+Avoids a potential infinite loop.
+
+It was observed once, during an EC hacking/debugging
+session - not in regular operation.
+
+Signed-off-by: Daniel Drake <dsd@laptop.org>
+Cc: dilinger@queued.net
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/olpc.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/olpc.c
++++ b/arch/x86/kernel/olpc.c
+@@ -117,6 +117,7 @@ int olpc_ec_cmd(unsigned char cmd, unsig
+ unsigned long flags;
+ int ret = -EIO;
+ int i;
++ int restarts = 0;
+
+ spin_lock_irqsave(&ec_lock, flags);
+
+@@ -173,7 +174,9 @@ restart:
+ if (wait_on_obf(0x6c, 1)) {
+ printk(KERN_ERR "olpc-ec: timeout waiting for"
+ " EC to provide data!\n");
+- goto restart;
++ if (restarts++ < 10)
++ goto restart;
++ goto err;
+ }
+ outbuf[i] = inb(0x68);
+ printk(KERN_DEBUG "olpc-ec: received 0x%x\n",
--- /dev/null
+From 6554287b1de0448f1e02e200d02b43914e997d15 Mon Sep 17 00:00:00 2001
+From: Bart Oldeman <bartoldeman@gmail.com>
+Date: Thu, 23 Sep 2010 13:16:58 -0400
+Subject: x86, vm86: Fix preemption bug for int1 debug and int3 breakpoint handlers.
+
+From: Bart Oldeman <bartoldeman@gmail.com>
+
+commit 6554287b1de0448f1e02e200d02b43914e997d15 upstream.
+
+Impact: fix kernel bug such as:
+BUG: scheduling while atomic: dosemu.bin/19680/0x00000004
+See also Ubuntu bug 455067 at
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/455067
+
+Commits 4915a35e35a037254550a2ba9f367a812bc37d40
+("Use preempt_conditional_sti/cli in do_int3, like on x86_64.")
+and 3d2a71a596bd9c761c8487a2178e95f8a61da083
+("x86, traps: converge do_debug handlers")
+started disabling preemption in int1 and int3 handlers on i386.
+The problem with vm86 is that the call to handle_vm86_trap() may jump
+straight to entry_32.S and never returns so preempt is never enabled
+again, and there is an imbalance in the preempt count.
+
+Commit be716615fe596ee117292dc615e95f707fb67fd1 ("x86, vm86:
+fix preemption bug"), which was later (accidentally?) reverted by commit
+08d68323d1f0c34452e614263b212ca556dae47f ("hw-breakpoints: modifying
+generic debug exception to use thread-specific debug registers")
+fixed the problem for debug exceptions but not for breakpoints.
+
+There are three solutions to this problem.
+
+1. Reenable preemption before calling handle_vm86_trap(). This
+was the approach that was later reverted.
+
+2. Do not disable preemption for i386 in breakpoint and debug handlers.
+This was the situation before October 2008. As far as I understand
+preemption only needs to be disabled on x86_64 because a separate stack is
+used, but it's nice to have things work the same way on
+i386 and x86_64.
+
+3. Let handle_vm86_trap() return instead of jumping to assembly code.
+By setting a flag in _TIF_WORK_MASK, either TIF_IRET or TIF_NOTIFY_RESUME,
+the code in entry_32.S is instructed to return to 32 bit mode from
+V86 mode. The logic in entry_32.S was already present to handle signals.
+(I chose TIF_IRET because it's slightly more efficient in
+do_notify_resume() in signal.c, but in fact TIF_IRET can probably be
+replaced by TIF_NOTIFY_RESUME everywhere.)
+
+I'm submitting approach 3, because I believe it is the most elegant
+and prevents future confusion. Still, an obvious
+preempt_conditional_cli(regs); is necessary in traps.c to correct the
+bug.
+
+[ hpa: This is technically a regression, but because:
+ 1. the regression is so old,
+ 2. the patch seems relatively high risk, justifying more testing, and
+ 3. we're late in the 2.6.36-rc cycle,
+
+ I'm queuing it up for the 2.6.37 merge window. It might, however,
+ justify as a -stable backport at a later time, hence Cc: stable. ]
+
+Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net>
+LKML-Reference: <alpine.DEB.2.00.1009231312330.4732@localhost.localdomain>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: K.Prasad <prasad@linux.vnet.ibm.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Alexander van Heukelum <heukelum@fastmail.fm>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/traps.c | 1 +
+ arch/x86/kernel/vm86_32.c | 10 ++++++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -568,6 +568,7 @@ dotraplinkage void __kprobes do_debug(st
+ if (regs->flags & X86_VM_MASK) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs,
+ error_code, 1);
++ preempt_conditional_cli(regs);
+ return;
+ }
+
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -551,8 +551,14 @@ cannot_handle:
+ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
+ {
+ if (VMPI.is_vm86pus) {
+- if ((trapno == 3) || (trapno == 1))
+- return_to_32bit(regs, VM86_TRAP + (trapno << 8));
++ if ((trapno == 3) || (trapno == 1)) {
++ KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
++ /* setting this flag forces the code in entry_32.S to
++ call save_v86_state() and change the stack pointer
++ to KVM86->regs32 */
++ set_thread_flag(TIF_IRET);
++ return 0;
++ }
+ do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
+ return 0;
+ }