--- /dev/null
+From e991a2bd4fa0b2f475b67dfe8f33e8ecbdcbb40b Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Date: Mon, 28 Apr 2008 02:14:06 -0700
+Subject: Fix tty speed handling on 8250
+
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+commit e991a2bd4fa0b2f475b67dfe8f33e8ecbdcbb40b upstream.
+
+We try to write the correct speed back, but the serial midlayer has already
+mangled the speed on us, which means that if we request B0 we report back
+B9600 when we should not. For now we'll hack around this in the drivers and
+serial code, pending a better long-term solution.
+
+Signed-off-by: Alan Cox <alan@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/serial/8250.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/serial/8250.c
++++ b/drivers/serial/8250.c
+@@ -2174,7 +2174,9 @@ serial8250_set_termios(struct uart_port
+ }
+ serial8250_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+- tty_termios_encode_baud_rate(termios, baud, baud);
++ /* Don't rewrite B0 */
++ if (tty_termios_baud_rate(termios))
++ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+ static void
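+
+[ For illustration only (not part of the upstream patch): a minimal userspace
+  sketch of the behaviour the guard above fixes, assuming a serial port at
+  /dev/ttyS0.  It requests B0 and checks whether the speed reported back
+  stays at B0 rather than being rewritten to B9600. ]
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <termios.h>
+
+int main(void)
+{
+	struct termios tio;
+	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* hypothetical port */
+
+	if (fd < 0 || tcgetattr(fd, &tio) < 0)
+		return 1;
+
+	cfsetispeed(&tio, B0);		/* B0: "hang up" / zero baud */
+	cfsetospeed(&tio, B0);
+	if (tcsetattr(fd, TCSANOW, &tio) < 0)
+		return 1;
+
+	/* Read the settings back: with the guard above, the driver no longer
+	 * re-encodes the midlayer-mangled rate, so B0 is preserved instead
+	 * of coming back as B9600. */
+	tcgetattr(fd, &tio);
+	printf("ospeed %s B0\n", cfgetospeed(&tio) == B0 ? "==" : "!=");
+
+	close(fd);
+	return 0;
+}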
--- /dev/null
+From 42a886af728c089df8da1b0017b0e7e6c81b5335 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 17 Jun 2008 17:47:50 -0700
+Subject: x86-64: Fix "bytes left to copy" return value for copy_from_user()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 42a886af728c089df8da1b0017b0e7e6c81b5335 upstream.
+
+Most users by far do not care about the exact return value (they only
+really care about whether the copy succeeded in its entirety or not),
+but a few special core routines actually care deeply about exactly how
+many bytes were copied from user space.
+
+And the unrolled versions of the x86-64 user copy routines would
+sometimes report that they had copied more bytes than they actually had.
+
+Very few uses actually have partial copies to begin with, but to make
+this bug even harder to trigger, most x86 CPUs use the "rep string"
+instructions for normal user copies, and that version didn't have this
+issue.
+
+To make it even harder to hit, the one user of this that really cared
+about the return value (and used the uncached version of the copy that
+doesn't use the "rep string" instructions) was the generic write
+routine, which pre-populated its source, once more hiding the problem by
+avoiding the exception case that triggers the bug.
+
+In other words, very special thanks to Bron Gondwana, who not only
+triggered this but created a test program to show it and bisected the
+behavior down to commit 08291429cfa6258c4cd95d8833beb40f828b194e ("mm:
+fix pagecache write deadlocks"), which changed the access pattern just
+enough that you can now trigger it with 'writev()' with multiple
+iovecs.
+
+That commit itself was not the cause of the bug; it just allowed all the
+stars to align just right so that you could trigger the problem.
+
+[ Side note: this is just the minimal fix to make the copy routines
+ (with __copy_from_user_inatomic_nocache as the particular version that
+ was involved in showing this) have the right return values.
+
+ We really should improve on the exceptional case further - to make the
+ copy do a byte-accurate copy up to the exact page limit that causes it
+ to fail. As it is, the callers have to do extra work to handle the
+ limit case gracefully. ]
+
+Reported-by: Bron Gondwana <brong@fastmail.fm>
+Cc: Nick Piggin <npiggin@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/lib/copy_user_64.S | 25 +++++++++++--------------
+ arch/x86/lib/copy_user_nocache_64.S | 25 +++++++++++--------------
+ 2 files changed, 22 insertions(+), 28 deletions(-)
+
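+[ For illustration only (not part of the upstream patch): a hedged sketch of
+  the caller-side convention the fix restores.  copy_from_user() returns the
+  number of bytes it could NOT copy, and a caller that handles partial copies
+  derives the copied length from that.  The function name and buffers below
+  are hypothetical. ]
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+/* Hypothetical caller that, like the generic write path, must know exactly
+ * how many bytes really made it into kbuf. */
+static ssize_t example_copy_in(char *kbuf, const char __user *ubuf, size_t len)
+{
+	/* copy_from_user() returns the number of bytes left uncopied;
+	 * 0 means the whole range was copied. */
+	unsigned long left = copy_from_user(kbuf, ubuf, len);
+	size_t copied = len - left;
+
+	/* If 'left' is under-reported (the bug fixed here), 'copied' is
+	 * over-reported and the caller goes on to trust kernel memory
+	 * that was never filled from user space. */
+	if (!copied)
+		return -EFAULT;
+	return copied;		/* caller may retry the remainder */
+}
+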
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -217,19 +217,19 @@ ENTRY(copy_user_generic_unrolled)
+ /* table sorted by exception address */
+ .section __ex_table,"a"
+ .align 8
+- .quad .Ls1,.Ls1e
+- .quad .Ls2,.Ls2e
+- .quad .Ls3,.Ls3e
+- .quad .Ls4,.Ls4e
+- .quad .Ld1,.Ls1e
++ .quad .Ls1,.Ls1e /* Ls1-Ls4 have copied zero bytes */
++ .quad .Ls2,.Ls1e
++ .quad .Ls3,.Ls1e
++ .quad .Ls4,.Ls1e
++ .quad .Ld1,.Ls1e /* Ld1-Ld4 have copied 0-24 bytes */
+ .quad .Ld2,.Ls2e
+ .quad .Ld3,.Ls3e
+ .quad .Ld4,.Ls4e
+- .quad .Ls5,.Ls5e
+- .quad .Ls6,.Ls6e
+- .quad .Ls7,.Ls7e
+- .quad .Ls8,.Ls8e
+- .quad .Ld5,.Ls5e
++ .quad .Ls5,.Ls5e /* Ls5-Ls8 have copied 32 bytes */
++ .quad .Ls6,.Ls5e
++ .quad .Ls7,.Ls5e
++ .quad .Ls8,.Ls5e
++ .quad .Ld5,.Ls5e /* Ld5-Ld8 have copied 32-56 bytes */
+ .quad .Ld6,.Ls6e
+ .quad .Ld7,.Ls7e
+ .quad .Ld8,.Ls8e
+@@ -244,11 +244,8 @@ ENTRY(copy_user_generic_unrolled)
+ .quad .Le5,.Le_zero
+ .previous
+
+- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
+- pessimistic side. this is gross. it would be better to fix the
+- interface. */
+ /* eax: zero, ebx: 64 */
+-.Ls1e: addl $8,%eax
++.Ls1e: addl $8,%eax /* eax is bytes left uncopied within the loop (Ls1e: 64 .. Ls8e: 8) */
+ .Ls2e: addl $8,%eax
+ .Ls3e: addl $8,%eax
+ .Ls4e: addl $8,%eax
+--- a/arch/x86/lib/copy_user_nocache_64.S
++++ b/arch/x86/lib/copy_user_nocache_64.S
+@@ -145,19 +145,19 @@ ENTRY(__copy_user_nocache)
+ /* table sorted by exception address */
+ .section __ex_table,"a"
+ .align 8
+- .quad .Ls1,.Ls1e
+- .quad .Ls2,.Ls2e
+- .quad .Ls3,.Ls3e
+- .quad .Ls4,.Ls4e
+- .quad .Ld1,.Ls1e
++ .quad .Ls1,.Ls1e /* .Ls[1-4] - 0 bytes copied */
++ .quad .Ls2,.Ls1e
++ .quad .Ls3,.Ls1e
++ .quad .Ls4,.Ls1e
++ .quad .Ld1,.Ls1e /* .Ld[1-4] - 0..24 bytes copied */
+ .quad .Ld2,.Ls2e
+ .quad .Ld3,.Ls3e
+ .quad .Ld4,.Ls4e
+- .quad .Ls5,.Ls5e
+- .quad .Ls6,.Ls6e
+- .quad .Ls7,.Ls7e
+- .quad .Ls8,.Ls8e
+- .quad .Ld5,.Ls5e
++ .quad .Ls5,.Ls5e /* .Ls[5-8] - 32 bytes copied */
++ .quad .Ls6,.Ls5e
++ .quad .Ls7,.Ls5e
++ .quad .Ls8,.Ls5e
++ .quad .Ld5,.Ls5e /* .Ld[5-8] - 32..56 bytes copied */
+ .quad .Ld6,.Ls6e
+ .quad .Ld7,.Ls7e
+ .quad .Ld8,.Ls8e
+@@ -172,11 +172,8 @@ ENTRY(__copy_user_nocache)
+ .quad .Le5,.Le_zero
+ .previous
+
+- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
+- pessimistic side. this is gross. it would be better to fix the
+- interface. */
+ /* eax: zero, ebx: 64 */
+-.Ls1e: addl $8,%eax
++.Ls1e: addl $8,%eax /* eax: bytes left uncopied: Ls1e: 64 .. Ls8e: 8 */
+ .Ls2e: addl $8,%eax
+ .Ls3e: addl $8,%eax
+ .Ls4e: addl $8,%eax
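+
+[ For illustration only (not part of either file above): a small userspace C
+  model of the per-iteration accounting the corrected exception tables
+  implement.  Each 64-byte loop iteration does four 8-byte loads (.Ls1-.Ls4),
+  four stores (.Ld1-.Ld4), four more loads (.Ls5-.Ls8) and four more stores
+  (.Ld5-.Ld8); entering the fixup chain at .LsNe reports 8*(9-N) bytes of the
+  iteration as uncopied.  The model compares what the old and new tables
+  report for a fault at each instruction against the bytes actually still
+  uncopied. ]
+
+#include <stdio.h>
+
+/* Standalone editorial model, not kernel code. */
+
+/* Bytes of the current 64-byte iteration already stored to the destination
+ * when a fault hits load N (.LsN) or store N (.LdN). */
+static int stored_before_load(int n)  { return n <= 4 ? 0 : 32; }
+static int stored_before_store(int n) { return (n - 1) * 8; }
+
+/* Value accumulated by entering the fixup chain at .LsNe: 8*(9-N). */
+static int fixup(int n) { return 8 * (9 - n); }
+
+int main(void)
+{
+	for (int n = 1; n <= 8; n++) {
+		int actual  = 64 - stored_before_load(n);
+		int old_fix = fixup(n);                     /* old table: .LsN -> .LsNe */
+		int new_fix = n <= 4 ? fixup(1) : fixup(5); /* new table: .Ls1e or .Ls5e */
+
+		printf(".Ls%d fault: %2d bytes left, old table said %2d, new says %2d\n",
+		       n, actual, old_fix, new_fix);
+	}
+	for (int n = 1; n <= 8; n++)	/* store-fault entries are unchanged */
+		printf(".Ld%d fault: %2d bytes left, table says %2d\n",
+		       n, 64 - stored_before_store(n), fixup(n));
+	return 0;
+}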