--- /dev/null
+From stable-bounces@linux.kernel.org Wed Oct 10 03:22:44 2007
+From: David Miller <davem@davemloft.net>
+Date: Wed, 10 Oct 2007 03:22:30 -0700 (PDT)
+Subject: Fix sys_ipc() SEMCTL on sparc64.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071010.032230.45875923.davem@davemloft.net>
+
+From: David S. Miller <davem@davemloft.net>
+
+changeset 6536a6b331d3225921c398eb7c6e4ecedb9b05e0 from mainline
+
+Thanks to Tom Callaway for the excellent bug report and
+test case.
+
+sys_ipc() has several problems, most to do with semaphore
+call handling:
+
+1) 'err' return should be a 'long'
+2) "union semun" is passed in a register on 64-bit compared
+ to 32-bit which provides it on the stack and therefore
+ by reference
+3) Second and third arguments to SEMCTL are swapped compared
+ to 32-bit.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/sparc64/kernel/sys_sparc.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/arch/sparc64/kernel/sys_sparc.c
++++ b/arch/sparc64/kernel/sys_sparc.c
+@@ -436,7 +436,7 @@ out:
+ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+ unsigned long third, void __user *ptr, long fifth)
+ {
+- int err;
++ long err;
+
+ /* No need for backward compatibility. We can start fresh... */
+ if (call <= SEMCTL) {
+@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int cal
+ err = sys_semget(first, (int)second, (int)third);
+ goto out;
+ case SEMCTL: {
+- union semun fourth;
+- err = -EINVAL;
+- if (!ptr)
+- goto out;
+- err = -EFAULT;
+- if (get_user(fourth.__pad,
+- (void __user * __user *) ptr))
+- goto out;
+- err = sys_semctl(first, (int)second | IPC_64,
+- (int)third, fourth);
++ err = sys_semctl(first, third,
++ (int)second | IPC_64,
++ (union semun) ptr);
+ goto out;
+ }
+ default:
--- /dev/null
+From stable-bounces@linux.kernel.org Wed Oct 10 03:28:47 2007
+From: Eric Dumazet <dada1@cosmosbay.com>
+Date: Wed, 10 Oct 2007 03:28:33 -0700 (PDT)
+Subject: [stable] [PATCH]: Fix TCP initial sequence number selection.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071010.032833.98710737.davem@davemloft.net>
+
+From: Eric Dumazet <dada1@cosmosbay.com>
+
+changeset 162f6690a65075b49f242d3c8cdb5caaa959a060 in mainline.
+
+TCP V4 sequence numbers are 32bits, and RFC 793 assumed a 250 KHz clock.
+In order to follow network speed increase, we can use a faster clock, but
+we should limit this clock so that the delay between two rollovers is
+greater than MSL (TCP Maximum Segment Lifetime : 2 minutes)
+
+Choosing a 64 nsec clock should be OK, since the rollovers occur every
+274 seconds.
+
+Problem spotted by Denys Fedoryshchenko
+
+[ This bug was introduced by f85958151900f9d30fa5ff941b0ce71eaa45a7de ]
+
+Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/random.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32
+ * As close as possible to RFC 793, which
+ * suggests using a 250 kHz clock.
+ * Further reading shows this assumes 2 Mb/s networks.
+- * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
+- * That's funny, Linux has one built in! Use it!
+- * (Networks are faster now - should this be increased?)
++ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
++ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
++ * we also need to limit the resolution so that the u32 seq
++ * overlaps less than one time per MSL (2 minutes).
++ * Choosing a clock of 64 ns period is OK. (period of 274 s)
+ */
+- seq += ktime_get_real().tv64;
++ seq += ktime_get_real().tv64 >> 6;
+ #if 0
+ printk("init_seq(%lx, %lx, %d, %d) = %d\n",
+ saddr, daddr, sport, dport, seq);
--- /dev/null
+From stable-bounces@linux.kernel.org Wed Oct 10 03:27:31 2007
+From: David Miller <davem@davemloft.net>
+Date: Wed, 10 Oct 2007 03:27:19 -0700 (PDT)
+Subject: [stable] [PATCH]: Fix TCP MD5 on big-endian.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071010.032719.54189064.davem@davemloft.net>
+
+From: David Miller <davem@davemloft.net>
+
+changeset f8ab18d2d987a59ccbf0495032b2aef05b730037 in mainline.
+
+Based upon a report and initial patch by Peter Lieven.
+
+tcp4_md5sig_key and tcp6_md5sig_key need to start with
+the exact same members as tcp_md5sig_key. Because they
+are both cast to that type by tcp_v{4,6}_md5_do_lookup().
+
+Unfortunately tcp{4,6}_md5sig_key use a u16 for the key
+length instead of a u8, which is what tcp_md5sig_key
+uses. This just so happens to work by accident on
+little-endian, but on big-endian it doesn't.
+
+Instead of casting, just place tcp_md5sig_key as the first member of
+the address-family specific structures, adjust the access sites, and
+kill off the ugly casts.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/tcp.h | 6 ++----
+ net/ipv4/tcp_ipv4.c | 19 +++++++++----------
+ net/ipv6/tcp_ipv6.c | 18 +++++++++---------
+ 3 files changed, 20 insertions(+), 23 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1061,14 +1061,12 @@ struct tcp_md5sig_key {
+ };
+
+ struct tcp4_md5sig_key {
+- u8 *key;
+- u16 keylen;
++ struct tcp_md5sig_key base;
+ __be32 addr;
+ };
+
+ struct tcp6_md5sig_key {
+- u8 *key;
+- u16 keylen;
++ struct tcp_md5sig_key base;
+ #if 0
+ u32 scope_id; /* XXX */
+ #endif
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
+ return NULL;
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr)
+- return (struct tcp_md5sig_key *)
+- &tp->md5sig_info->keys4[i];
++ return &tp->md5sig_info->keys4[i].base;
+ }
+ return NULL;
+ }
+@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, _
+ key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+ if (key) {
+ /* Pre-existing entry - just update that one. */
+- kfree(key->key);
+- key->key = newkey;
+- key->keylen = newkeylen;
++ kfree(key->base.key);
++ key->base.key = newkey;
++ key->base.keylen = newkeylen;
+ } else {
+ struct tcp_md5sig_info *md5sig;
+
+@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, _
+ md5sig->alloced4++;
+ }
+ md5sig->entries4++;
+- md5sig->keys4[md5sig->entries4 - 1].addr = addr;
+- md5sig->keys4[md5sig->entries4 - 1].key = newkey;
+- md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
++ md5sig->keys4[md5sig->entries4 - 1].addr = addr;
++ md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
++ md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
+ }
+ return 0;
+ }
+@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, _
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr) {
+ /* Free the key */
+- kfree(tp->md5sig_info->keys4[i].key);
++ kfree(tp->md5sig_info->keys4[i].base.key);
+ tp->md5sig_info->entries4--;
+
+ if (tp->md5sig_info->entries4 == 0) {
+@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct
+ if (tp->md5sig_info->entries4) {
+ int i;
+ for (i = 0; i < tp->md5sig_info->entries4; i++)
+- kfree(tp->md5sig_info->keys4[i].key);
++ kfree(tp->md5sig_info->keys4[i].base.key);
+ tp->md5sig_info->entries4 = 0;
+ tcp_free_md5sig_pool();
+ }
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -551,7 +551,7 @@ static struct tcp_md5sig_key *tcp_v6_md5
+
+ for (i = 0; i < tp->md5sig_info->entries6; i++) {
+ if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
+- return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
++ return &tp->md5sig_info->keys6[i].base;
+ }
+ return NULL;
+ }
+@@ -579,9 +579,9 @@ static int tcp_v6_md5_do_add(struct sock
+ key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+ if (key) {
+ /* modify existing entry - just update that one */
+- kfree(key->key);
+- key->key = newkey;
+- key->keylen = newkeylen;
++ kfree(key->base.key);
++ key->base.key = newkey;
++ key->base.keylen = newkeylen;
+ } else {
+ /* reallocate new list if current one is full. */
+ if (!tp->md5sig_info) {
+@@ -615,8 +615,8 @@ static int tcp_v6_md5_do_add(struct sock
+
+ ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
+ peer);
+- tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
+- tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
++ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
++ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
+
+ tp->md5sig_info->entries6++;
+ }
+@@ -638,7 +638,7 @@ static int tcp_v6_md5_do_del(struct sock
+ for (i = 0; i < tp->md5sig_info->entries6; i++) {
+ if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
+ /* Free the key */
+- kfree(tp->md5sig_info->keys6[i].key);
++ kfree(tp->md5sig_info->keys6[i].base.key);
+ tp->md5sig_info->entries6--;
+
+ if (tp->md5sig_info->entries6 == 0) {
+@@ -669,7 +669,7 @@ static void tcp_v6_clear_md5_list (struc
+
+ if (tp->md5sig_info->entries6) {
+ for (i = 0; i < tp->md5sig_info->entries6; i++)
+- kfree(tp->md5sig_info->keys6[i].key);
++ kfree(tp->md5sig_info->keys6[i].base.key);
+ tp->md5sig_info->entries6 = 0;
+ tcp_free_md5sig_pool();
+ }
+@@ -680,7 +680,7 @@ static void tcp_v6_clear_md5_list (struc
+
+ if (tp->md5sig_info->entries4) {
+ for (i = 0; i < tp->md5sig_info->entries4; i++)
+- kfree(tp->md5sig_info->keys4[i].key);
++ kfree(tp->md5sig_info->keys4[i].base.key);
+ tp->md5sig_info->entries4 = 0;
+ tcp_free_md5sig_pool();
+ }
--- /dev/null
+From stable-bounces@linux.kernel.org Wed Oct 10 03:26:15 2007
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Date: Wed, 10 Oct 2007 03:25:53 -0700 (PDT)
+Subject: Fix TCP's ->fastpath_cnt_hit handling.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071010.032553.42774666.davem@davemloft.net>
+
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+
+changeset 48611c47d09023d9356e78550d1cadb8d61da9c8 in mainline.
+
+When only a GSO skb was partially ACKed, no hints are reset,
+therefore fastpath_cnt_hint must be tweaked too or else it can
+corrupt fackets_out. For the corruption to occur, one must have
+a non-trivial ACK/SACK sequence, so this bug is not often
+harmful. There's a fackets_out state reset in TCP because
+fackets_out is known to be inaccurate and that fixes the issue
+eventually anyway.
+
+In case there was also at least one skb that got fully ACKed,
+the fastpath_skb_hint is set to NULL which causes a recount for
+fastpath_cnt_hint (the old value won't be accessed anymore),
+thus it can safely be decremented without additional checking.
+
+Reported by Cedric Le Goater <clg@fr.ibm.com>
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/tcp_input.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2403,6 +2403,9 @@ static int tcp_tso_acked(struct sock *sk
+ __u32 dval = min(tp->fackets_out, packets_acked);
+ tp->fackets_out -= dval;
+ }
++ /* hint's skb might be NULL but we don't need to care */
++ tp->fastpath_cnt_hint -= min_t(u32, packets_acked,
++ tp->fastpath_cnt_hint);
+ tp->packets_out -= packets_acked;
+
+ BUG_ON(tcp_skb_pcount(skb) == 0);
--- /dev/null
+From stable-bounces@linux.kernel.org Wed Oct 10 03:21:47 2007
+From: David S. Miller <davem@davemloft.net>
+Date: Wed, 10 Oct 2007 03:21:37 -0700 (PDT)
+Subject: Fix zero length socket write() semantics.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071010.032137.91314889.davem@davemloft.net>
+
+From: David S. Miller <davem@davemloft.net>
+
+changeset e79ad711a0108475c1b3a03815527e7237020b08 from mainline.
+
+This fixes kernel bugzilla #5731
+
+It should generate an empty packet for datagram protocols when the
+socket is connected, for one.
+
+The check is doubly-wrong because all that a write() can be is a
+sendmsg() call with a NULL msg_control and a single entry iovec. No
+special semantics should be assigned to it, therefore the zero length
+check should be removed entirely.
+
+This matches the behavior of BSD and several other systems.
+
+Alan Cox notes that SuSv3 says the behavior of a zero length write on
+non-files is "unspecified", but that's kind of useless since BSD has
+defined this behavior for a quarter century and BSD is essentially
+what application folks code to.
+
+Based upon a patch from Stephen Hemminger.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/socket.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -778,9 +778,6 @@ static ssize_t sock_aio_write(struct kio
+ if (pos != 0)
+ return -ESPIPE;
+
+- if (iocb->ki_left == 0) /* Match SYS5 behaviour */
+- return 0;
+-
+ x = alloc_sock_iocb(iocb, &siocb);
+ if (!x)
+ return -ENOMEM;
fix-some-cases-of-missed-ipv6-dad.patch
fix-ipv6-redirect-processing-leads-to-tahi-failures.patch
fix-rose-module-unload-oops.patch
+fix-zero-length-socket-write-semantics.patch
+fix-sys_ipc-semctl-on-sparc64.patch
+fix-tcp-s-fastpath_cnt_hit-handling.patch
+fix-tcp-md5-on-big-endian.patch
+fix-tcp-initial-sequence-number-selection.patch