--- /dev/null
+From stable-bounces@linux.kernel.org Mon Nov 12 16:08:28 2007
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Mon, 12 Nov 2007 19:06:40 -0500
+Subject: ACPI: sleep: Fix GPE suspend cleanup
+To: linux-stable <stable@kernel.org>
+Cc: "Brown, Len" <len.brown@intel.com>
+Message-ID: <4738EA90.6090704@redhat.com>
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+patch 9c1c6a1ba786d58bd03e27ee49f89a5685e8e07b in mainline.
+
+ACPI: sleep: Fix GPE suspend cleanup
+
+Commit 9b039330808b83acac3597535da26f47ad1862ce removed
+acpi_gpe_sleep_prepare(), the only function used at the S5 transition.
+Add a call to the generic acpi_enable_wakeup_device() instead.
+
+Reference: https://bugzilla.novell.com/show_bug.cgi?id=299882
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/sleep/main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/acpi/sleep/main.c
++++ b/drivers/acpi/sleep/main.c
+@@ -389,6 +389,7 @@ static void acpi_power_off(void)
+ /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+ printk("%s called\n", __FUNCTION__);
+ local_irq_disable();
++ acpi_enable_wakeup_device(ACPI_STATE_S5);
+ acpi_enter_sleep_state(ACPI_STATE_S5);
+ }
+
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Nov 12 16:10:31 2007
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Mon, 12 Nov 2007 19:09:01 -0500
+Subject: ACPI: suspend: Wrong order of GPE restore.
+To: linux-stable <stable@kernel.org>
+Cc: "Brown, Len" <len.brown@intel.com>
+Message-ID: <4738EB1D.6030709@redhat.com>
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit 1dbc1fda5d8ca907f320b806005d4a447977d26a in mainline.
+
+ACPI: suspend: Wrong order of GPE restore.
+
+acpi_leave_sleep_state() should see the correct list of wakeup and
+runtime GPEs, which is available only after acpi_disable_wakeup_device()
+is called.
+
+[cebbert@redhat.com: backport to 2.6.23]
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/sleep/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/sleep/main.c
++++ b/drivers/acpi/sleep/main.c
+@@ -170,8 +170,8 @@ static int acpi_pm_finish(suspend_state_
+ {
+ u32 acpi_state = acpi_target_sleep_state;
+
+- acpi_leave_sleep_state(acpi_state);
+ acpi_disable_wakeup_device(acpi_state);
++ acpi_leave_sleep_state(acpi_state);
+
+ /* reset firmware waking vector */
+ acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+@@ -256,8 +256,8 @@ static int acpi_hibernation_enter(void)
+
+ static void acpi_hibernation_finish(void)
+ {
+- acpi_leave_sleep_state(ACPI_STATE_S4);
+ acpi_disable_wakeup_device(ACPI_STATE_S4);
++ acpi_leave_sleep_state(ACPI_STATE_S4);
+
+ /* reset firmware waking vector */
+ acpi_set_firmware_waking_vector((acpi_physical_address) 0);
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Nov 12 23:59:27 2007
+From: David Miller <davem@davemloft.net>
+Date: Mon, 12 Nov 2007 23:59:05 -0800 (PST)
+Subject: Fix compat futex hangs.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071112.235905.219307536.davem@davemloft.net>
+
+From: David Miller <davem@davemloft.net>
+
+[FUTEX]: Fix address computation in compat code.
+
+[ Upstream commit: 3c5fd9c77d609b51c0bab682c9d40cbb496ec6f1 ]
+
+compat_exit_robust_list() computes a pointer to the
+futex entry in userspace as follows:
+
+ (void __user *)entry + futex_offset
+
+'entry' is a 'struct robust_list __user *', and
+'futex_offset' is a 'compat_long_t' (typically a 's32').
+
+Things explode if the 32-bit sign bit is set in futex_offset.
+
+Type promotion sign extends futex_offset to a 64-bit value before
+adding it to 'entry'.
+
+This triggered a problem on sparc64 running 32-bit applications which
+would lock up a cpu looping forever in the fault handling for the
+userspace load in handle_futex_death().
+
+Compat userspace runs with address masking (wherein the cpu zeros out
+the top 32 bits of every effective address given to a memory operation
+instruction), so the sparc64 fault handler accounts for this by zeroing
+out the top 32 bits of the fault address too.
+
+Since the kernel properly uses the compat_uptr interfaces, kernel-side
+accesses to compat userspace work too, because they only use addresses
+with the top 32 bits clear.
+
+Because of this compat futex layer bug we get into the following loop
+when executing the get_user() load near the top of handle_futex_death():
+
+1) load from address '0xfffffffff7f16bd8', FAULT
+2) fault handler clears upper 32-bits, processes fault
+ for address '0xf7f16bd8' which succeeds
+3) goto #1
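+
+The two addresses above can be reproduced with a standalone userspace
+sketch (the entry and offset values below are made up purely to hit
+exactly those numbers; this is not the kernel code):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* hypothetical example values */
+		uint64_t entry = 0x70000000;	/* zero-extended 32-bit user pointer */
+		int32_t futex_offset = (int32_t)0x87f16bd8;	/* 32-bit sign bit set */
+
+		/* old code: the s32 offset sign extends before the 64-bit add */
+		uint64_t broken = entry + (int64_t)futex_offset;
+
+		/* fixed code: add in 32-bit (compat) space, then widen */
+		uint64_t fixed = (uint32_t)((uint32_t)entry + (uint32_t)futex_offset);
+
+		printf("%#llx\n", (unsigned long long)broken);	/* 0xfffffffff7f16bd8 */
+		printf("%#llx\n", (unsigned long long)fixed);	/* 0xf7f16bd8 */
+		return 0;
+	}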
+
+I want to thank Bernd Zeimetz, Josip Rodin, and Fabio Massimo Di Nitto
+for their tireless efforts helping me track down this bug.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/futex_compat.c | 27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -29,6 +29,15 @@ fetch_robust_entry(compat_uptr_t *uentry
+ return 0;
+ }
+
++static void __user *futex_uaddr(struct robust_list *entry,
++ compat_long_t futex_offset)
++{
++ compat_uptr_t base = ptr_to_compat(entry);
++ void __user *uaddr = compat_ptr(base + futex_offset);
++
++ return uaddr;
++}
++
+ /*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+@@ -75,11 +84,13 @@ void compat_exit_robust_list(struct task
+ * A pending lock might already be on the list, so
+ * dont process it twice:
+ */
+- if (entry != pending)
+- if (handle_futex_death((void __user *)entry + futex_offset,
+- curr, pi))
+- return;
++ if (entry != pending) {
++ void __user *uaddr = futex_uaddr(entry,
++ futex_offset);
+
++ if (handle_futex_death(uaddr, curr, pi))
++ return;
++ }
+ if (rc)
+ return;
+ uentry = next_uentry;
+@@ -93,9 +104,11 @@ void compat_exit_robust_list(struct task
+
+ cond_resched();
+ }
+- if (pending)
+- handle_futex_death((void __user *)pending + futex_offset,
+- curr, pip);
++ if (pending) {
++ void __user *uaddr = futex_uaddr(pending, futex_offset);
++
++ handle_futex_death(uaddr, curr, pip);
++ }
+ }
+
+ asmlinkage long
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 02:48:46 2007
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 13 Nov 2007 02:48:28 -0800 (PST)
+Subject: Fix crypto_alloc_comp() error checking.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.024828.260506088.davem@davemloft.net>
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[IPSEC]: Fix crypto_alloc_comp error checking
+
+[ Upstream commit: 4999f3621f4da622e77931b3d33ada6c7083c705 ]
+
+The function crypto_alloc_comp() returns an ERR_PTR-encoded errno
+rather than NULL to indicate an error, so the result needs to be
+tested with IS_ERR().
+
+This is based on a patch by Vicenç Beltran Querol.
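+
+The correct calling pattern is sketched below as a minimal fragment
+(not a complete function; "deflate" is only an illustrative algorithm
+name), similar to what the hunks below do:
+
+	struct crypto_comp *tfm;
+
+	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);	/* a NULL check would never trigger */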
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/ipcomp.c | 3 ++-
+ net/ipv6/ipcomp6.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ipcomp.c
++++ b/net/ipv4/ipcomp.c
+@@ -17,6 +17,7 @@
+ #include <asm/scatterlist.h>
+ #include <asm/semaphore.h>
+ #include <linux/crypto.h>
++#include <linux/err.h>
+ #include <linux/pfkeyv2.h>
+ #include <linux/percpu.h>
+ #include <linux/smp.h>
+@@ -355,7 +356,7 @@ static struct crypto_comp **ipcomp_alloc
+ for_each_possible_cpu(cpu) {
+ struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+ CRYPTO_ALG_ASYNC);
+- if (!tfm)
++ if (IS_ERR(tfm))
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
+--- a/net/ipv6/ipcomp6.c
++++ b/net/ipv6/ipcomp6.c
+@@ -37,6 +37,7 @@
+ #include <asm/scatterlist.h>
+ #include <asm/semaphore.h>
+ #include <linux/crypto.h>
++#include <linux/err.h>
+ #include <linux/pfkeyv2.h>
+ #include <linux/random.h>
+ #include <linux/percpu.h>
+@@ -366,7 +367,7 @@ static struct crypto_comp **ipcomp6_allo
+ for_each_possible_cpu(cpu) {
+ struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+ CRYPTO_ALG_ASYNC);
+- if (!tfm)
++ if (IS_ERR(tfm))
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:10:11 2007
+From: Radu Rendec <radu.rendec@ines.ro>
+Date: Tue, 13 Nov 2007 00:09:56 -0800 (PST)
+Subject: Fix endianness bug in U32 classifier.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.000956.33032860.davem@davemloft.net>
+
+From: Radu Rendec <radu.rendec@ines.ro>
+
+changeset 543821c6f5dea5221426eaf1eac98b100249c7ac in mainline.
+
+[PKT_SCHED] CLS_U32: Fix endianness problem with u32 classifier hash masks.
+
+While trying to implement u32 hashes in my shaping machine I ran into
+a possible bug in the u32 hash/bucket computing algorithm
+(net/sched/cls_u32.c).
+
+The problem occurs only with hash masks that extend over the octet
+boundary, on little endian machines (where htonl() actually does
+something).
+
+Let's say that I would like to use 0x3fc0 as the hash mask. This means
+8 contiguous "1" bits starting at b6. With such a mask, the expected
+(and logical) behavior is to hash any address in, for instance,
+192.168.0.0/26 in bucket 0, then any address in 192.168.0.64/26 in
+bucket 1, then 192.168.0.128/26 in bucket 2 and so on.
+
+This is exactly what would happen on a big endian machine, but on
+little endian machines, what would actually happen with the current
+implementation is 0x3fc0 being byte-reversed (into 0xc03f0000) by
+htonl() in the userspace tool and then applied to 192.168.x.x in the
+u32 classifier. When shifting right by 16 bits (the rank of the first
+"1" bit in the reversed mask) and applying the divisor mask (0xff for
+divisor 256), what would actually remain is 0x3f applied to the "168"
+octet of the address.
+
+One could say this can be easily worked around by taking endianness
+into account in userspace and supplying an appropriate mask (0xfc03)
+that would be turned into contiguous "1" bits when reversed
+(0x03fc0000). But the actual problem is the network address (inside
+the packet) not being converted to host order, but used as a
+host-order value when computing the bucket.
+
+Let's say the network address is written as n31 n30 ... n0, with n0
+being the least significant bit. When used directly (without any
+conversion) on a little endian machine, it becomes n7 ... n0 n8 ..n15
+etc in the machine's registers. Thus bits n7 and n8 would no longer be
+adjacent and 192.168.64.0/26 and 192.168.128.0/26 would no longer be
+consecutive.
+
+The fix is to apply ntohl() on the hmask before computing fshift,
+and in u32_hash_fold() convert the packet data to host order before
+shifting down by fshift.
+
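+As a sanity check, the fixed fold can be exercised from userspace with
+an illustrative mask and a few made-up addresses (a standalone sketch,
+not the classifier code itself):
+
+	#include <arpa/inet.h>
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* Mirrors the fixed u32_hash_fold(): the packet word and the mask
+	 * stay in network byte order, so convert the masked value to host
+	 * order before shifting. */
+	static unsigned int fold(uint32_t key_be, uint32_t hmask_be, int fshift)
+	{
+		return ntohl(key_be & hmask_be) >> fshift;
+	}
+
+	int main(void)
+	{
+		uint32_t hmask_be = htonl(0x3fc0);	/* 8 "1" bits starting at b6 */
+		int fshift = 6;				/* trailing zeros of 0x3fc0 */
+		uint32_t hosts[] = { 0xc0a80000, 0xc0a80040, 0xc0a80080 };
+
+		for (int i = 0; i < 3; i++)		/* 192.168.0.{0,64,128} */
+			printf("bucket %u\n", fold(htonl(hosts[i]), hmask_be, fshift));
+		/* Prints 0, 1, 2 on either byte order; before the fix the shift
+		 * was applied to the network-order word, which broke on little
+		 * endian machines. */
+		return 0;
+	}
+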
+With helpful feedback from Jamal Hadi Salim and Jarek Poplawski.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sched/cls_u32.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -91,7 +91,7 @@ static struct tc_u_common *u32_list;
+
+ static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
+ {
+- unsigned h = (key & sel->hmask)>>fshift;
++ unsigned h = ntohl(key & sel->hmask)>>fshift;
+
+ return h;
+ }
+@@ -615,7 +615,7 @@ static int u32_change(struct tcf_proto *
+ n->handle = handle;
+ {
+ u8 i = 0;
+- u32 mask = s->hmask;
++ u32 mask = ntohl(s->hmask);
+ if (mask) {
+ while (!(mask & 1)) {
+ i++;
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:03:31 2007
+From: David Miller <davem@davemloft.net>
+Date: Tue, 13 Nov 2007 00:02:56 -0800 (PST)
+Subject: Fix error returns in sys_socketpair()
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.000256.110812500.davem@davemloft.net>
+
+From: David Miller <davem@davemloft.net>
+
+patch bf3c23d171e35e6e168074a1514b0acd59cfd81a in mainline.
+
+[NET]: Fix error reporting in sys_socketpair().
+
+If either of the two sock_alloc_fd() calls fails, we
+forget to update 'err' and thus we'll erroneously
+return zero in these cases.
+
+Based upon a report and patch from Rich Paul, and
+commentary from Chuck Ebbert.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/socket.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1245,11 +1245,14 @@ asmlinkage long sys_socketpair(int famil
+ goto out_release_both;
+
+ fd1 = sock_alloc_fd(&newfile1);
+- if (unlikely(fd1 < 0))
++ if (unlikely(fd1 < 0)) {
++ err = fd1;
+ goto out_release_both;
++ }
+
+ fd2 = sock_alloc_fd(&newfile2);
+ if (unlikely(fd2 < 0)) {
++ err = fd2;
+ put_filp(newfile1);
+ put_unused_fd(fd1);
+ goto out_release_both;
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:01:28 2007
+From: James Chapman <jchapman@katalix.com>
+Date: Tue, 13 Nov 2007 00:01:00 -0800 (PST)
+Subject: Fix L2TP oopses.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.000100.223352977.davem@davemloft.net>
+
+From: James Chapman <jchapman@katalix.com>
+
+changeset 91781004b9c029ee55b7aa9ef950a373ba865dc6 in mainline.
+
+[PPP]: L2TP: Fix oops in transmit and receive paths
+
+Changes made on 18-sep to fix skb handling in the pppol2tp driver
+broke the transmit and receive paths. Users are only running into this
+now because distros are now using 2.6.23 and I must have messed up
+when I tested the change.
+
+For receive, we now do our own calculation of how much to pull from
+the skb (variable length L2TP header) rather than using
+skb_transport_offset(). Also, if the skb isn't a data packet, it must
+be passed back to UDP with skb->data pointing to the UDP header.
+
+For transmit, make sure skb->sk is set up because ip_queue_xmit()
+needs it.
+
+Signed-off-by: James Chapman <jchapman@katalix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/pppol2tp.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/pppol2tp.c
++++ b/drivers/net/pppol2tp.c
+@@ -487,7 +487,7 @@ static int pppol2tp_recv_core(struct soc
+ {
+ struct pppol2tp_session *session = NULL;
+ struct pppol2tp_tunnel *tunnel;
+- unsigned char *ptr;
++ unsigned char *ptr, *optr;
+ u16 hdrflags;
+ u16 tunnel_id, session_id;
+ int length;
+@@ -495,7 +495,7 @@ static int pppol2tp_recv_core(struct soc
+
+ tunnel = pppol2tp_sock_to_tunnel(sock);
+ if (tunnel == NULL)
+- goto error;
++ goto no_tunnel;
+
+ /* UDP always verifies the packet length. */
+ __skb_pull(skb, sizeof(struct udphdr));
+@@ -508,7 +508,7 @@ static int pppol2tp_recv_core(struct soc
+ }
+
+ /* Point to L2TP header */
+- ptr = skb->data;
++ optr = ptr = skb->data;
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16*)ptr);
+@@ -636,12 +636,14 @@ static int pppol2tp_recv_core(struct soc
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O) {
+ offset = ntohs(*(__be16 *)ptr);
+- skb->transport_header += 2 + offset;
+- if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2))
+- goto discard;
++ ptr += 2 + offset;
+ }
+
+- __skb_pull(skb, skb_transport_offset(skb));
++ offset = ptr - optr;
++ if (!pskb_may_pull(skb, offset))
++ goto discard;
++
++ __skb_pull(skb, offset);
+
+ /* Skip PPP header, if present. In testing, Microsoft L2TP clients
+ * don't send the PPP header (PPP header compression enabled), but
+@@ -651,6 +653,9 @@ static int pppol2tp_recv_core(struct soc
+ * Note that skb->data[] isn't dereferenced from a u16 ptr here since
+ * the field may be unaligned.
+ */
++ if (!pskb_may_pull(skb, 2))
++ goto discard;
++
+ if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ skb_pull(skb, 2);
+
+@@ -708,6 +713,10 @@ discard:
+ return 0;
+
+ error:
++ /* Put UDP header back */
++ __skb_push(skb, sizeof(struct udphdr));
++
++no_tunnel:
+ return 1;
+ }
+
+@@ -1049,6 +1058,8 @@ static int pppol2tp_xmit(struct ppp_chan
+ /* Get routing info from the tunnel socket */
+ dst_release(skb->dst);
+ skb->dst = sk_dst_get(sk_tun);
++ skb_orphan(skb);
++ skb->sk = sk_tun;
+
+ /* Queue the packet to IP for output */
+ len = skb->len;
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 03:31:15 2007
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 13 Nov 2007 03:03:00 -0800 (PST)
+Subject: Fix netlink timeouts.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.030300.51440049.davem@davemloft.net>
+
+From: Patrick McHardy <kaber@trash.net>
+
+[NETLINK]: Fix unicast timeouts
+
+[ Upstream commit: c3d8d1e30cace31fed6186a4b8c6b1401836d89c ]
+
+Commit ed6dcf4a in the history.git tree broke netlink_unicast timeouts
+by moving the schedule_timeout() call to a new function that doesn't
+propagate the remaining timeout back to the caller. This means on each
+retry we start with the full timeout again.
+
+ipc/mqueue.c seems to actually want to wait indefinitely so this
+behaviour is retained.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/netlink.h | 2 +-
+ ipc/mqueue.c | 6 ++++--
+ net/netlink/af_netlink.c | 10 +++++-----
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -175,7 +175,7 @@ extern int netlink_unregister_notifier(s
+ /* finegrained unicast helpers: */
+ struct sock *netlink_getsockbyfilp(struct file *filp);
+ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+- long timeo, struct sock *ssk);
++ long *timeo, struct sock *ssk);
+ void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
+ int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
+
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1014,6 +1014,8 @@ asmlinkage long sys_mq_notify(mqd_t mqde
+ return -EINVAL;
+ }
+ if (notification.sigev_notify == SIGEV_THREAD) {
++ long timeo;
++
+ /* create the notify skb */
+ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
+ ret = -ENOMEM;
+@@ -1042,8 +1044,8 @@ retry:
+ goto out;
+ }
+
+- ret = netlink_attachskb(sock, nc, 0,
+- MAX_SCHEDULE_TIMEOUT, NULL);
++ timeo = MAX_SCHEDULE_TIMEOUT;
++ ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
+ if (ret == 1)
+ goto retry;
+ if (ret) {
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -744,7 +744,7 @@ struct sock *netlink_getsockbyfilp(struc
+ * 1: repeat lookup - reference dropped while waiting for socket memory.
+ */
+ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+- long timeo, struct sock *ssk)
++ long *timeo, struct sock *ssk)
+ {
+ struct netlink_sock *nlk;
+
+@@ -753,7 +753,7 @@ int netlink_attachskb(struct sock *sk, s
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ test_bit(0, &nlk->state)) {
+ DECLARE_WAITQUEUE(wait, current);
+- if (!timeo) {
++ if (!*timeo) {
+ if (!ssk || nlk_sk(ssk)->pid == 0)
+ netlink_overrun(sk);
+ sock_put(sk);
+@@ -767,7 +767,7 @@ int netlink_attachskb(struct sock *sk, s
+ if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ test_bit(0, &nlk->state)) &&
+ !sock_flag(sk, SOCK_DEAD))
+- timeo = schedule_timeout(timeo);
++ *timeo = schedule_timeout(*timeo);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&nlk->wait, &wait);
+@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, s
+
+ if (signal_pending(current)) {
+ kfree_skb(skb);
+- return sock_intr_errno(timeo);
++ return sock_intr_errno(*timeo);
+ }
+ return 1;
+ }
+@@ -839,7 +839,7 @@ retry:
+ kfree_skb(skb);
+ return PTR_ERR(sk);
+ }
+- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
++ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
+ if (err == 1)
+ goto retry;
+ if (err)
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:13:57 2007
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 13 Nov 2007 00:13:37 -0800 (PST)
+Subject: Fix SET_VLAN_INGRESS_PRIORITY_CMD error return.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.001337.36700440.davem@davemloft.net>
+
+From: Patrick McHardy <kaber@trash.net>
+
+patch fffe470a803e7f7b74c016291e542a0162761209 in mainline.
+
+[VLAN]: Fix SET_VLAN_INGRESS_PRIORITY_CMD ioctl
+
+Based on report and patch by Doug Kehn <rdkehn@yahoo.com>:
+
+vconfig returns the following error when attempting to execute the
+set_ingress_map command:
+
+vconfig: socket or ioctl error for set_ingress_map: Operation not permitted
+
+In vlan.c, vlan_ioctl_handler for SET_VLAN_INGRESS_PRIORITY_CMD
+sets err = -EPERM and calls vlan_dev_set_ingress_priority.
+vlan_dev_set_ingress_priority is a void function so err remains
+at -EPERM and results in the vconfig error (even though the ingress
+map was set).
+
+Fix by setting err = 0 after the vlan_dev_set_ingress_priority call.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/8021q/vlan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -744,6 +744,7 @@ static int vlan_ioctl_handler(void __use
+ vlan_dev_set_ingress_priority(dev,
+ args.u.skb_priority,
+ args.vlan_qos);
++ err = 0;
+ break;
+
+ case SET_VLAN_EGRESS_PRIORITY_CMD:
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:08:07 2007
+From: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+Date: Tue, 13 Nov 2007 00:07:45 -0800 (PST)
+Subject: Fix TEQL oops.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.000745.02473542.davem@davemloft.net>
+
+From: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+
+[PKT_SCHED]: Fix OOPS when removing devices from a teql queuing discipline
+
+[ Upstream commit: 4f9f8311a08c0d95c70261264a2b47f2ae99683a ]
+
+teql_reset() is called from deactivate and the qdisc is already set to
+noop, but a subsequent teql_xmit() does not know about it and
+dereferences its private data as a teql qdisc, and thus oopses.
+
+Signed-off-by: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sched/sch_teql.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -249,6 +249,9 @@ __teql_resolve(struct sk_buff *skb, stru
+ static __inline__ int
+ teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+ {
++ if (dev->qdisc == &noop_qdisc)
++ return -ENODEV;
++
+ if (dev->hard_header == NULL ||
+ skb->dst == NULL ||
+ skb->dst->neighbour == NULL)
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Nov 13 00:12:09 2007
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 13 Nov 2007 00:11:51 -0800 (PST)
+Subject: Fix VLAN address syncing.
+To: stable@kernel.org
+Cc: bunk@kernel.org
+Message-ID: <20071113.001151.121206681.davem@davemloft.net>
+
+From: Patrick McHardy <kaber@trash.net>
+
+patch d932e04a5e7b146c5f9bf517714b986a432a7594 in mainline.
+
+[VLAN]: Don't synchronize addresses while the vlan device is down
+
+While the VLAN device is down, the unicast addresses are not configured
+on the underlying device, so we shouldn't attempt to sync them.
+
+Noticed by Dmitry Butskoy <buc@odusz.so-cdu.ru>
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/8021q/vlan.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -629,6 +629,10 @@ static int vlan_device_event(struct noti
+ if (!vlandev)
+ continue;
+
++ flgs = vlandev->flags;
++ if (!(flgs & IFF_UP))
++ continue;
++
+ vlan_sync_address(dev, vlandev);
+ }
+ break;
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Nov 12 14:09:50 2007
+From: Mark Fasheh <mark.fasheh@oracle.com>
+Date: Mon, 12 Nov 2007 14:09:22 -0800
+Subject: ocfs2: fix write() performance regression
+To: stable@kernel.org
+Message-ID: <20071112220922.GW28607@ca-server1.us.oracle.com>
+Content-Disposition: inline
+
+From: Mark Fasheh <mark.fasheh@oracle.com>
+
+patch 4e9563fd55ff4479f2b118d0757d121dd0cfc39c in mainline.
+
+ocfs2: fix write() performance regression
+
+On file systems which don't support sparse files, ocfs2_map_page_blocks()
+was reading blocks on appending writes. This caused write performance to
+suffer dramatically. Fix this by detecting an appending write on a nonsparse
+fs and skipping the read.
+
+Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/aops.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -661,6 +661,27 @@ static void ocfs2_clear_page_regions(str
+ }
+
+ /*
++ * Nonsparse file systems fully allocate before we get to the write
++ * code. This prevents ocfs2_write() from tagging the write as an
++ * allocating one, which means ocfs2_map_page_blocks() might try to
++ * read-in the blocks at the tail of our file. Avoid reading them by
++ * testing i_size against each block offset.
++ */
++static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
++ unsigned int block_start)
++{
++ u64 offset = page_offset(page) + block_start;
++
++ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
++ return 1;
++
++ if (i_size_read(inode) > offset)
++ return 1;
++
++ return 0;
++}
++
++/*
+ * Some of this taken from block_prepare_write(). We already have our
+ * mapping by now though, and the entire write will be allocating or
+ * it won't, so not much need to use BH_New.
+@@ -713,6 +734,7 @@ int ocfs2_map_page_blocks(struct page *p
+ set_buffer_uptodate(bh);
+ } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_new(bh) &&
++ ocfs2_should_read_blk(inode, page, block_start) &&
+ (block_start < from || block_end > to)) {
+ ll_rw_block(READ, 1, &bh);
+ *wait_bh++=bh;
sky2-status-race.patch
sky2-ethdump.patch
sky2-phy-power.patch
+ocfs2-fix-write-performance-regression.patch
+acpi-sleep-fix-gpe-suspend-cleanup.patch
+acpi-suspend-wrong-order-of-gpe-restore.patch
+fix-l2tp-oopses.patch
+fix-error-returns-in-sys_socketpair.patch
+fix-teql-oops.patch
+fix-endianness-bug-in-u32-classifier.patch
+fix-vlan-address-syncing.patch
+fix-set_vlan_ingress_priority_cmd-error-return.patch
+fix-crypto_alloc_comp-error-checking.patch
+fix-netlink-timeouts.patch
+fix-compat-futex-hangs.patch