--- /dev/null
+From b09a48a3f84cc28dea31822db22fcbcd14e68aef Mon Sep 17 00:00:00 2001
+From: Stephen Hemminger <shemminger@vyatta.com>
+Date: Thu, 11 Jun 2009 05:46:04 -0700
+Subject: bonding: fix multiple module load problem
+
+From: Stephen Hemminger <shemminger@vyatta.com>
+
+[ Upstream commit 130aa61a77b8518f1ea618e1b7d214d60b405f10 ]
+
+Some users still load the bonding module multiple times to create
+bonding devices. This was accidentally broken by a later patch, around
+the time sysfs was fixed. According to Jay, it was broken by:
+ commit b8a9787eddb0e4665f31dd1d64584732b2b5d051
+ Author: Jay Vosburgh <fubar@us.ibm.com>
+ Date: Fri Jun 13 18:12:04 2008 -0700
+
+ bonding: Allow setting max_bonds to zero
+
+Note: sysfs and procfs still produce WARN() messages when this is done,
+so the sysfs method is the recommended API.
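+
+A minimal sketch of the idea behind the fix (illustrative names, not
+the actual bond_sysfs.c code): treat -EEXIST from the sysfs class
+attribute registration as success, so a repeated module load does not
+fail module init.
+
+    static int example_create_sysfs(struct class *netdev_class,
+                                    struct class_attribute *attr)
+    {
+            int ret = class_create_file(netdev_class, attr);
+
+            if (ret == -EEXIST) {
+                    printk(KERN_WARNING "%s already exists in sysfs\n",
+                           attr->attr.name);
+                    ret = 0;        /* tolerate repeated module loads */
+            }
+            return ret;
+    }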
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/bonding/bond_sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -1538,6 +1538,7 @@ int bond_create_sysfs(void)
+ printk(KERN_ERR
+ "network device named %s already exists in sysfs",
+ class_attr_bonding_masters.attr.name);
++ ret = 0;
+ }
+
+ return ret;
--- /dev/null
+From 51ebf8b062174a603dcf0019f1371520cfa11cc4 Mon Sep 17 00:00:00 2001
+From: Andy Gospodarek <andy@greyhouse.net>
+Date: Thu, 18 Jun 2009 11:57:37 +0000
+Subject: e1000e: stop unnecessary polling when using msi-x
+
+From: Andy Gospodarek <andy@greyhouse.net>
+
+[ Upstream commit 679e8a0f0ae3333e94b1d374d07775fce9066025 ]
+
+The last hunk of this commit:
+
+ commit 12d04a3c12b420f23398b4d650127642469a60a6
+ Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ Date: Wed Mar 25 22:05:03 2009 +0000
+
+ e1000e: commonize tx cleanup routine to match e1000 & igb
+
+changed the logic that determines whether we should call napi_complete
+at the end of a napi poll.
+
+If the NIC is using MSI-X with no work to do in ->poll, net_rx_action
+can just spin indefinitely on older kernels and for 2 jiffies on newer
+kernels since napi_complete is never called and budget isn't
+decremented.
+
+Discovered and verified while testing a driver backport to an older
+kernel.
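+
+A hedged sketch of the ->poll contract involved (generic names, not the
+e1000e code; example_clean_rx is a stand-in for the driver's rx cleanup
+routine): napi_complete() must be called once less than the full budget
+was consumed, otherwise net_rx_action() keeps rescheduling the poll.
+
+    static int example_poll(struct napi_struct *napi, int budget)
+    {
+            int tx_cleaned = 1;     /* assume done unless this vector owns Tx */
+            int work_done = 0;
+
+            example_clean_rx(napi, &work_done, budget);
+
+            /* Tx work still pending: claim the whole budget to keep polling */
+            if (!tx_cleaned)
+                    work_done = budget;
+
+            /* Exit polling mode only when the budget was not exhausted */
+            if (work_done < budget)
+                    napi_complete(napi);
+
+            return work_done;
+    }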
+
+Signed-off-by: Andy Gospodarek <andy@greyhouse.net>
+Acked-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/e1000e/netdev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -1996,7 +1996,7 @@ static int e1000_clean(struct napi_struc
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *poll_dev = adapter->netdev;
+- int tx_cleaned = 0, work_done = 0;
++ int tx_cleaned = 1, work_done = 0;
+
+ adapter = netdev_priv(poll_dev);
+
--- /dev/null
+From e8b65d4ce15b98b262a1914e156846f8870add09 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Fri, 26 Jun 2009 11:40:30 -0700
+Subject: ipv4: fix NULL pointer + success return in route lookup path
+
+From: Neil Horman <nhorman@tuxdriver.com>
+
+[ Upstream commit 73e42897e8e5619eacb787d2ce69be12f47cfc21 ]
+
+Don't drop route if we're not caching
+
+ I recently got a report of an oops on a route lookup. Maxime was
+testing what would happen if route caching was turned off (doing so by
+making rt_caching always return 0), and found that it triggered an oops. I
+looked at it and found that the problem stemmed from the fact that the route
+lookup routines were returning success from their lookup paths (which is good),
+but never set the **rp pointer to anything (which is bad). This happens because
+in rt_intern_hash, if rt_caching returns false, we call rt_drop and return 0.
+This almost emulates silent success. What we should be doing is assigning
+*rp = rt and _not_ dropping the route. This way, during slow path lookups, when we
+create a new route cache entry, we don't immediately discard it, rather we just
+don't add it into the cache hash table, but we let this one lookup use it for
+the purpose of this route request. Maxime has tested and reports it prevents
+the oops. There is still a subsequent routing issue that I'm looking into
+further, but I'm confident that, even if it's related to this same path, this
+patch makes sense to take.
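+
+A hedged sketch of the contract being violated (illustrative names, not
+the actual rt_intern_hash; caching_enabled() stands in for the
+rt_caching() test):
+
+    static int example_intern_hash(struct rtable *rt, struct rtable **rp)
+    {
+            if (!caching_enabled()) {
+                    /* Old code: rt_drop(rt); return 0;  -- "success" with
+                     * *rp never set, so callers dereferenced garbage. */
+                    *rp = rt;       /* hand the single-use route back */
+                    return 0;
+            }
+
+            /* ... normal path inserts rt into the hash and sets *rp ... */
+            *rp = rt;
+            return 0;
+    }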
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/route.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1081,8 +1081,16 @@ restart:
+ now = jiffies;
+
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+- rt_drop(rt);
+- return 0;
++ /*
++ * If we're not caching, just tell the caller we
++ * were successful and don't touch the route. The
++ * caller hold the sole reference to the cache entry, and
++ * it will be released when the caller is done with it.
++ * If we drop it here, the callers have no way to resolve routes
++ * when we're not caching. Instead, just point *rp at rt, so
++ * the caller gets a single use out of the route
++ */
++ goto report_and_exit;
+ }
+
+ rthp = &rt_hash_table[hash].chain;
+@@ -1210,6 +1218,8 @@ restart:
+ rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+
+ spin_unlock_bh(rt_hash_lock_addr(hash));
++
++report_and_exit:
+ *rp = rt;
+ return 0;
+ }
--- /dev/null
+From 83c97c9fbcefcc759abf935de7d4b9ce99675b68 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Fri, 26 Jun 2009 11:41:16 -0700
+Subject: ipv4 routing: Ensure that route cache entries are usable and reclaimable when caching is off
+
+From: Neil Horman <nhorman@tuxdriver.com>
+
+[ Upstream commit b6280b47a7a42970d098a3059f4ebe7e55e90d8d ]
+
+When route caching is disabled (rt_caching returns false), We still use route
+cache entries that are created and passed into rt_intern_hash once. These
+routes need to be made usable for the one call path that holds a reference to
+them, and they need to be reclaimed when they're finished with their use. To be
+made usable, they need to be associated with a neighbor table entry (which they
+currently are not), otherwise iproute_finish2 just discards the packet, since we
+don't know which L2 peer to send the packet to. To do this binding, we need to
+follow the path a bit higher up in rt_intern_hash, which calls
+arp_bind_neighbour, but not assign the route entry to the hash table.
+Currently, if caching is off, we simply assign the route to the rp pointer and
+are reutrn success. This patch associates us with a neighbor entry first.
+
+Secondly, we need to make sure that any single use routes like this are known to
+the garbage collector when caching is off. If caching is off, and we try to
+hash in a route, it will leak when its refcount reaches zero. To avoid this,
+this patch calls rt_free on the route cache entry passed into rt_intern_hash.
+This places us on the gc list for the route cache garbage collector, so that
+when its refcount reaches zero, it will be reclaimed (Thanks to Alexey for this
+suggestion).
+
+I've tested this on a local system here, and with these patches in place, I'm
+able to maintain routed connectivity to remote systems, even if I set
+/proc/sys/net/ipv4/rt_cache_rebuild_count to -1, which forces rt_caching to
+return false.
+
+Signed-off-by: Neil Horman <nhorman@redhat.com>
+Reported-by: Jarek Poplawski <jarkao2@gmail.com>
+Reported-by: Maxime Bizon <mbizon@freebox.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/route.c | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1089,8 +1089,27 @@ restart:
+ * If we drop it here, the callers have no way to resolve routes
+ * when we're not caching. Instead, just point *rp at rt, so
+ * the caller gets a single use out of the route
++ * Note that we do rt_free on this new route entry, so that
++ * once its refcount hits zero, we are still able to reap it
++ * (Thanks Alexey)
++ * Note also the rt_free uses call_rcu. We don't actually
++ * need rcu protection here, this is just our path to get
++ * on the route gc list.
+ */
+- goto report_and_exit;
++
++ if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
++ int err = arp_bind_neighbour(&rt->u.dst);
++ if (err) {
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "Neighbour table failure & not caching routes.\n");
++ rt_drop(rt);
++ return err;
++ }
++ }
++
++ rt_free(rt);
++ goto skip_hashing;
+ }
+
+ rthp = &rt_hash_table[hash].chain;
+@@ -1204,7 +1223,8 @@ restart:
+ #if RT_CACHE_DEBUG >= 2
+ if (rt->u.dst.rt_next) {
+ struct rtable *trt;
+- printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
++ printk(KERN_DEBUG "rt_cache @%02x: %pI4",
++ hash, &rt->rt_dst);
+ for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+ printk(" . %pI4", &trt->rt_dst);
+ printk("\n");
+@@ -1219,7 +1239,7 @@ restart:
+
+ spin_unlock_bh(rt_hash_lock_addr(hash));
+
+-report_and_exit:
++skip_hashing:
+ *rp = rt;
+ return 0;
+ }
--- /dev/null
+From 8c76569fc804f3856dab9893a0a00e0137ed7019 Mon Sep 17 00:00:00 2001
+From: Michael Buesch <mb@bu3sch.de>
+Date: Thu, 18 Jun 2009 07:03:47 +0000
+Subject: pegasus usb-net: Fix endianness bugs
+
+From: Michael Buesch <mb@bu3sch.de>
+
+[ Upstream commit e3453f6342110d60edb37be92c4a4f668ca8b0c4 ]
+
+This fixes various endianness bugs, some harmless and some real.
+This is tested on a PowerPC-64 machine.
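+
+A hedged illustration of the bug class (not the driver code itself;
+example_write_eeprom_word is a made-up wrapper): values handed to the
+device are little-endian on the wire, so host-order integers have to be
+converted explicitly or the bytes come out swapped on big-endian hosts
+such as ppc64.
+
+    static void example_write_eeprom_word(pegasus_t *pegasus, __u16 word)
+    {
+            /* Buffer sent to the device must be little-endian */
+            __le16 le_data = cpu_to_le16(word);
+
+            /* Passing &word directly would write swapped bytes on BE */
+            set_registers(pegasus, EpromData, sizeof(le_data), &le_data);
+    }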
+
+Signed-off-by: Michael Buesch <mb@bu3sch.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/usb/pegasus.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus
+
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
+- pegasus->dr.wValue = 0;
++ pegasus->dr.wValue = cpu_to_le16(0);
+ pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
+ pegasus->dr.wLength = cpu_to_le16(3);
+ pegasus->ctrl_urb->transfer_buffer_length = 3;
+@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t *
+ int i;
+ __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
+ int ret;
++ __le16 le_data = cpu_to_le16(data);
+
+ set_registers(pegasus, EpromOffset, 4, d);
+ enable_eprom_write(pegasus);
+ set_register(pegasus, EpromOffset, index);
+- set_registers(pegasus, EpromData, 2, &data);
++ set_registers(pegasus, EpromData, 2, &le_data);
+ set_register(pegasus, EpromCtrl, EPROM_WRITE);
+
+ for (i = 0; i < REG_TIMEOUT; i++) {
+@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_
+
+ static inline void disable_net_traffic(pegasus_t * pegasus)
+ {
+- int tmp = 0;
++ __le16 tmp = cpu_to_le16(0);
+
+- set_registers(pegasus, EthCtrl0, 2, &tmp);
++ set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
+ }
+
+ static inline void get_interrupt_interval(pegasus_t * pegasus)
+ {
+- __u8 data[2];
++ u16 data;
++ u8 interval;
+
+- read_eprom_word(pegasus, 4, (__u16 *) data);
++ read_eprom_word(pegasus, 4, &data);
++ interval = data >> 8;
+ if (pegasus->usb->speed != USB_SPEED_HIGH) {
+- if (data[1] < 0x80) {
++ if (interval < 0x80) {
+ if (netif_msg_timer(pegasus))
+ dev_info(&pegasus->intf->dev, "intr interval "
+ "changed from %ums to %ums\n",
+- data[1], 0x80);
+- data[1] = 0x80;
++ interval, 0x80);
++ interval = 0x80;
++ data = (data & 0x00FF) | ((u16)interval << 8);
+ #ifdef PEGASUS_WRITE_EEPROM
+- write_eprom_word(pegasus, 4, *(__u16 *) data);
++ write_eprom_word(pegasus, 4, data);
+ #endif
+ }
+ }
+- pegasus->intr_interval = data[1];
++ pegasus->intr_interval = interval;
+ }
+
+ static void set_carrier(struct net_device *net)
+@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct us
+ /* Special quirk to keep the driver from handling the Belkin Bluetooth
+ * dongle which happens to have the same ID.
+ */
+- if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
++ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
++ (udd->idProduct == cpu_to_le16(0x0121)) &&
+ (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
+ (udd->bDeviceProtocol == 1))
+ return 1;
ima-use-current_cred-instead-of-current-cred.patch
ima-handle-dentry_open-failures.patch
ima-open-all-files-o_largefile.patch
+e1000e-stop-unnecessary-polling-when-using-msi-x.patch
+pegasus-usb-net-fix-endianness-bugs.patch
+ipv4-fix-null-pointer-success-return-in-route-lookup-path.patch
+ipv4-routing-ensure-that-route-cache-entries-are-usable-and-reclaimable-with-caching-is-off.patch
+sky2-don-t-look-for-vpd-size.patch
+tun-fix-unregister-race.patch
+via-velocity-fix-velocity-driver-unmapping-incorrect-size.patch
+x25-fix-sleep-from-timer-on-socket-destroy.patch
+bonding-fix-multiple-module-load-problem.patch
--- /dev/null
+From 0c6e0475eea6e64cc57b5b383b30ad92c634727f Mon Sep 17 00:00:00 2001
+From: Stephen Hemminger <shemminger@vyatta.com>
+Date: Thu, 11 Jun 2009 07:03:47 +0000
+Subject: sky2: don't look for VPD size
+
+From: Stephen Hemminger <shemminger@vyatta.com>
+
+[ Upstream commit 6cc90a5a6061428358d0f726a53fb44af5254111 ]
+
+The code to compute the VPD size didn't handle some systems that use a
+chip without VPD. Also, some of the newer chips use additional
+registers to store the actual size, and it wasn't worth adding that
+complexity, so just remove the code.
+
+No big loss since the code to set the VPD size was only a
+convenience so that utilities would not read the extra space past
+the end of the available VPD.
+
+Move the first PCI config read earlier to detect bad hardware, where
+the config space reads back as all ones, and refuse to load the driver
+before further damage is done.
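+
+A hedged sketch of the early sanity check (simplified, not the full
+sky2_probe): a dead or surprise-removed PCI device returns all ones for
+config space reads, so bail out before touching anything else.
+
+    static int example_check_alive(struct pci_dev *pdev)
+    {
+            u32 reg;
+            int err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+
+            if (err)
+                    return err;     /* the config access itself failed */
+
+            /* ~reg == 0 means reg == 0xffffffff: nothing really there */
+            if (~reg == 0)
+                    return -EIO;
+
+            return 0;
+    }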
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Tested-by: Andy Whitcroft <apw@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/sky2.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -4365,6 +4365,22 @@ static int __devinit sky2_probe(struct p
+ goto err_out;
+ }
+
++ /* Get configuration information
++ * Note: only regular PCI config access once to test for HW issues
++ * other PCI access through shared memory for speed and to
++ * avoid MMCONFIG problems.
++ */
++	err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
++ if (err) {
++ dev_err(&pdev->dev, "PCI read config failed\n");
++ goto err_out;
++ }
++
++ if (~reg == 0) {
++ dev_err(&pdev->dev, "PCI configuration read error\n");
++ goto err_out;
++ }
++
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+@@ -4390,21 +4406,6 @@ static int __devinit sky2_probe(struct p
+ }
+ }
+
+- /* Get configuration information
+- * Note: only regular PCI config access once to test for HW issues
+- * other PCI access through shared memory for speed and to
+- * avoid MMCONFIG problems.
+- */
+-	err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+- if (err) {
+- dev_err(&pdev->dev, "PCI read config failed\n");
+- goto err_out_free_regions;
+- }
+-
+- /* size of available VPD, only impact sysfs */
+- err = pci_vpd_truncate(pdev, 1ul << (((reg & PCI_VPD_ROM_SZ) >> 14) + 8));
+- if (err)
+- dev_warn(&pdev->dev, "Can't set VPD size\n");
+
+ #ifdef __BIG_ENDIAN
+ /* The sk98lin vendor driver uses hardware byte swapping but
--- /dev/null
+From 5bc4fa040c11a242fe0633112f8b93c4e2868dae Mon Sep 17 00:00:00 2001
+From: Eric W. Biederman <ebiederm@aristanetworks.com>
+Date: Mon, 8 Jun 2009 00:44:31 -0700
+Subject: tun: Fix unregister race
+
+From: Eric W. Biederman <ebiederm@aristanetworks.com>
+
+[ Upstream commit f0a4d0e5b5bfd271e6737f7c095994835b70d450 ]
+
+It is possible for tun_chr_close to race with dellink on a tun
+device. In that case, if __tun_get runs before dellink, but dellink
+runs before tun_chr_close calls unregister_netdevice, we will
+attempt to unregister the netdevice after it is already gone.
+
+The two cases are already serialized on the rtnl_lock, so I have
+gone for the cheap, simple fix of moving rtnl_lock to cover __tun_get
+in tun_chr_close, eliminating the possibility of the tun device
+being unregistered between __tun_get and unregister_netdevice in
+tun_chr_close.
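+
+A hedged sketch of the interleaving being closed off (simplified, not
+the full tun.c close path):
+
+    /* Before the fix, this ordering was possible:
+     *
+     *   close():  tun = __tun_get(tfile);        device still registered
+     *   dellink:  rtnl_lock(); unregister_netdevice(dev); rtnl_unlock();
+     *   close():  rtnl_lock(); unregister_netdevice(tun->dev);  already gone
+     *
+     * Taking rtnl_lock() before __tun_get() removes the window:
+     */
+    rtnl_lock();
+    tun = __tun_get(tfile);
+    if (tun && !(tun->flags & TUN_PERSIST))
+            unregister_netdevice(tun->dev);
+    rtnl_unlock();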
+
+Signed-off-by: Eric W. Biederman <ebiederm@aristanetworks.com>
+Tested-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/tun.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1275,21 +1275,22 @@ static int tun_chr_open(struct inode *in
+ static int tun_chr_close(struct inode *inode, struct file *file)
+ {
+ struct tun_file *tfile = file->private_data;
+- struct tun_struct *tun = __tun_get(tfile);
++ struct tun_struct *tun;
+
+
++ rtnl_lock();
++ tun = __tun_get(tfile);
+ if (tun) {
+ DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
+
+- rtnl_lock();
+ __tun_detach(tun);
+
+ /* If desireable, unregister the netdevice. */
+ if (!(tun->flags & TUN_PERSIST))
+ unregister_netdevice(tun->dev);
+
+- rtnl_unlock();
+ }
++ rtnl_unlock();
+
+ tun = tfile->tun;
+ if (tun)
--- /dev/null
+From e271077b7fbcf79e1d111bbf28c89b4042ce264a Mon Sep 17 00:00:00 2001
+From: Dave Jones <davej@redhat.com>
+Date: Sun, 21 Jun 2009 22:42:30 -0700
+Subject: via-velocity: Fix velocity driver unmapping incorrect size.
+
+From: Dave Jones <davej@redhat.com>
+
+[ Upstream commit f6b24caaf933a466397915a08e30e885a32f905a ]
+
+When a packet is greater than ETH_ZLEN, we end up assigning the
+boolean result of a comparison to the size we unmap.
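+
+A hedged illustration of the gotcha (not the driver code): GCC's
+two-operand "a ? : b" extension reuses the tested expression as the
+result, so when the test is a comparison the result is just 0 or 1.
+
+    static unsigned int example_pktlen(unsigned int skb_len)
+    {
+            /* buggy: evaluates to 1 whenever skb_len > ETH_ZLEN */
+            unsigned int bad = (skb_len > ETH_ZLEN ? : ETH_ZLEN);
+
+            /* intended: never unmap less than the minimum frame length */
+            unsigned int good = max_t(unsigned int, skb_len, ETH_ZLEN);
+
+            (void)bad;              /* silence unused-variable warning */
+            return good;
+    }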
+
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/via-velocity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/via-velocity.c
++++ b/drivers/net/via-velocity.c
+@@ -1845,7 +1845,7 @@ static void velocity_free_tx_buf(struct
+ */
+ if (tdinfo->skb_dma) {
+
+- pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
++ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+ #ifdef VELOCITY_ZERO_COPY_SUPPORT
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
--- /dev/null
+From a4f867421f7931789a96a277f916f9c3e03a9ece Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Tue, 16 Jun 2009 05:40:30 -0700
+Subject: x25: Fix sleep from timer on socket destroy.
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 14ebaf81e13ce66bff275380b246796fd16cbfa1 ]
+
+If socket destruction gets delayed to a timer, we try to
+lock_sock() from that timer, which won't work: timers run in
+softirq context and lock_sock() may sleep.
+
+Use bh_lock_sock() in that case.
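+
+A hedged sketch of the pattern (generic names; __example_destroy_socket
+stands in for the shared teardown helper): timers run in softirq
+context, where the possibly-sleeping lock_sock() is not allowed, while
+bh_lock_sock() just takes the socket spinlock and is safe there.
+
+    static void example_destroy_from_timer(struct sock *sk)
+    {
+            sock_hold(sk);
+            bh_lock_sock(sk);               /* safe in timer context */
+            __example_destroy_socket(sk);   /* common teardown code */
+            bh_unlock_sock(sk);
+            sock_put(sk);
+    }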
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Tested-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/x25.h | 2 +-
+ net/x25/af_x25.c | 23 ++++++++++++++++++-----
+ net/x25/x25_timer.c | 2 +-
+ 3 files changed, 20 insertions(+), 7 deletions(-)
+
+--- a/include/net/x25.h
++++ b/include/net/x25.h
+@@ -187,7 +187,7 @@ extern int x25_addr_ntoa(unsigned char
+ extern int x25_addr_aton(unsigned char *, struct x25_address *,
+ struct x25_address *);
+ extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+-extern void x25_destroy_socket(struct sock *);
++extern void x25_destroy_socket_from_timer(struct sock *);
+ extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+ extern void x25_kill_by_neigh(struct x25_neigh *);
+
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -332,14 +332,14 @@ static unsigned int x25_new_lci(struct x
+ /*
+ * Deferred destroy.
+ */
+-void x25_destroy_socket(struct sock *);
++static void __x25_destroy_socket(struct sock *);
+
+ /*
+ * handler for deferred kills.
+ */
+ static void x25_destroy_timer(unsigned long data)
+ {
+- x25_destroy_socket((struct sock *)data);
++ x25_destroy_socket_from_timer((struct sock *)data);
+ }
+
+ /*
+@@ -349,12 +349,10 @@ static void x25_destroy_timer(unsigned l
+ * will touch it and we are (fairly 8-) ) safe.
+ * Not static as it's used by the timer
+ */
+-void x25_destroy_socket(struct sock *sk)
++static void __x25_destroy_socket(struct sock *sk)
+ {
+ struct sk_buff *skb;
+
+- sock_hold(sk);
+- lock_sock(sk);
+ x25_stop_heartbeat(sk);
+ x25_stop_timer(sk);
+
+@@ -385,7 +383,22 @@ void x25_destroy_socket(struct sock *sk)
+ /* drop last reference so sock_put will free */
+ __sock_put(sk);
+ }
++}
+
++void x25_destroy_socket_from_timer(struct sock *sk)
++{
++ sock_hold(sk);
++ bh_lock_sock(sk);
++ __x25_destroy_socket(sk);
++ bh_unlock_sock(sk);
++ sock_put(sk);
++}
++
++static void x25_destroy_socket(struct sock *sk)
++{
++ sock_hold(sk);
++ lock_sock(sk);
++ __x25_destroy_socket(sk);
+ release_sock(sk);
+ sock_put(sk);
+ }
+--- a/net/x25/x25_timer.c
++++ b/net/x25/x25_timer.c
+@@ -113,7 +113,7 @@ static void x25_heartbeat_expiry(unsigne
+ (sk->sk_state == TCP_LISTEN &&
+ sock_flag(sk, SOCK_DEAD))) {
+ bh_unlock_sock(sk);
+- x25_destroy_socket(sk);
++ x25_destroy_socket_from_timer(sk);
+ return;
+ }
+ break;