--- /dev/null
+From 733b71ac858e93670e5cc069e6c8eb04af4d7d22 Mon Sep 17 00:00:00 2001
+From: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+Date: Wed, 13 Nov 2013 11:03:39 +0200
+Subject: 6lowpan: Uncompression of traffic class field was incorrect
+
+From: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+
+[ Upstream commit 1188f05497e7bd2f2614b99c54adfbe7413d5749 ]
+
+If priority/traffic class field in IPv6 header is set (seen when
+using ssh), the uncompression sets the TC and Flow fields incorrectly.
+
+Example:
+
+This is IPv6 header of a sent packet. Note the priority/TC (=1) in
+the first byte.
+
+00000000: 61 00 00 00 00 2c 06 40 fe 80 00 00 00 00 00 00
+00000010: 02 02 72 ff fe c6 42 10 fe 80 00 00 00 00 00 00
+00000020: 02 1e ab ff fe 4c 52 57
+
+This gets compressed like this in the sending side
+
+00000000: 72 31 04 06 02 1e ab ff fe 4c 52 57 ec c2 00 16
+00000010: aa 2d fe 92 86 4e be c6 ....
+
+In the receiving end, the packet gets uncompressed to this
+IPv6 header
+
+00000000: 60 06 06 02 00 2a 1e 40 fe 80 00 00 00 00 00 00
+00000010: 02 02 72 ff fe c6 42 10 fe 80 00 00 00 00 00 00
+00000020: ab ff fe 4c 52 57 ec c2
+
+First four bytes are set incorrectly and we have also lost
+two bytes from destination address.
+
+The fix is to switch the case values in switch statement
+when checking the TC field.
+
+Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ieee802154/6lowpan.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -862,7 +862,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+- case 1: /* 10b */
++ case 2: /* 10b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
+@@ -875,7 +875,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+- case 2: /* 01b */
++ case 1: /* 01b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
--- /dev/null
+From 1f25ea537dcb2c3d42b7c0989b2dc31fa21c87da Mon Sep 17 00:00:00 2001
+From: Veaceslav Falico <vfalico@redhat.com>
+Date: Fri, 29 Nov 2013 09:53:23 +0100
+Subject: af_packet: block BH in prb_shutdown_retire_blk_timer()
+
+From: Veaceslav Falico <vfalico@redhat.com>
+
+[ Upstream commit ec6f809ff6f19fafba3212f6aff0dda71dfac8e8 ]
+
+Currently we're using plain spin_lock() in prb_shutdown_retire_blk_timer(),
+however the timer might fire right in the middle and thus try to re-acquire
+the same spinlock, leaving us in an endless loop.
+
+To fix that, use the spin_lock_bh() to block it.
+
+Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
+CC: "David S. Miller" <davem@davemloft.net>
+CC: Daniel Borkmann <dborkman@redhat.com>
+CC: Willem de Bruijn <willemb@google.com>
+CC: Phil Sutter <phil@nwl.cc>
+CC: Eric Dumazet <edumazet@google.com>
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Tested-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Veaceslav Falico <vfalico@redhat.com>
+Acked-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -439,9 +439,9 @@ static void prb_shutdown_retire_blk_time
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+- spin_lock(&rb_queue->lock);
++ spin_lock_bh(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+- spin_unlock(&rb_queue->lock);
++ spin_unlock_bh(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+ }
--- /dev/null
+From 9a5846bbcd170262d4c9a0e2602f7712b7644db0 Mon Sep 17 00:00:00 2001
+From: Ying Xue <ying.xue@windriver.com>
+Date: Tue, 19 Nov 2013 18:09:27 +0800
+Subject: atm: idt77252: fix dev refcnt leak
+
+From: Ying Xue <ying.xue@windriver.com>
+
+[ Upstream commit b5de4a22f157ca345cdb3575207bf46402414bc1 ]
+
+init_card() calls dev_get_by_name() to get a network device. But it
+doesn't decrease network device reference count after the device is
+used.
+
+Signed-off-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/atm/idt77252.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev
+ tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
+ if (tmp) {
+ memcpy(card->atmdev->esi, tmp->dev_addr, 6);
+-
++ dev_put(tmp);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
+ }
+ /*
--- /dev/null
+From 0c28eb79fa0868a1237cd18347504e757c891405 Mon Sep 17 00:00:00 2001
+From: Veaceslav Falico <vfalico@redhat.com>
+Date: Tue, 12 Nov 2013 15:37:40 +0100
+Subject: bonding: don't permit to use ARP monitoring in 802.3ad mode
+
+From: Veaceslav Falico <vfalico@redhat.com>
+
+[ Upstream commit ec9f1d15db8185f63a2c3143dc1e90ba18541b08 ]
+
+Currently the ARP monitoring is not supported with 802.3ad, and it's
+prohibited to use it via the module params.
+
+However we still can set it afterwards via sysfs, cause we only check for
+*LB modes there.
+
+To fix this - add a check for 802.3ad mode in bonding_store_arp_interval.
+
+Signed-off-by: Veaceslav Falico <vfalico@redhat.com>
+CC: Jay Vosburgh <fubar@us.ibm.com>
+CC: Andy Gospodarek <andy@greyhouse.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_sysfs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -537,8 +537,9 @@ static ssize_t bonding_store_arp_interva
+ goto out;
+ }
+ if (bond->params.mode == BOND_MODE_ALB ||
+- bond->params.mode == BOND_MODE_TLB) {
+- pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
++ bond->params.mode == BOND_MODE_TLB ||
++ bond->params.mode == BOND_MODE_8023AD) {
++ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ ret = -EINVAL;
+ goto out;
--- /dev/null
+From 528dfe7694081afbdd4e7738b93bd9e25780ed1b Mon Sep 17 00:00:00 2001
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+Date: Wed, 13 Nov 2013 17:07:46 +0100
+Subject: bonding: fix two race conditions in bond_store_updelay/downdelay
+
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+
+[ Upstream commit b869ccfab1e324507fa3596e3e1308444fb68227 ]
+
+This patch fixes two race conditions between bond_store_updelay/downdelay
+and bond_store_miimon which could lead to division by zero as miimon can
+be set to 0 while either updelay/downdelay are being set and thus miss the
+zero check in the beginning, the zero div happens because updelay/downdelay
+are stored as new_value / bond->params.miimon. Use rtnl to synchronize with
+miimon setting.
+
+Signed-off-by: Nikolay Aleksandrov <nikolay@redhat.com>
+CC: Jay Vosburgh <fubar@us.ibm.com>
+CC: Andy Gospodarek <andy@greyhouse.net>
+CC: Veaceslav Falico <vfalico@redhat.com>
+Acked-by: Veaceslav Falico <vfalico@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_sysfs.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -697,6 +697,8 @@ static ssize_t bonding_store_downdelay(s
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -730,6 +732,7 @@ static ssize_t bonding_store_downdelay(s
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+@@ -752,6 +755,8 @@ static ssize_t bonding_store_updelay(str
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -785,6 +790,7 @@ static ssize_t bonding_store_updelay(str
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
--- /dev/null
+From a751b0f492ae3e20de43a9dc7cb527dc4af37c39 Mon Sep 17 00:00:00 2001
+From: Ding Tianhong <dingtianhong@huawei.com>
+Date: Sat, 7 Dec 2013 22:12:05 +0800
+Subject: bridge: flush br's address entry in fdb when remove the bridge dev
+
+From: Ding Tianhong <dingtianhong@huawei.com>
+
+[ Upstream commit f873042093c0b418d2351fe142222b625c740149 ]
+
+When the following commands are executed:
+
+brctl addbr br0
+ifconfig br0 hw ether <addr>
+rmmod bridge
+
+The calltrace will occur:
+
+[ 563.312114] device eth1 left promiscuous mode
+[ 563.312188] br0: port 1(eth1) entered disabled state
+[ 563.468190] kmem_cache_destroy bridge_fdb_cache: Slab cache still has objects
+[ 563.468197] CPU: 6 PID: 6982 Comm: rmmod Tainted: G O 3.12.0-0.7-default+ #9
+[ 563.468199] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007
+[ 563.468200] 0000000000000880 ffff88010f111e98 ffffffff814d1c92 ffff88010f111eb8
+[ 563.468204] ffffffff81148efd ffff88010f111eb8 0000000000000000 ffff88010f111ec8
+[ 563.468206] ffffffffa062a270 ffff88010f111ed8 ffffffffa063ac76 ffff88010f111f78
+[ 563.468209] Call Trace:
+[ 563.468218] [<ffffffff814d1c92>] dump_stack+0x6a/0x78
+[ 563.468234] [<ffffffff81148efd>] kmem_cache_destroy+0xfd/0x100
+[ 563.468242] [<ffffffffa062a270>] br_fdb_fini+0x10/0x20 [bridge]
+[ 563.468247] [<ffffffffa063ac76>] br_deinit+0x4e/0x50 [bridge]
+[ 563.468254] [<ffffffff810c7dc9>] SyS_delete_module+0x199/0x2b0
+[ 563.468259] [<ffffffff814e0922>] system_call_fastpath+0x16/0x1b
+[ 570.377958] Bridge firewalling registered
+
+--------------------------- cut here -------------------------------
+
+The reason is that when the bridge dev's address is changed, the
+br_fdb_change_mac_address() will add new address in fdb, but when
+the bridge was removed, the address entry in the fdb did not free,
+the bridge_fdb_cache still has objects when destroy the cache, Fix
+this by flushing the bridge address entry when removing the bridge.
+
+v2: according to the Toshiaki Makita and Vlad's suggestion, I only
+ delete the vlan0 entry, it still have a leak here if the vlan id
+ is other number, so I need to call fdb_delete_by_port(br, NULL, 1)
+ to flush all entries whose dst is NULL for the bridge.
+
+Suggested-by: Toshiaki Makita <toshiaki.makita1@gmail.com>
+Suggested-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_if.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -172,6 +172,8 @@ void br_dev_delete(struct net_device *de
+ del_nbp(p);
+ }
+
++ br_fdb_delete_by_port(br, NULL, 1);
++
+ del_timer_sync(&br->gc_timer);
+
+ br_sysfs_delbr(br->dev);
--- /dev/null
+From a3ebba682e5247ff3d446301663f38b84e3a41ca Mon Sep 17 00:00:00 2001
+From: Chris Metcalf <cmetcalf@tilera.com>
+Date: Thu, 14 Nov 2013 12:09:21 -0500
+Subject: connector: improved unaligned access error fix
+
+From: Chris Metcalf <cmetcalf@tilera.com>
+
+[ Upstream commit 1ca1a4cf59ea343a1a70084fe7cc96f37f3cf5b1 ]
+
+In af3e095a1fb4, Erik Jacobsen fixed one type of unaligned access
+bug for ia64 by converting a 64-bit write to use put_unaligned().
+Unfortunately, since gcc will convert a short memset() to a series
+of appropriately-aligned stores, the problem is now visible again
+on tilegx, where the memset that zeros out proc_event is converted
+to three 64-bit stores, causing an unaligned access panic.
+
+A better fix for the original problem is to ensure that proc_event
+is aligned to 8 bytes here. We can do that relatively easily by
+arranging to start the struct cn_msg aligned to 8 bytes and then
+offset by 4 bytes. Doing so means that the immediately following
+proc_event structure is then correctly aligned to 8 bytes.
+
+The result is that the memset() stores are now aligned, and as an
+added benefit, we can remove the put_unaligned() calls in the code.
+
+Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/connector/cn_proc.c | 72 +++++++++++++++++++++++++-------------------
+ 1 file changed, 42 insertions(+), 30 deletions(-)
+
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -32,11 +32,23 @@
+ #include <linux/atomic.h>
+ #include <linux/pid_namespace.h>
+
+-#include <asm/unaligned.h>
+-
+ #include <linux/cn_proc.h>
+
+-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
++/*
++ * Size of a cn_msg followed by a proc_event structure. Since the
++ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
++ * add one 4-byte word to the size here, and then start the actual
++ * cn_msg structure 4 bytes into the stack buffer. The result is that
++ * the immediately following proc_event structure is aligned to 8 bytes.
++ */
++#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
++
++/* See comment above; we test our assumption about sizeof struct cn_msg here. */
++static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
++{
++ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
++ return (struct cn_msg *)(buffer + 4);
++}
+
+ static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
+ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
+@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_str
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ struct task_struct *parent;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_FORK;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_str
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXEC;
+ ev->event_data.exec.process_pid = task->pid;
+ ev->event_data.exec.process_tgid = task->tgid;
+@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struc
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ const struct cred *cred;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struc
+ rcu_read_unlock();
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_stru
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_SID;
+ ev->event_data.sid.process_pid = task->pid;
+ ev->event_data.sid.process_tgid = task->tgid;
+@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_s
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_str
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COREDUMP;
+ ev->event_data.coredump.process_pid = task->pid;
+ ev->event_data.coredump.process_tgid = task->tgid;
+@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_str
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXIT;
+ ev->event_data.exit.process_pid = task->pid;
+ ev->event_data.exit.process_tgid = task->tgid;
+@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcv
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->cpu = -1;
+ ev->what = PROC_EVENT_NONE;
+ ev->event_data.ack.err = err;
--- /dev/null
+From 9591b1c35f7485ff7d505e43075ef1d2305d25fc Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Sat, 23 Nov 2013 00:46:12 +0100
+Subject: inet: fix addr_len/msg->msg_namelen assignment in recv_error and rxpmtu functions
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 85fbaa75037d0b6b786ff18658ddf0b4014ce2a4 ]
+
+Commit bceaa90240b6019ed73b49965eac7d167610be69 ("inet: prevent leakage
+of uninitialized memory to user in recv syscalls") conditionally updated
+addr_len if the msg_name is written to. The recv_error and rxpmtu
+functions relied on the recvmsg functions to set up addr_len before.
+
+As this does not happen any more we have to pass addr_len to those
+functions as well and set it to the size of the corresponding sockaddr
+length.
+
+This broke traceroute and such.
+
+Fixes: bceaa90240b6 ("inet: prevent leakage of uninitialized memory to user in recv syscalls")
+Reported-by: Brad Spengler <spender@grsecurity.net>
+Reported-by: Tom Labanowski
+Cc: mpb <mpb.mail@gmail.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h | 2 +-
+ include/net/ipv6.h | 6 ++++--
+ net/ipv4/ip_sockglue.c | 3 ++-
+ net/ipv4/ping.c | 2 +-
+ net/ipv4/raw.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/ipv6/datagram.c | 7 +++++--
+ net/ipv6/raw.c | 4 ++--
+ net/ipv6/udp.c | 4 ++--
+ net/l2tp/l2tp_ip6.c | 2 +-
+ 10 files changed, 20 insertions(+), 14 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -454,7 +454,7 @@ extern int compat_ip_getsockopt(struct s
+ int optname, char __user *optval, int __user *optlen);
+ extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+
+-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
++extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+ extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -793,8 +793,10 @@ extern int compat_ipv6_getsockopt(stru
+ extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
++extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
++extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
+ extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+ extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -368,7 +368,7 @@ void ip_local_error(struct sock *sk, int
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+@@ -405,6 +405,7 @@ int ip_recv_error(struct sock *sk, struc
+ serr->addr_offset);
+ sin->sin_port = serr->port;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -636,7 +636,7 @@ static int ping_recvmsg(struct kiocb *io
+ goto out;
+
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -693,7 +693,7 @@ static int raw_recvmsg(struct kiocb *ioc
+ goto out;
+
+ if (flags & MSG_ERRQUEUE) {
+- err = ip_recv_error(sk, msg, len);
++ err = ip_recv_error(sk, msg, len, addr_len);
+ goto out;
+ }
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1208,7 +1208,7 @@ int udp_recvmsg(struct kiocb *iocb, stru
+ bool slow;
+
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk,
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sock_exterr_skb *serr;
+@@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, str
+ &sin->sin6_addr);
+ sin->sin6_scope_id = 0;
+ }
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
+ /*
+ * Handle IPV6_RECVPATHMTU
+ */
+-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+@@ -457,6 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, st
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
++ *addr_len = sizeof(*sin);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -460,10 +460,10 @@ static int rawv6_recvmsg(struct kiocb *i
+ return -EOPNOTSUPP;
+
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -374,10 +374,10 @@ int udpv6_recvmsg(struct kiocb *iocb, st
+ bool slow;
+
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb
+ *addr_len = sizeof(*lsa);
+
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
--- /dev/null
+From c19eb885a2c3d9c85a821725dab48b70de88e046 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 28 Nov 2013 09:51:22 -0800
+Subject: inet: fix possible seqlock deadlocks
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f1d8cba61c3c4b1eb88e507249c4cb8d635d9a76 ]
+
+In commit c9e9042994d3 ("ipv4: fix possible seqlock deadlock") I left
+another places where IP_INC_STATS_BH() were improperly used.
+
+udp_sendmsg(), ping_v4_sendmsg() and tcp_v4_connect() are called from
+process context, not from softirq context.
+
+This was detected by lockdep seqlock support.
+
+Reported-by: jongman heo <jongman.heo@samsung.com>
+Fixes: 584bdf8cbdf6 ("[IPV4]: Fix "ipOutNoRoutes" counter error for TCP and UDP")
+Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -570,7 +570,7 @@ static int ping_sendmsg(struct kiocb *io
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -176,7 +176,7 @@ int tcp_v4_connect(struct sock *sk, stru
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ return err;
+ }
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -971,7 +971,7 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
--- /dev/null
+From 24832094f088049d4d8379a1691aa94510242964 Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Mon, 18 Nov 2013 04:20:45 +0100
+Subject: inet: prevent leakage of uninitialized memory to user in recv syscalls
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit bceaa90240b6019ed73b49965eac7d167610be69 ]
+
+Only update *addr_len when we actually fill in sockaddr, otherwise we
+can return uninitialized memory from the stack to the caller in the
+recvfrom, recvmmsg and recvmsg syscalls. Drop the (addr_len == NULL)
+checks because we only get called with a valid addr_len pointer either
+from sock_common_recvmsg or inet_recvmsg.
+
+If a blocking read waits on a socket which is concurrently shut down we
+now return zero and set msg_msgnamelen to 0.
+
+Reported-by: mpb <mpb.mail@gmail.com>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ieee802154/dgram.c | 3 +--
+ net/ipv4/ping.c | 9 ++++-----
+ net/ipv4/raw.c | 4 +---
+ net/ipv4/udp.c | 7 +------
+ net/ipv6/raw.c | 4 +---
+ net/ipv6/udp.c | 5 +----
+ net/l2tp/l2tp_ip.c | 4 +---
+ net/phonet/datagram.c | 9 ++++-----
+ 8 files changed, 14 insertions(+), 31 deletions(-)
+
+--- a/net/ieee802154/dgram.c
++++ b/net/ieee802154/dgram.c
+@@ -315,9 +315,8 @@ static int dgram_recvmsg(struct kiocb *i
+ if (saddr) {
+ saddr->family = AF_IEEE802154;
+ saddr->addr = mac_cb(skb)->sa;
+- }
+- if (addr_len)
+ *addr_len = sizeof(*saddr);
++ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -626,7 +626,6 @@ static int ping_recvmsg(struct kiocb *io
+ size_t len, int noblock, int flags, int *addr_len)
+ {
+ struct inet_sock *isk = inet_sk(sk);
+- struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+ struct sk_buff *skb;
+ int copied, err;
+
+@@ -636,9 +635,6 @@ static int ping_recvmsg(struct kiocb *io
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+ return ip_recv_error(sk, msg, len);
+
+@@ -660,11 +656,14 @@ static int ping_recvmsg(struct kiocb *io
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+- if (sin) {
++ if (msg->msg_name) {
++ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
++
+ sin->sin_family = AF_INET;
+ sin->sin_port = 0 /* skb->h.uh->source */;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (isk->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -692,9 +692,6 @@ static int raw_recvmsg(struct kiocb *ioc
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE) {
+ err = ip_recv_error(sk, msg, len);
+ goto out;
+@@ -722,6 +719,7 @@ static int raw_recvmsg(struct kiocb *ioc
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1207,12 +1207,6 @@ int udp_recvmsg(struct kiocb *iocb, stru
+ int is_udplite = IS_UDPLITE(sk);
+ bool slow;
+
+- /*
+- * Check any passed addresses
+- */
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+ return ip_recv_error(sk, msg, len);
+
+@@ -1274,6 +1268,7 @@ try_again:
+ sin->sin_port = udp_hdr(skb)->source;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -459,9 +459,6 @@ static int rawv6_recvmsg(struct kiocb *i
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- if (addr_len)
+- *addr_len=sizeof(*sin6);
+-
+ if (flags & MSG_ERRQUEUE)
+ return ipv6_recv_error(sk, msg, len);
+
+@@ -500,6 +497,7 @@ static int rawv6_recvmsg(struct kiocb *i
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
++ *addr_len = sizeof(*sin6);
+ }
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -373,9 +373,6 @@ int udpv6_recvmsg(struct kiocb *iocb, st
+ int is_udp4;
+ bool slow;
+
+- if (addr_len)
+- *addr_len = sizeof(struct sockaddr_in6);
+-
+ if (flags & MSG_ERRQUEUE)
+ return ipv6_recv_error(sk, msg, len);
+
+@@ -461,7 +458,7 @@ try_again:
+ ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
+ }
+-
++ *addr_len = sizeof(*sin6);
+ }
+ if (is_udp4) {
+ if (inet->cmsg_flags)
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -518,9 +518,6 @@ static int l2tp_ip_recvmsg(struct kiocb
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+@@ -543,6 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb
+ MSG_CMSG_COMPAT))
+ goto out_nofree;
+
+- if (addr_len)
+- *addr_len = sizeof(sa);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &rval);
+ if (skb == NULL)
+ goto out_nofree;
+@@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb
+
+ rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+- if (msg->msg_name != NULL)
+- memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
++ if (msg->msg_name != NULL) {
++ memcpy(msg->msg_name, &sa, sizeof(sa));
++ *addr_len = sizeof(sa);
++ }
+
+ out:
+ skb_free_datagram(sk, skb);
--- /dev/null
+From 7e76a2052e5ec51bd94e05ba76f47729af9ef937 Mon Sep 17 00:00:00 2001
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Wed, 6 Nov 2013 17:52:19 +0100
+Subject: ip6_output: fragment outgoing reassembled skb properly
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 9037c3579a277f3a23ba476664629fda8c35f7c4 ]
+
+If the reassembled packet would fit into the outdev MTU, it is not fragmented
+according to the original frag size and is sent as a single big packet.
+
+The second case is if skb is gso. In that case fragmentation does not happen
+according to the original frag size.
+
+This patch fixes these.
+
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -150,7 +150,8 @@ static int ip6_finish_output2(struct sk_
+ static int ip6_finish_output(struct sk_buff *skb)
+ {
+ if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+- dst_allfrag(skb_dst(skb)))
++ dst_allfrag(skb_dst(skb)) ||
++ (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
+ return ip6_fragment(skb, ip6_finish_output2);
+ else
+ return ip6_finish_output2(skb);
--- /dev/null
+From 9ee51267f6c39a4f3c63ad470c8f978b502fc8e2 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 14 Nov 2013 13:37:54 -0800
+Subject: ipv4: fix possible seqlock deadlock
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit c9e9042994d37cbc1ee538c500e9da1bb9d1bcdf ]
+
+ip4_datagram_connect() being called from process context,
+it should use IP_INC_STATS() instead of IP_INC_STATS_BH()
+otherwise we can deadlock on 32bit arches, or get corruptions of
+SNMP counters.
+
+Fixes: 584bdf8cbdf6 ("[IPV4]: Fix "ipOutNoRoutes" counter error for TCP and UDP")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Dave Jones <davej@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/datagram.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
--- /dev/null
+From 7061e17d06decf63b27432a83011934ca47b0fd0 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@plumgrid.com>
+Date: Tue, 19 Nov 2013 19:12:34 -0800
+Subject: ipv4: fix race in concurrent ip_route_input_slow()
+
+From: Alexei Starovoitov <ast@plumgrid.com>
+
+[ Upstream commit dcdfdf56b4a6c9437fc37dbc9cee94a788f9b0c4 ]
+
+CPUs can ask for local route via ip_route_input_noref() concurrently.
+if nh_rth_input is not cached yet, CPUs will proceed to allocate
+equivalent DSTs on 'lo' and then will try to cache them in nh_rth_input
+via rt_cache_route()
+Most of the time they succeed, but on occasion the following two lines:
+ orig = *p;
+ prev = cmpxchg(p, orig, rt);
+in rt_cache_route() do race and one of the cpus fails to complete cmpxchg.
+But ip_route_input_slow() doesn't check the return code of rt_cache_route(),
+so dst is leaking. dst_destroy() is never called and 'lo' device
+refcnt doesn't go to zero, which can be seen in the logs as:
+ unregister_netdevice: waiting for lo to become free. Usage count = 1
+Adding mdelay() between above two lines makes it easily reproducible.
+Fix it similar to nh_pcpu_rth_output case.
+
+Fixes: d2d68ba9fe8b ("ipv4: Cache input routes in fib_info nexthops.")
+Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1720,8 +1720,12 @@ local_input:
+ rth->dst.error= -err;
+ rth->rt_flags &= ~RTCF_LOCAL;
+ }
+- if (do_cache)
+- rt_cache_route(&FIB_RES_NH(res), rth);
++ if (do_cache) {
++ if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
++ rth->dst.flags |= DST_NOCACHE;
++ rt_add_uncached_list(rth);
++ }
++ }
+ skb_dst_set(skb, &rth->dst);
+ err = 0;
+ goto out;
--- /dev/null
+From aea39f26c4a2c08bd625bdb35a16ce6edb42f71c Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Tue, 5 Nov 2013 02:41:27 +0100
+Subject: ipv6: fix headroom calculation in udp6_ufo_fragment
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 0e033e04c2678dbbe74a46b23fffb7bb918c288e ]
+
+Commit 1e2bd517c108816220f262d7954b697af03b5f9c ("udp6: Fix udp
+fragmentation for tunnel traffic.") changed the calculation if
+there is enough space to include a fragment header in the skb from a
+skb->mac_header derived one to skb_headroom. Because we already peeled
+off the skb to transport_header this is wrong. Change this back to check
+if we have enough room before the mac_header.
+
+This fixes a panic Saran Neti reported. He used the tbf scheduler which
+skb_gso_segments the skb. The offsets get negative and we panic in memcpy
+because the skb was erroneously not expanded at the head.
+
+Reported-by: Saran Neti <Saran.Neti@telus.com>
+Cc: Pravin B Shelar <pshelar@nicira.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/udp_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -85,7 +85,7 @@ static struct sk_buff *udp6_ufo_fragment
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+- if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
++ if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
--- /dev/null
+From cda165fc4b264da12719ffb07d124d011e01d1da Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Sat, 23 Nov 2013 07:22:33 +0100
+Subject: ipv6: fix leaking uninitialized port number of offender sockaddr
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 1fa4c710b6fe7b0aac9907240291b6fe6aafc3b8 ]
+
+Offenders don't have port numbers, so set it to 0.
+
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/datagram.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -378,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, str
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
++ sin->sin6_port = 0;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ sin->sin6_addr = ipv6_hdr(skb)->saddr;
+ if (np->rxopt.all)
--- /dev/null
+From 79fec1e3bb0db644023e3e7ff938c523bb143938 Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Fri, 29 Nov 2013 06:39:44 +0100
+Subject: ipv6: fix possible seqlock deadlock in ip6_finish_output2
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 7f88c6b23afbd31545c676dea77ba9593a1a14bf ]
+
+IPv6 stats are 64 bits and thus are protected with a seqlock. By not
+disabling bottom-half we could deadlock here if we don't disable bh and
+a softirq reentrantly updates the same mib.
+
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -141,8 +141,8 @@ static int ip6_finish_output2(struct sk_
+ }
+ rcu_read_unlock_bh();
+
+- IP6_INC_STATS_BH(dev_net(dst->dev),
+- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
++ IP6_INC_STATS(dev_net(dst->dev),
++ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
--- /dev/null
+From 116a1c5c3f045ee7f444a0416ff72e5bc73d7c04 Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Fri, 8 Nov 2013 19:26:21 +0100
+Subject: ipv6: protect for_each_sk_fl_rcu in mem_check with rcu_read_lock_bh
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit f8c31c8f80dd882f7eb49276989a4078d33d67a7 ]
+
+Fixes a suspicious rcu dereference warning.
+
+Cc: Florent Fourcot <florent.fourcot@enst-bretagne.fr>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_flowlabel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -453,8 +453,10 @@ static int mem_check(struct sock *sk)
+ if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
+ return 0;
+
++ rcu_read_lock_bh();
+ for_each_sk_fl_rcu(np, sfl)
+ count++;
++ rcu_read_unlock_bh();
+
+ if (room <= 0 ||
+ ((count >= FL_MAX_PER_SOCK ||
--- /dev/null
+From a9db1ad10a2ede3b2c85f8ddf595f82550b0bb69 Mon Sep 17 00:00:00 2001
+From: Duan Jiong <duanj.fnst@cn.fujitsu.com>
+Date: Fri, 8 Nov 2013 09:56:53 +0800
+Subject: ipv6: use rt6_get_dflt_router to get default router in rt6_route_rcv
+
+From: Duan Jiong <duanj.fnst@cn.fujitsu.com>
+
+[ Upstream commit f104a567e673f382b09542a8dc3500aa689957b4 ]
+
+As the rfc 4191 said, the Router Preference and Lifetime values in a
+::/0 Route Information Option should override the preference and lifetime
+values in the Router Advertisement header. But when the kernel deals with
+a ::/0 Route Information Option, the rt6_get_route_info() always return
+NULL, that means that overriding will not happen, because those default
+routers were added without flag RTF_ROUTEINFO in rt6_add_dflt_router().
+
+In order to deal with that condition, we should call rt6_get_dflt_router
+when the prefix length is 0.
+
+Signed-off-by: Duan Jiong <duanj.fnst@cn.fujitsu.com>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -728,8 +728,11 @@ int rt6_route_rcv(struct net_device *dev
+ prefix = &prefix_buf;
+ }
+
+- rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+- dev->ifindex);
++ if (rinfo->prefix_len == 0)
++ rt = rt6_get_dflt_router(gwaddr, dev);
++ else
++ rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
++ gwaddr, dev->ifindex);
+
+ if (rt && !lifetime) {
+ ip6_del_rt(rt);
--- /dev/null
+From 3d614cf831e8bb8339f89f8f26992da86c1017f7 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 14 Nov 2013 11:21:10 +0300
+Subject: isdnloop: use strlcpy() instead of strcpy()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit f9a23c84486ed350cce7bb1b2828abd1f6658796 ]
+
+These strings come from a copy_from_user() and there is no way to be
+sure they are NUL terminated.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/isdnloop/isdnloop.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdn
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- for (i = 0; i < 3; i++)
+- strcpy(card->s0num[i], sdef.num[i]);
++ for (i = 0; i < 3; i++) {
++ strlcpy(card->s0num[i], sdef.num[i],
++ sizeof(card->s0num[0]));
++ }
+ break;
+ case ISDN_PTYPE_1TR6:
+ if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
+@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdn
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- strcpy(card->s0num[0], sdef.num[0]);
++ strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
+ card->s0num[1][0] = '\0';
+ card->s0num[2][0] = '\0';
+ break;
--- /dev/null
+From 869831b11cf4bb5ec2d80294d68367bb152d9fe1 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 13 Nov 2013 14:00:40 +0800
+Subject: macvtap: limit head length of skb allocated
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 16a3fa28630331e28208872fa5341ce210b901c7 ]
+
+We currently use hdr_len as a hint of head length which is advertised by
+guest. But when guest advertise a very big value, it can lead to an 64K+
+allocating of kmalloc() which has a very high possibility of failure when host
+memory is fragmented or under heavy stress. The huge hdr_len also reduce the
+effect of zerocopy or even disable if a gso skb is linearized in guest.
+
+To solves those issues, this patch introduces an upper limit (PAGE_SIZE) of the
+head, which guarantees an order 0 allocation each time.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Cc: Stefan Hajnoczi <stefanha@redhat.com>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macvtap.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -661,6 +661,7 @@ static ssize_t macvtap_get_user(struct m
+ const struct iovec *iv, unsigned long total_len,
+ size_t count, int noblock)
+ {
++ int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+ struct sk_buff *skb;
+ struct macvlan_dev *vlan;
+ unsigned long len = total_len;
+@@ -703,6 +704,8 @@ static ssize_t macvtap_get_user(struct m
+
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
++ if (copylen > good_linear)
++ copylen = good_linear;
+ linear = copylen;
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+@@ -711,7 +714,10 @@ static ssize_t macvtap_get_user(struct m
+
+ if (!zerocopy) {
+ copylen = len;
+- linear = vnet_hdr.hdr_len;
++ if (vnet_hdr.hdr_len > good_linear)
++ linear = good_linear;
++ else
++ linear = vnet_hdr.hdr_len;
+ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
--- /dev/null
+From 15c05d7e4b93b97783de3616e78f930af89074bd Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 27 Nov 2013 14:32:52 +0800
+Subject: net: 8139cp: fix a BUG_ON triggered by wrong bytes_compl
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 7fe0ee099ad5e3dea88d4ee1b6f20246b1ca57c3 ]
+
+Using iperf to send packets(GSO mode is on), a bug is triggered:
+
+[ 212.672781] kernel BUG at lib/dynamic_queue_limits.c:26!
+[ 212.673396] invalid opcode: 0000 [#1] SMP
+[ 212.673882] Modules linked in: 8139cp(O) nls_utf8 edd fuse loop dm_mod ipv6 i2c_piix4 8139too i2c_core intel_agp joydev pcspkr hid_generic intel_gtt floppy sr_mod mii button sg cdrom ext3 jbd mbcache usbhid hid uhci_hcd ehci_hcd usbcore sd_mod usb_common crc_t10dif crct10dif_common processor thermal_sys hwmon scsi_dh_emc scsi_dh_rdac scsi_dh_hp_sw scsi_dh ata_generic ata_piix libata scsi_mod [last unloaded: 8139cp]
+[ 212.676084] CPU: 0 PID: 4124 Comm: iperf Tainted: G O 3.12.0-0.7-default+ #16
+[ 212.676084] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007
+[ 212.676084] task: ffff8800d83966c0 ti: ffff8800db4c8000 task.ti: ffff8800db4c8000
+[ 212.676084] RIP: 0010:[<ffffffff8122e23f>] [<ffffffff8122e23f>] dql_completed+0x17f/0x190
+[ 212.676084] RSP: 0018:ffff880116e03e30 EFLAGS: 00010083
+[ 212.676084] RAX: 00000000000005ea RBX: 0000000000000f7c RCX: 0000000000000002
+[ 212.676084] RDX: ffff880111dd0dc0 RSI: 0000000000000bd4 RDI: ffff8800db6ffcc0
+[ 212.676084] RBP: ffff880116e03e48 R08: 0000000000000992 R09: 0000000000000000
+[ 212.676084] R10: ffffffff8181e400 R11: 0000000000000004 R12: 000000000000000f
+[ 212.676084] R13: ffff8800d94ec840 R14: ffff8800db440c80 R15: 000000000000000e
+[ 212.676084] FS: 00007f6685a3c700(0000) GS:ffff880116e00000(0000) knlGS:0000000000000000
+[ 212.676084] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 212.676084] CR2: 00007f6685ad6460 CR3: 00000000db714000 CR4: 00000000000006f0
+[ 212.676084] Stack:
+[ 212.676084] ffff8800db6ffc00 000000000000000f ffff8800d94ec840 ffff880116e03eb8
+[ 212.676084] ffffffffa041509f ffff880116e03e88 0000000f16e03e88 ffff8800d94ec000
+[ 212.676084] 00000bd400059858 000000050000000f ffffffff81094c36 ffff880116e03eb8
+[ 212.676084] Call Trace:
+[ 212.676084] <IRQ>
+[ 212.676084] [<ffffffffa041509f>] cp_interrupt+0x4ef/0x590 [8139cp]
+[ 212.676084] [<ffffffff81094c36>] ? ktime_get+0x56/0xd0
+[ 212.676084] [<ffffffff8108cf73>] handle_irq_event_percpu+0x53/0x170
+[ 212.676084] [<ffffffff8108d0cc>] handle_irq_event+0x3c/0x60
+[ 212.676084] [<ffffffff8108fdb5>] handle_fasteoi_irq+0x55/0xf0
+[ 212.676084] [<ffffffff810045df>] handle_irq+0x1f/0x30
+[ 212.676084] [<ffffffff81003c8b>] do_IRQ+0x5b/0xe0
+[ 212.676084] [<ffffffff8142beaa>] common_interrupt+0x6a/0x6a
+[ 212.676084] <EOI>
+[ 212.676084] [<ffffffffa0416a21>] ? cp_start_xmit+0x621/0x97c [8139cp]
+[ 212.676084] [<ffffffffa0416a09>] ? cp_start_xmit+0x609/0x97c [8139cp]
+[ 212.676084] [<ffffffff81378ed9>] dev_hard_start_xmit+0x2c9/0x550
+[ 212.676084] [<ffffffff813960a9>] sch_direct_xmit+0x179/0x1d0
+[ 212.676084] [<ffffffff813793f3>] dev_queue_xmit+0x293/0x440
+[ 212.676084] [<ffffffff813b0e46>] ip_finish_output+0x236/0x450
+[ 212.676084] [<ffffffff810e59e7>] ? __alloc_pages_nodemask+0x187/0xb10
+[ 212.676084] [<ffffffff813b10e8>] ip_output+0x88/0x90
+[ 212.676084] [<ffffffff813afa64>] ip_local_out+0x24/0x30
+[ 212.676084] [<ffffffff813aff0d>] ip_queue_xmit+0x14d/0x3e0
+[ 212.676084] [<ffffffff813c6fd1>] tcp_transmit_skb+0x501/0x840
+[ 212.676084] [<ffffffff813c8323>] tcp_write_xmit+0x1e3/0xb20
+[ 212.676084] [<ffffffff81363237>] ? skb_page_frag_refill+0x87/0xd0
+[ 212.676084] [<ffffffff813c8c8b>] tcp_push_one+0x2b/0x40
+[ 212.676084] [<ffffffff813bb7e6>] tcp_sendmsg+0x926/0xc90
+[ 212.676084] [<ffffffff813e1d21>] inet_sendmsg+0x61/0xc0
+[ 212.676084] [<ffffffff8135e861>] sock_aio_write+0x101/0x120
+[ 212.676084] [<ffffffff81107cf1>] ? vma_adjust+0x2e1/0x5d0
+[ 212.676084] [<ffffffff812163e0>] ? timerqueue_add+0x60/0xb0
+[ 212.676084] [<ffffffff81130b60>] do_sync_write+0x60/0x90
+[ 212.676084] [<ffffffff81130d44>] ? rw_verify_area+0x54/0xf0
+[ 212.676084] [<ffffffff81130f66>] vfs_write+0x186/0x190
+[ 212.676084] [<ffffffff811317fd>] SyS_write+0x5d/0xa0
+[ 212.676084] [<ffffffff814321e2>] system_call_fastpath+0x16/0x1b
+[ 212.676084] Code: ca 41 89 dc 41 29 cc 45 31 db 29 c2 41 89 c5 89 d0 45 29 c5 f7 d0 c1 e8 1f e9 43 ff ff ff 66 0f 1f 44 00 00 31 c0 e9 7b ff ff ff <0f> 0b eb fe 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 c7 47 40 00
+[ 212.676084] RIP [<ffffffff8122e23f>] dql_completed+0x17f/0x190
+------------[ cut here ]------------
+
+When a skb has frags, bytes_compl plus skb->len nr_frags times in cp_tx().
+It's not the correct value(actually, it should plus skb->len once) and it
+will trigger the BUG_ON(bytes_compl > num_queued - dql->num_completed).
+So only increase bytes_compl when finish sending all frags. pkts_compl also
+has a wrong value, fix it too.
+
+It's introduced by commit 871f0d4c ("8139cp: enable bql").
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/8139cp.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp
+ le32_to_cpu(txd->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+
+- bytes_compl += skb->len;
+- pkts_compl++;
+-
+ if (status & LastFrag) {
+ if (status & (TxError | TxFIFOUnder)) {
+ netif_dbg(cp, tx_err, cp->dev,
+@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
+ }
++ bytes_compl += skb->len;
++ pkts_compl++;
+ dev_kfree_skb_irq(skb);
+ }
+
--- /dev/null
+From a23a37d03bace468a313092a83ca94089e109349 Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Thu, 21 Nov 2013 03:14:34 +0100
+Subject: net: add BUG_ON if kernel advertises msg_namelen > sizeof(struct sockaddr_storage)
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 68c6beb373955da0886d8f4f5995b3922ceda4be ]
+
+In that case it is probable that kernel code overwrote part of the
+stack. So we should bail out loudly here.
+
+The BUG_ON may be removed in future if we are sure all protocols are
+conformant.
+
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -215,12 +215,13 @@ static int move_addr_to_user(struct sock
+ int err;
+ int len;
+
++ BUG_ON(klen > sizeof(struct sockaddr_storage));
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+- if (len < 0 || len > sizeof(struct sockaddr_storage))
++ if (len < 0)
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
--- /dev/null
+From f1fe8bb11aa2772335c3e66da2bbdbaba36285ae Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 27 Nov 2013 15:40:21 +0300
+Subject: net: clamp ->msg_namelen instead of returning an error
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit db31c55a6fb245fdbb752a2ca4aefec89afabb06 ]
+
+If kmsg->msg_namelen > sizeof(struct sockaddr_storage) then in the
+original code that would lead to memory corruption in the kernel if you
+had audit configured. If you didn't have audit configured it was
+harmless.
+
+There are some programs such as beta versions of Ruby which use too
+large of a buffer and returning an error code breaks them. We should
+clamp the ->msg_namelen value instead.
+
+Fixes: 1661bf364ae9 ("net: heap overflow in __audit_sockaddr()")
+Reported-by: Eric Wong <normalperson@yhbt.net>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Tested-by: Eric Wong <normalperson@yhbt.net>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/compat.c | 2 +-
+ net/socket.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kms
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1965,7 +1965,7 @@ static int copy_msghdr_from_user(struct
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+ }
+
--- /dev/null
+From 8a614e7b1ff979ace2391195dd9031567a2af5b4 Mon Sep 17 00:00:00 2001
+From: Vlad Yasevich <vyasevic@redhat.com>
+Date: Tue, 19 Nov 2013 20:47:15 -0500
+Subject: net: core: Always propagate flag changes to interfaces
+
+From: Vlad Yasevich <vyasevic@redhat.com>
+
+[ Upstream commit d2615bf450694c1302d86b9cc8a8958edfe4c3a4 ]
+
+The following commit:
+ b6c40d68ff6498b7f63ddf97cf0aa818d748dee7
+ net: only invoke dev->change_rx_flags when device is UP
+
+tried to fix a problem with VLAN devices and promiscuous flag setting.
+The issue was that VLAN device was setting a flag on an interface that
+was down, thus resulting in bad promiscuity count.
+This commit blocked flag propagation to any device that is currently
+down.
+
+A later commit:
+ deede2fabe24e00bd7e246eb81cd5767dc6fcfc7
+ vlan: Don't propagate flag changes on down interfaces
+
+fixed VLAN code to only propagate flags when the VLAN interface is up,
+thus fixing the same issue as above, only localized to VLAN.
+
+The problem we have now is that if we have create a complex stack
+involving multiple software devices like bridges, bonds, and vlans,
+then it is possible that the flags would not propagate properly to
+the physical devices. A simple example of the scenario is the
+following:
+
+ eth0----> bond0 ----> bridge0 ---> vlan50
+
+If bond0 or eth0 happen to be down at the time bond0 is added to
+the bridge, then eth0 will never have promisc mode set which is
+currently required for operation as part of the bridge. As a
+result, packets with vlan50 will be dropped by the interface.
+
+The only 2 devices that implement the special flag handling are
+VLAN and DSA and they both have required code to prevent incorrect
+flag propagation. As a result we can remove the generic solution
+introduced in b6c40d68ff6498b7f63ddf97cf0aa818d748dee7 and leave
+it to the individual devices to decide whether they will block
+flag propagation or not.
+
+Reported-by: Stefan Priebe <s.priebe@profihost.ag>
+Suggested-by: Veaceslav Falico <vfalico@redhat.com>
+Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4478,7 +4478,7 @@ static void dev_change_rx_flags(struct n
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+- if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
++ if (ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+ }
+
--- /dev/null
+From 901f0c66a7761d07ce1fb1cefcb3f10b6e7856c1 Mon Sep 17 00:00:00 2001
+From: Andreas Henriksson <andreas@fatal.se>
+Date: Thu, 7 Nov 2013 18:26:38 +0100
+Subject: net: Fix "ip rule delete table 256"
+
+From: Andreas Henriksson <andreas@fatal.se>
+
+[ Upstream commit 13eb2ab2d33c57ebddc57437a7d341995fc9138c ]
+
+When trying to delete a table >= 256 using iproute2 the local table
+will be deleted.
+The table id is specified as a netlink attribute when it needs more than
+8 bits and iproute2 then sets the table field to RT_TABLE_UNSPEC (0).
+Preconditions to matching the table id in the rule delete code
+doesn't seem to take the "table id in netlink attribute" into condition
+so the frh_get_table helper function never gets to do its job when
+matching against current rule.
+Use the helper function twice instead of peeking at the table value directly.
+
+Originally reported at: http://bugs.debian.org/724783
+
+Reported-by: Nicolas HICHER <nhicher@avencall.com>
+Signed-off-by: Andreas Henriksson <andreas@fatal.se>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/fib_rules.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -445,7 +445,8 @@ static int fib_nl_delrule(struct sk_buff
+ if (frh->action && (frh->action != rule->action))
+ continue;
+
+- if (frh->table && (frh_get_table(frh, tb) != rule->table))
++ if (frh_get_table(frh, tb) &&
++ (frh_get_table(frh, tb) != rule->table))
+ continue;
+
+ if (tb[FRA_PRIORITY] &&
--- /dev/null
+From 1e7b211c28959af942642d25d6e1917de0163ca0 Mon Sep 17 00:00:00 2001
+From: Amir Vadai <amirv@mellanox.com>
+Date: Thu, 7 Nov 2013 11:08:30 +0200
+Subject: net/mlx4_en: Fixed crash when port type is changed
+
+From: Amir Vadai <amirv@mellanox.com>
+
+[ Upstream commit 1ec4864b10171b0691ee196d7006ae56d2c153f2 ]
+
+timecounter_init() was called only after the first potential
+timecounter_read().
+Moved mlx4_en_init_timestamp() before mlx4_en_init_netdev()
+
+Signed-off-by: Amir Vadai <amirv@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ mdev->port_cnt++;
+
++ /* Initialize time stamp mechanism */
++ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
++ mlx4_en_init_timestamp(mdev);
++
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool) {
+ mdev->profile.prof[i].rx_ring_num =
+@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev
+ mdev->pndev[i] = NULL;
+ }
+
+- /* Initialize time stamp mechanism */
+- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+- mlx4_en_init_timestamp(mdev);
+-
+ return mdev;
+
+ err_mr:
--- /dev/null
+From c3f3855abf05e913f89699adf8d88e7abad5e06e Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Thu, 21 Nov 2013 03:14:22 +0100
+Subject: net: rework recvmsg handler msg_name and msg_namelen logic
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit f3d3342602f8bcbf37d7c46641cb9bca7618eb1c ]
+
+This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
+set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
+to return msg_name to the user.
+
+This prevents numerous uninitialized memory leaks we had in the
+recvmsg handlers and makes it harder for new code to accidentally leak
+uninitialized memory.
+
+Optimize for the case recvfrom is called with NULL as address. We don't
+need to copy the address at all, so set it to NULL before invoking the
+recvmsg handler. We can do so, because all the recvmsg handlers must
+cope with the case a plain read() is called on them. read() also sets
+msg_name to NULL.
+
+Also document these changes in include/linux/net.h as suggested by David
+Miller.
+
+Changes since RFC:
+
+Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
+non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
+affect sendto as it would bail out earlier while trying to copy-in the
+address. It also more naturally reflects the logic by the callers of
+verify_iovec.
+
+With this change in place I could remove "
+if (!uaddr || msg_sys->msg_namelen == 0)
+ msg->msg_name = NULL
+".
+
+This change does not alter the user visible error logic as we ignore
+msg_namelen as long as msg_name is NULL.
+
+Also remove two unnecessary curly brackets in ___sys_recvmsg and change
+comments to netdev style.
+
+Cc: David Miller <davem@davemloft.net>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_hash.c | 2 --
+ crypto/algif_skcipher.c | 1 -
+ drivers/isdn/mISDN/socket.c | 13 ++++---------
+ drivers/net/ppp/pppoe.c | 2 --
+ include/linux/net.h | 8 ++++++++
+ net/appletalk/ddp.c | 16 +++++++---------
+ net/atm/common.c | 2 --
+ net/ax25/af_ax25.c | 4 ++--
+ net/bluetooth/af_bluetooth.c | 4 ----
+ net/bluetooth/hci_sock.c | 2 --
+ net/bluetooth/rfcomm/sock.c | 1 -
+ net/bluetooth/sco.c | 1 -
+ net/caif/caif_socket.c | 4 ----
+ net/compat.c | 3 ++-
+ net/core/iovec.c | 3 ++-
+ net/ipx/af_ipx.c | 3 +--
+ net/irda/af_irda.c | 4 ----
+ net/iucv/af_iucv.c | 2 --
+ net/key/af_key.c | 1 -
+ net/l2tp/l2tp_ppp.c | 2 --
+ net/llc/af_llc.c | 2 --
+ net/netlink/af_netlink.c | 2 --
+ net/netrom/af_netrom.c | 3 +--
+ net/nfc/llcp_sock.c | 2 --
+ net/nfc/rawsock.c | 2 --
+ net/packet/af_packet.c | 32 +++++++++++++++-----------------
+ net/rds/recv.c | 2 --
+ net/rose/af_rose.c | 8 +++++---
+ net/rxrpc/ar-recvmsg.c | 9 ++++++---
+ net/socket.c | 19 +++++++++++--------
+ net/tipc/socket.c | 6 ------
+ net/unix/af_unix.c | 5 -----
+ net/vmw_vsock/af_vsock.c | 2 --
+ net/vmw_vsock/vmci_transport.c | 2 --
+ net/x25/af_x25.c | 3 +--
+ 35 files changed, 65 insertions(+), 112 deletions(-)
+
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -161,8 +161,6 @@ static int hash_recvmsg(struct kiocb *un
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -432,7 +432,6 @@ static int skcipher_recvmsg(struct kiocb
+ long copied = 0;
+
+ lock_sock(sk);
+- msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, s
+ {
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+- struct sockaddr_mISDN *maddr;
+
+ int copied, err;
+
+@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, s
+ if (!skb)
+ return err;
+
+- if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+- msg->msg_namelen = sizeof(struct sockaddr_mISDN);
+- maddr = (struct sockaddr_mISDN *)msg->msg_name;
++ if (msg->msg_name) {
++ struct sockaddr_mISDN *maddr = msg->msg_name;
++
+ maddr->family = AF_ISDN;
+ maddr->dev = _pms(sk)->dev->id;
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, s
+ maddr->sapi = _pms(sk)->ch.addr & 0xFF;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
+ }
+- } else {
+- if (msg->msg_namelen)
+- printk(KERN_WARNING "%s: too small namelen %d\n",
+- __func__, msg->msg_namelen);
+- msg->msg_namelen = 0;
++ msg->msg_namelen = sizeof(*maddr);
+ }
+
+ copied = skb->len + MISDN_HEADER_LEN;
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *i
+ if (error < 0)
+ goto end;
+
+- m->msg_namelen = 0;
+-
+ if (skb) {
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -163,6 +163,14 @@ struct proto_ops {
+ #endif
+ int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len);
++ /* Notes for implementing recvmsg:
++ * ===============================
++ * msg->msg_namelen should get updated by the recvmsg handlers
++ * iff msg_name != NULL. It is by default 0 to prevent
++ * returning uninitialized memory to user space. The recvfrom
++ * handlers can assume that msg.msg_name is either NULL or has
++ * a minimum size of sizeof(struct sockaddr_storage).
++ */
+ int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags);
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1735,7 +1735,6 @@ static int atalk_recvmsg(struct kiocb *i
+ size_t size, int flags)
+ {
+ struct sock *sk = sock->sk;
+- struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
+ struct ddpehdr *ddp;
+ int copied = 0;
+ int offset = 0;
+@@ -1764,14 +1763,13 @@ static int atalk_recvmsg(struct kiocb *i
+ }
+ err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+
+- if (!err) {
+- if (sat) {
+- sat->sat_family = AF_APPLETALK;
+- sat->sat_port = ddp->deh_sport;
+- sat->sat_addr.s_node = ddp->deh_snode;
+- sat->sat_addr.s_net = ddp->deh_snet;
+- }
+- msg->msg_namelen = sizeof(*sat);
++ if (!err && msg->msg_name) {
++ struct sockaddr_at *sat = msg->msg_name;
++ sat->sat_family = AF_APPLETALK;
++ sat->sat_port = ddp->deh_sport;
++ sat->sat_addr.s_node = ddp->deh_snode;
++ sat->sat_addr.s_net = ddp->deh_snet;
++ msg->msg_namelen = sizeof(*sat);
+ }
+
+ skb_free_datagram(sk, skb); /* Free the datagram. */
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -531,8 +531,6 @@ int vcc_recvmsg(struct kiocb *iocb, stru
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
+- msg->msg_namelen = 0;
+-
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1636,11 +1636,11 @@ static int ax25_recvmsg(struct kiocb *io
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (msg->msg_namelen != 0) {
+- struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
++ if (msg->msg_name) {
+ ax25_digi digi;
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
++ struct sockaddr_ax25 *sax = msg->msg_name;
+
+ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -221,8 +221,6 @@ int bt_sock_recvmsg(struct kiocb *iocb,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -287,8 +285,6 @@ int bt_sock_stream_recvmsg(struct kiocb
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ BT_DBG("sk %p size %zu", sk, size);
+
+ lock_sock(sk);
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -752,8 +752,6 @@ static int hci_sock_recvmsg(struct kiocb
+ if (!skb)
+ return err;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -608,7 +608,6 @@ static int rfcomm_sock_recvmsg(struct ki
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
+- msg->msg_namelen = 0;
+ return 0;
+ }
+
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -700,7 +700,6 @@ static int sco_sock_recvmsg(struct kiocb
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sco_conn_defer_accept(pi->conn->hcon, 0);
+ sk->sk_state = BT_CONFIG;
+- msg->msg_namelen = 0;
+
+ release_sock(sk);
+ return 0;
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct ki
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
+- m->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+@@ -361,8 +359,6 @@ static int caif_stream_recvmsg(struct ki
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *k
+ if (err < 0)
+ return err;
+ }
+- kern_msg->msg_name = kern_address;
++ if (kern_msg->msg_name)
++ kern_msg->msg_name = kern_address;
+ } else
+ kern_msg->msg_name = NULL;
+
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struc
+ if (err < 0)
+ return err;
+ }
+- m->msg_name = address;
++ if (m->msg_name)
++ m->msg_name = address;
+ } else {
+ m->msg_name = NULL;
+ }
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1823,8 +1823,6 @@ static int ipx_recvmsg(struct kiocb *ioc
+ if (skb->tstamp.tv64)
+ sk->sk_stamp = skb->tstamp;
+
+- msg->msg_namelen = sizeof(*sipx);
+-
+ if (sipx) {
+ sipx->sipx_family = AF_IPX;
+ sipx->sipx_port = ipx->ipx_source.sock;
+@@ -1832,6 +1830,7 @@ static int ipx_recvmsg(struct kiocb *ioc
+ sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
+ sipx->sipx_type = ipx->ipx_type;
+ sipx->sipx_zero = 0;
++ msg->msg_namelen = sizeof(*sipx);
+ }
+ rc = copied;
+
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1385,8 +1385,6 @@ static int irda_recvmsg_dgram(struct kio
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+@@ -1451,8 +1449,6 @@ static int irda_recvmsg_stream(struct ki
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+
+- msg->msg_namelen = 0;
+-
+ do {
+ int chunk;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1324,8 +1324,6 @@ static int iucv_sock_recvmsg(struct kioc
+ int err = 0;
+ u32 offset;
+
+- msg->msg_namelen = 0;
+-
+ if ((sk->sk_state == IUCV_DISCONN) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3623,7 +3623,6 @@ static int pfkey_recvmsg(struct kiocb *k
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ goto out;
+
+- msg->msg_namelen = 0;
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto out;
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -197,8 +197,6 @@ static int pppol2tp_recvmsg(struct kiocb
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+- msg->msg_namelen = 0;
+-
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *
+ int target; /* Read at least this many bytes */
+ long timeo;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2168,8 +2168,6 @@ static int netlink_recvmsg(struct kiocb
+ }
+ #endif
+
+- msg->msg_namelen = 0;
+-
+ copied = data_skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1179,10 +1179,9 @@ static int nr_recvmsg(struct kiocb *iocb
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
++ msg->msg_namelen = sizeof(*sax);
+ }
+
+- msg->msg_namelen = sizeof(*sax);
+-
+ skb_free_datagram(sk, skb);
+
+ release_sock(sk);
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -800,8 +800,6 @@ static int llcp_sock_recvmsg(struct kioc
+
+ pr_debug("%p %zu\n", sk, len);
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CLOSED &&
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -241,8 +241,6 @@ static int rawsock_recvmsg(struct kiocb
+ if (!skb)
+ return rc;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2694,7 +2694,6 @@ static int packet_recvmsg(struct kiocb *
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int copied, err;
+- struct sockaddr_ll *sll;
+ int vnet_hdr_len = 0;
+
+ err = -EINVAL;
+@@ -2777,22 +2776,10 @@ static int packet_recvmsg(struct kiocb *
+ goto out_free;
+ }
+
+- /*
+- * If the address length field is there to be filled in, we fill
+- * it in now.
++ /* You lose any data beyond the buffer you gave. If it worries
++ * a user program they can ask the device for its MTU
++ * anyway.
+ */
+-
+- sll = &PACKET_SKB_CB(skb)->sa.ll;
+- if (sock->type == SOCK_PACKET)
+- msg->msg_namelen = sizeof(struct sockaddr_pkt);
+- else
+- msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
+-
+- /*
+- * You lose any data beyond the buffer you gave. If it worries a
+- * user program they can ask the device for its MTU anyway.
+- */
+-
+ copied = skb->len;
+ if (copied > len) {
+ copied = len;
+@@ -2805,9 +2792,20 @@ static int packet_recvmsg(struct kiocb *
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+- if (msg->msg_name)
++ if (msg->msg_name) {
++ /* If the address length field is there to be filled
++ * in, we fill it in now.
++ */
++ if (sock->type == SOCK_PACKET) {
++ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ } else {
++ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++ msg->msg_namelen = sll->sll_halen +
++ offsetof(struct sockaddr_ll, sll_addr);
++ }
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+ msg->msg_namelen);
++ }
+
+ if (pkt_sk(sk)->auxdata) {
+ struct tpacket_auxdata aux;
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, stru
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
+- msg->msg_namelen = 0;
+-
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1216,7 +1216,6 @@ static int rose_recvmsg(struct kiocb *io
+ {
+ struct sock *sk = sock->sk;
+ struct rose_sock *rose = rose_sk(sk);
+- struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
+ size_t copied;
+ unsigned char *asmptr;
+ struct sk_buff *skb;
+@@ -1252,8 +1251,11 @@ static int rose_recvmsg(struct kiocb *io
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (srose != NULL) {
+- memset(srose, 0, msg->msg_namelen);
++ if (msg->msg_name) {
++ struct sockaddr_rose *srose;
++
++ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
++ srose = msg->msg_name;
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, st
+
+ /* copy the peer address and timestamp */
+ if (!continue_call) {
+- if (msg->msg_name && msg->msg_namelen > 0)
++ if (msg->msg_name) {
++ size_t len =
++ sizeof(call->conn->trans->peer->srx);
+ memcpy(msg->msg_name,
+- &call->conn->trans->peer->srx,
+- sizeof(call->conn->trans->peer->srx));
++ &call->conn->trans->peer->srx, len);
++ msg->msg_namelen = len;
++ }
+ sock_recv_ts_and_drops(msg, &rx->sk, skb);
+ }
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1832,8 +1832,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+- msg.msg_name = (struct sockaddr *)&address;
+- msg.msg_namelen = sizeof(address);
++ /* Save some cycles and don't copy the address if not needed */
++ msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg.msg_namelen = 0;
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+@@ -2213,16 +2215,14 @@ static int ___sys_recvmsg(struct socket
+ goto out;
+ }
+
+- /*
+- * Save the user-mode address (verify_iovec will change the
+- * kernel msghdr to use the kernel address space)
++ /* Save the user-mode address (verify_iovec will change the
++ * kernel msghdr to use the kernel address space)
+ */
+-
+ uaddr = (__force void __user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+- if (MSG_CMSG_COMPAT & flags) {
++ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+- } else
++ else
+ err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+ if (err < 0)
+ goto out_freeiov;
+@@ -2231,6 +2231,9 @@ static int ___sys_recvmsg(struct socket
+ cmsg_ptr = (unsigned long)msg_sys->msg_control;
+ msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg_sys->msg_namelen = 0;
++
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -905,9 +905,6 @@ static int recv_msg(struct kiocb *iocb,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1017,9 +1014,6 @@ static int recv_stream(struct kiocb *ioc
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1761,7 +1761,6 @@ static void unix_copy_addr(struct msghdr
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- msg->msg_namelen = 0;
+ if (u->addr) {
+ msg->msg_namelen = u->addr->len;
+ memcpy(msg->msg_name, u->addr->name, u->addr->len);
+@@ -1785,8 +1784,6 @@ static int unix_dgram_recvmsg(struct kio
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err) {
+ err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+@@ -1926,8 +1923,6 @@ static int unix_stream_recvmsg(struct ki
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+- msg->msg_namelen = 0;
+-
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1670,8 +1670,6 @@ vsock_stream_recvmsg(struct kiocb *kiocb
+ vsk = vsock_sk(sk);
+ err = 0;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+
+ if (sk->sk_state != SS_CONNECTED) {
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1746,8 +1746,6 @@ static int vmci_transport_dgram_dequeue(
+ if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ /* Retrieve the head sk_buff from the socket's receive queue. */
+ err = 0;
+ skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1340,10 +1340,9 @@ static int x25_recvmsg(struct kiocb *ioc
+ if (sx25) {
+ sx25->sx25_family = AF_X25;
+ sx25->sx25_addr = x25->dest_addr;
++ msg->msg_namelen = sizeof(*sx25);
+ }
+
+- msg->msg_namelen = sizeof(struct sockaddr_x25);
+-
+ x25_check_rbuf(sk);
+ rc = copied;
+ out_free_dgram:
--- /dev/null
+From 07d457d33d7b855caf511ba27ae2973a2a28d2d6 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 13 Nov 2013 15:00:46 -0800
+Subject: net-tcp: fix panic in tcp_fastopen_cache_set()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit dccf76ca6b626c0c4a4e09bb221adee3270ab0ef ]
+
+We had some reports of crashes using TCP fastopen, and Dave Jones
+gave a nice stack trace pointing to the error.
+
+Issue is that tcp_get_metrics() should not be called with a NULL dst
+
+Fixes: 1fe4c481ba637 ("net-tcp: Fast Open client - cookie cache")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Dave Jones <davej@redhat.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Tested-by: Dave Jones <davej@fedoraproject.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_metrics.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -665,10 +665,13 @@ void tcp_fastopen_cache_get(struct sock
+ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost)
+ {
++ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_metrics_block *tm;
+
++ if (!dst)
++ return;
+ rcu_read_lock();
+- tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
++ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+
--- /dev/null
+From e0960eb977786c2fe216d4f779e086cd347bf81d Mon Sep 17 00:00:00 2001
+From: Shawn Landden <shawn@churchofgit.com>
+Date: Sun, 24 Nov 2013 22:36:28 -0800
+Subject: net: update consumers of MSG_MORE to recognize MSG_SENDPAGE_NOTLAST
+
+From: Shawn Landden <shawn@churchofgit.com>
+
+[ Upstream commit d3f7d56a7a4671d395e8af87071068a195257bf6 ]
+
+Commit 35f9c09fe (tcp: tcp_sendpages() should call tcp_push() once)
+added an internal flag MSG_SENDPAGE_NOTLAST, similar to
+MSG_MORE.
+
+algif_hash, algif_skcipher, and udp used MSG_MORE from tcp_sendpages()
+and need to see the new flag as identical to MSG_MORE.
+
+This fixes sendfile() on AF_ALG.
+
+v3: also fix udp
+
+Reported-and-tested-by: Shawn Landden <shawnlandden@gmail.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: David S. Miller <davem@davemloft.net>
+Original-patch: Richard Weinberger <richard@nod.at>
+Signed-off-by: Shawn Landden <shawn@churchofgit.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_hash.c | 3 +++
+ crypto/algif_skcipher.c | 3 +++
+ net/ipv4/udp.c | 3 +++
+ 3 files changed, 9 insertions(+)
+
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct sock
+ struct hash_ctx *ctx = ask->private;
+ int err;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct
+ struct skcipher_sg_list *sgl;
+ int err = -EINVAL;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1070,6 +1070,9 @@ int udp_sendpage(struct sock *sk, struct
+ struct udp_sock *up = udp_sk(sk);
+ int ret;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
--- /dev/null
+From 9bee8449c0cb4ced48dd3661aa94aeb02d9a1c9c Mon Sep 17 00:00:00 2001
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Wed, 6 Nov 2013 17:52:20 +0100
+Subject: netfilter: push reasm skb through instead of original frag skbs
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 6aafeef03b9d9ecf255f3a80ed85ee070260e1ae ]
+
+Pushing original fragments through causes several problems. For example
+for matching, frags may not be matched correctly. Take following
+example:
+
+<example>
+On HOSTA do:
+ip6tables -I INPUT -p icmpv6 -j DROP
+ip6tables -I INPUT -p icmpv6 -m icmp6 --icmpv6-type 128 -j ACCEPT
+
+and on HOSTB you do:
+ping6 HOSTA -s2000 (MTU is 1500)
+
+Incoming echo requests will be filtered out on HOSTA. This issue does
+not occur with smaller packets than MTU (where fragmentation does not happen)
+</example>
+
+As was discussed previously, the only correct solution seems to be to use
+the reassembled skb instead of separate frags. Doing this has positive side
+effects in reducing sk_buff by one pointer (nfct_reasm) and also the reasm
+dances in ipvs and conntrack can be removed.
+
+Future plan is to remove net/ipv6/netfilter/nf_conntrack_reasm.c
+entirely and use code in net/ipv6/reassembly.c instead.
+
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 32 --------------
+ include/net/ip_vs.h | 32 --------------
+ include/net/netfilter/ipv6/nf_defrag_ipv6.h | 5 --
+ net/core/skbuff.c | 3 -
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 54 ------------------------
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 19 --------
+ net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 7 ++-
+ net/netfilter/ipvs/ip_vs_core.c | 55 -------------------------
+ net/netfilter/ipvs/ip_vs_pe_sip.c | 8 ---
+ 9 files changed, 13 insertions(+), 202 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -331,11 +331,6 @@ typedef unsigned int sk_buff_data_t;
+ typedef unsigned char *sk_buff_data_t;
+ #endif
+
+-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+- defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+-#endif
+-
+ /**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+@@ -368,7 +363,6 @@ typedef unsigned char *sk_buff_data_t;
+ * @protocol: Packet protocol from driver
+ * @destructor: Destruct function
+ * @nfct: Associated connection, if any
+- * @nfct_reasm: netfilter conntrack re-assembly pointer
+ * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ * @skb_iif: ifindex of device we arrived on
+ * @tc_index: Traffic control index
+@@ -455,9 +449,6 @@ struct sk_buff {
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- struct sk_buff *nfct_reasm;
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ struct nf_bridge_info *nf_bridge;
+ #endif
+@@ -2700,18 +2691,6 @@ static inline void nf_conntrack_get(stru
+ atomic_inc(&nfct->use);
+ }
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
+-{
+- if (skb)
+- atomic_inc(&skb->users);
+-}
+-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
+-{
+- if (skb)
+- kfree_skb(skb);
+-}
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+ {
+@@ -2730,10 +2709,6 @@ static inline void nf_reset(struct sk_bu
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(skb->nfct_reasm);
+- skb->nfct_reasm = NULL;
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+@@ -2755,10 +2730,6 @@ static inline void __nf_copy(struct sk_b
+ nf_conntrack_get(src->nfct);
+ dst->nfctinfo = src->nfctinfo;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- dst->nfct_reasm = src->nfct_reasm;
+- nf_conntrack_get_reasm(src->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ dst->nf_bridge = src->nf_bridge;
+ nf_bridge_get(src->nf_bridge);
+@@ -2770,9 +2741,6 @@ static inline void nf_copy(struct sk_buf
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put(dst->nfct);
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(dst->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(dst->nf_bridge);
+ #endif
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -109,7 +109,6 @@ extern int ip_vs_conn_tab_size;
+ struct ip_vs_iphdr {
+ __u32 len; /* IPv4 simply where L4 starts
+ IPv6 where L4 Transport Header starts */
+- __u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
+ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
+ __s16 protocol;
+ __s32 flags;
+@@ -117,34 +116,12 @@ struct ip_vs_iphdr {
+ union nf_inet_addr daddr;
+ };
+
+-/* Dependency to module: nf_defrag_ipv6 */
+-#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+-{
+- return skb->nfct_reasm;
+-}
+ static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+ int len, void *buffer,
+ const struct ip_vs_iphdr *ipvsh)
+ {
+- if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
+- return skb_header_pointer(skb_nfct_reasm(skb),
+- ipvsh->thoff_reasm, len, buffer);
+-
+ return skb_header_pointer(skb, offset, len, buffer);
+ }
+-#else
+-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+-{
+- return NULL;
+-}
+-static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+- int len, void *buffer,
+- const struct ip_vs_iphdr *ipvsh)
+-{
+- return skb_header_pointer(skb, offset, len, buffer);
+-}
+-#endif
+
+ static inline void
+ ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
+@@ -171,19 +148,12 @@ ip_vs_fill_iph_skb(int af, const struct
+ (struct ipv6hdr *)skb_network_header(skb);
+ iphdr->saddr.in6 = iph->saddr;
+ iphdr->daddr.in6 = iph->daddr;
+- /* ipv6_find_hdr() updates len, flags, thoff_reasm */
+- iphdr->thoff_reasm = 0;
++ /* ipv6_find_hdr() updates len, flags */
+ iphdr->len = 0;
+ iphdr->flags = 0;
+ iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
+ &iphdr->fragoffs,
+ &iphdr->flags);
+- /* get proto from re-assembled packet and it's offset */
+- if (skb_nfct_reasm(skb))
+- iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
+- &iphdr->thoff_reasm,
+- -1, NULL, NULL);
+-
+ } else
+ #endif
+ {
+--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+@@ -6,10 +6,7 @@ extern void nf_defrag_ipv6_enable(void);
+ extern int nf_ct_frag6_init(void);
+ extern void nf_ct_frag6_cleanup(void);
+ extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+- struct net_device *in,
+- struct net_device *out,
+- int (*okfn)(struct sk_buff *));
++extern void nf_ct_frag6_consume_orig(struct sk_buff *skb);
+
+ struct inet_frags_ctl;
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -585,9 +585,6 @@ static void skb_release_head_state(struc
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(skb->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(skb->nf_bridge);
+ #endif
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -172,63 +172,13 @@ out:
+ return nf_conntrack_confirm(skb);
+ }
+
+-static unsigned int __ipv6_conntrack_in(struct net *net,
+- unsigned int hooknum,
+- struct sk_buff *skb,
+- const struct net_device *in,
+- const struct net_device *out,
+- int (*okfn)(struct sk_buff *))
+-{
+- struct sk_buff *reasm = skb->nfct_reasm;
+- const struct nf_conn_help *help;
+- struct nf_conn *ct;
+- enum ip_conntrack_info ctinfo;
+-
+- /* This packet is fragmented and has reassembled packet. */
+- if (reasm) {
+- /* Reassembled packet isn't parsed yet ? */
+- if (!reasm->nfct) {
+- unsigned int ret;
+-
+- ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
+- if (ret != NF_ACCEPT)
+- return ret;
+- }
+-
+- /* Conntrack helpers need the entire reassembled packet in the
+- * POST_ROUTING hook. In case of unconfirmed connections NAT
+- * might reassign a helper, so the entire packet is also
+- * required.
+- */
+- ct = nf_ct_get(reasm, &ctinfo);
+- if (ct != NULL && !nf_ct_is_untracked(ct)) {
+- help = nfct_help(ct);
+- if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
+- nf_conntrack_get_reasm(reasm);
+- NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
+- (struct net_device *)in,
+- (struct net_device *)out,
+- okfn, NF_IP6_PRI_CONNTRACK + 1);
+- return NF_DROP_ERR(-ECANCELED);
+- }
+- }
+-
+- nf_conntrack_get(reasm->nfct);
+- skb->nfct = reasm->nfct;
+- skb->nfctinfo = reasm->nfctinfo;
+- return NF_ACCEPT;
+- }
+-
+- return nf_conntrack_in(net, PF_INET6, hooknum, skb);
+-}
+-
+ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+ {
+- return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
++ return nf_conntrack_in(dev_net(in), PF_INET6, hooknum, skb);
+ }
+
+ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+@@ -242,7 +192,7 @@ static unsigned int ipv6_conntrack_local
+ net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
+ return NF_ACCEPT;
+ }
+- return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
++ return nf_conntrack_in(dev_net(out), PF_INET6, hooknum, skb);
+ }
+
+ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -621,31 +621,16 @@ ret_orig:
+ return skb;
+ }
+
+-void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+- struct net_device *in, struct net_device *out,
+- int (*okfn)(struct sk_buff *))
++void nf_ct_frag6_consume_orig(struct sk_buff *skb)
+ {
+ struct sk_buff *s, *s2;
+- unsigned int ret = 0;
+
+ for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
+- nf_conntrack_put_reasm(s->nfct_reasm);
+- nf_conntrack_get_reasm(skb);
+- s->nfct_reasm = skb;
+-
+ s2 = s->next;
+ s->next = NULL;
+-
+- if (ret != -ECANCELED)
+- ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
+- in, out, okfn,
+- NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+- else
+- kfree_skb(s);
+-
++ consume_skb(s);
+ s = s2;
+ }
+- nf_conntrack_put_reasm(skb);
+ }
+
+ static int nf_ct_net_init(struct net *net)
+--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
++++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+@@ -75,8 +75,11 @@ static unsigned int ipv6_defrag(unsigned
+ if (reasm == skb)
+ return NF_ACCEPT;
+
+- nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+- (struct net_device *)out, okfn);
++ nf_ct_frag6_consume_orig(reasm);
++
++ NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
++ (struct net_device *) in, (struct net_device *) out,
++ okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+
+ return NF_STOLEN;
+ }
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1131,12 +1131,6 @@ ip_vs_out(unsigned int hooknum, struct s
+ ip_vs_fill_iph_skb(af, skb, &iph);
+ #ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+- if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- /* Save fw mark for coming frags */
+- reasm->ipvs_property = 1;
+- reasm->mark = skb->mark;
+- }
+ if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+ int related;
+ int verdict = ip_vs_out_icmp_v6(skb, &related,
+@@ -1606,12 +1600,6 @@ ip_vs_in(unsigned int hooknum, struct sk
+
+ #ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+- if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- /* Save fw mark for coming frags. */
+- reasm->ipvs_property = 1;
+- reasm->mark = skb->mark;
+- }
+ if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+ int related;
+ int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
+@@ -1663,9 +1651,8 @@ ip_vs_in(unsigned int hooknum, struct sk
+ /* sorry, all this trouble for a no-hit :) */
+ IP_VS_DBG_PKT(12, af, pp, skb, 0,
+ "ip_vs_in: packet continues traversal as normal");
+- if (iph.fragoffs && !skb_nfct_reasm(skb)) {
++ if (iph.fragoffs) {
+ /* Fragment that couldn't be mapped to a conn entry
+- * and don't have any pointer to a reasm skb
+ * is missing module nf_defrag_ipv6
+ */
+ IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+@@ -1748,38 +1735,6 @@ ip_vs_local_request4(unsigned int hooknu
+ #ifdef CONFIG_IP_VS_IPV6
+
+ /*
+- * AF_INET6 fragment handling
+- * Copy info from first fragment, to the rest of them.
+- */
+-static unsigned int
+-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+- const struct net_device *in,
+- const struct net_device *out,
+- int (*okfn)(struct sk_buff *))
+-{
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- struct net *net;
+-
+- /* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
+- * ipvs_property is set when checking first fragment
+- * in ip_vs_in() and ip_vs_out().
+- */
+- if (reasm)
+- IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
+- if (!reasm || !reasm->ipvs_property)
+- return NF_ACCEPT;
+-
+- net = skb_net(skb);
+- if (!net_ipvs(net)->enable)
+- return NF_ACCEPT;
+-
+- /* Copy stored fw mark, saved in ip_vs_{in,out} */
+- skb->mark = reasm->mark;
+-
+- return NF_ACCEPT;
+-}
+-
+-/*
+ * AF_INET6 handler in NF_INET_LOCAL_IN chain
+ * Schedule and forward packets from remote clients
+ */
+@@ -1916,14 +1871,6 @@ static struct nf_hook_ops ip_vs_ops[] __
+ .priority = 100,
+ },
+ #ifdef CONFIG_IP_VS_IPV6
+- /* After mangle & nat fetch 2:nd fragment and following */
+- {
+- .hook = ip_vs_preroute_frag6,
+- .owner = THIS_MODULE,
+- .pf = NFPROTO_IPV6,
+- .hooknum = NF_INET_PRE_ROUTING,
+- .priority = NF_IP6_PRI_NAT_DST + 1,
+- },
+ /* After packet filtering, change source only for VS/NAT */
+ {
+ .hook = ip_vs_reply6,
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -65,7 +65,6 @@ static int get_callid(const char *dptr,
+ static int
+ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+ struct ip_vs_iphdr iph;
+ unsigned int dataoff, datalen, matchoff, matchlen;
+ const char *dptr;
+@@ -79,15 +78,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_p
+ /* todo: IPv6 fragments:
+ * I think this only should be done for the first fragment. /HS
+ */
+- if (reasm) {
+- skb = reasm;
+- dataoff = iph.thoff_reasm + sizeof(struct udphdr);
+- } else
+- dataoff = iph.len + sizeof(struct udphdr);
++ dataoff = iph.len + sizeof(struct udphdr);
+
+ if (dataoff >= skb->len)
+ return -EINVAL;
+- /* todo: Check if this will mess-up the reasm skb !!! /HS */
+ retc = skb_linearize(skb);
+ if (retc < 0)
+ return retc;
--- /dev/null
+From 14fe8b362a4dc73ca49199af6548ab40597a673f Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Thu, 21 Nov 2013 16:50:58 +0100
+Subject: packet: fix use after free race in send path when dev is released
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit e40526cb20b5ee53419452e1f03d97092f144418 ]
+
+Salam reported a use after free bug in PF_PACKET that occurs when
+we're sending out frames on a socket bound device and suddenly the
+net device is being unregistered. It appears that commit 827d9780
+introduced a possible race condition between {t,}packet_snd() and
+packet_notifier(). In the case of a bound socket, packet_notifier()
+can drop the last reference to the net_device and {t,}packet_snd()
+might end up suddenly sending a packet over a freed net_device.
+
+To avoid reverting 827d9780 and thus introducing a performance
+regression compared to the current state of things, we decided to
+hold a cached RCU protected pointer to the net device and maintain
+it on write side via bind spin_lock protected register_prot_hook()
+and __unregister_prot_hook() calls.
+
+In {t,}packet_snd() path, we access this pointer under rcu_read_lock
+through packet_cached_dev_get() that holds reference to the device
+to prevent it from being freed through packet_notifier() while
+we're in send path. This is okay to do as dev_put()/dev_hold() are
+per-cpu counters, so this should not be a performance issue. Also,
+the code simplifies a bit as we don't need need_rls_dev anymore.
+
+Fixes: 827d978037d7 ("af-packet: Use existing netdev reference for bound sockets.")
+Reported-by: Salam Noureddine <noureddine@aristanetworks.com>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: Salam Noureddine <noureddine@aristanetworks.com>
+Cc: Ben Greear <greearb@candelatech.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 59 +++++++++++++++++++++++++++++--------------------
+ net/packet/internal.h | 1
+ 2 files changed, 37 insertions(+), 23 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -244,11 +244,15 @@ static void __fanout_link(struct sock *s
+ static void register_prot_hook(struct sock *sk)
+ {
+ struct packet_sock *po = pkt_sk(sk);
++
+ if (!po->running) {
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_link(sk, po);
+- else
++ } else {
+ dev_add_pack(&po->prot_hook);
++ rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
++ }
++
+ sock_hold(sk);
+ po->running = 1;
+ }
+@@ -266,10 +270,13 @@ static void __unregister_prot_hook(struc
+ struct packet_sock *po = pkt_sk(sk);
+
+ po->running = 0;
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_unlink(sk, po);
+- else
++ } else {
+ __dev_remove_pack(&po->prot_hook);
++ RCU_INIT_POINTER(po->cached_dev, NULL);
++ }
++
+ __sock_put(sk);
+
+ if (sync) {
+@@ -2041,12 +2048,24 @@ static int tpacket_fill_skb(struct packe
+ return tp_len;
+ }
+
++static struct net_device *packet_cached_dev_get(struct packet_sock *po)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = rcu_dereference(po->cached_dev);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++
++ return dev;
++}
++
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ int err, reserve = 0;
+ void *ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
+@@ -2059,7 +2078,7 @@ static int tpacket_snd(struct packet_soc
+ mutex_lock(&po->pg_vec_lock);
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2073,19 +2092,17 @@ static int tpacket_snd(struct packet_soc
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+ if (unlikely(dev == NULL))
+ goto out;
+-
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
++ reserve = dev->hard_header_len;
++
+ size_max = po->tx_ring.frame_size
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+
+@@ -2162,8 +2179,7 @@ out_status:
+ __packet_set_status(po, ph, status);
+ kfree_skb(skb);
+ out_put:
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+ out:
+ mutex_unlock(&po->pg_vec_lock);
+ return err;
+@@ -2201,7 +2217,6 @@ static int packet_snd(struct socket *soc
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ unsigned char *addr;
+ int err, reserve = 0;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+@@ -2217,7 +2232,7 @@ static int packet_snd(struct socket *soc
+ */
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2229,19 +2244,17 @@ static int packet_snd(struct socket *soc
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+- if (dev == NULL)
++ if (unlikely(dev == NULL))
+ goto out_unlock;
+- if (sock->type == SOCK_RAW)
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+- if (!(dev->flags & IFF_UP))
++ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_unlock;
+
++ if (sock->type == SOCK_RAW)
++ reserve = dev->hard_header_len;
+ if (po->has_vnet_hdr) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+@@ -2375,15 +2388,14 @@ static int packet_snd(struct socket *soc
+ if (err > 0 && (err = net_xmit_errno(err)) != 0)
+ goto out_unlock;
+
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+
+ return len;
+
+ out_free:
+ kfree_skb(skb);
+ out_unlock:
+- if (dev && need_rls_dev)
++ if (dev)
+ dev_put(dev);
+ out:
+ return err;
+@@ -2603,6 +2615,7 @@ static int packet_create(struct net *net
+ po = pkt_sk(sk);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
++ RCU_INIT_POINTER(po->cached_dev, NULL);
+
+ sk->sk_destruct = packet_sock_destruct;
+ sk_refcnt_debug_inc(sk);
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -113,6 +113,7 @@ struct packet_sock {
+ unsigned int tp_loss:1;
+ unsigned int tp_tx_has_off:1;
+ unsigned int tp_tstamp;
++ struct net_device __rcu *cached_dev;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ };
+
--- /dev/null
+From 6a55410b0fac9eaa54f254784658f93468056654 Mon Sep 17 00:00:00 2001
+From: "fan.du" <fan.du@windriver.com>
+Date: Sun, 1 Dec 2013 16:28:48 +0800
+Subject: {pktgen, xfrm} Update IPv4 header total len and checksum after transformation
+
+From: "fan.du" <fan.du@windriver.com>
+
+[ Upstream commit 3868204d6b89ea373a273e760609cb08020beb1a ]
+
+commit a553e4a6317b2cfc7659542c10fe43184ffe53da ("[PKTGEN]: IPSEC support")
+tried to support IPsec ESP transport transformation for pktgen, but actually
+this doesn't work at all for two reasons (the original transformed packet has
+a bad IPv4 checksum value, as well as a wrong auth value, reported by wireshark)
+
+- After transformation, the IPv4 header total length needs updating,
+ because the encrypted payload's length is NOT the same as that of the plain text.
+
+- After transformation, the IPv4 checksum needs re-calculating because the
+ payload has been changed.
+
+With this patch, arming pktgen with the below configuration, Wireshark is able to
+decrypt ESP packets generated by pktgen without any IPv4 checksum error or
+auth value error.
+
+pgset "flag IPSEC"
+pgset "flows 1"
+
+Signed-off-by: Fan Du <fan.du@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/pktgen.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2515,6 +2515,8 @@ static int process_ipsec(struct pktgen_d
+ if (x) {
+ int ret;
+ __u8 *eth;
++ struct iphdr *iph;
++
+ nhead = x->props.header_len - skb_headroom(skb);
+ if (nhead > 0) {
+ ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
+@@ -2536,6 +2538,11 @@ static int process_ipsec(struct pktgen_d
+ eth = (__u8 *) skb_push(skb, ETH_HLEN);
+ memcpy(eth, pkt_dev->hh, 12);
+ *(u16 *) ð[12] = protocol;
++
++ /* Update IPv4 header len as well as checksum value */
++ iph = ip_hdr(skb);
++ iph->tot_len = htons(skb->len - ETH_HLEN);
++ ip_send_check(iph);
+ }
+ }
+ return 1;
--- /dev/null
+From c11447cd532b5021dc2ea90b7f63c1b51954535c Mon Sep 17 00:00:00 2001
+From: David Chang <dchang@suse.com>
+Date: Wed, 27 Nov 2013 15:48:36 +0800
+Subject: r8169: check ALDPS bit and disable it if enabled for the 8168g
+
+From: David Chang <dchang@suse.com>
+
+[ Upstream commit 1bac1072425c86f1ac85bd5967910706677ef8b3 ]
+
+Windows driver will enable ALDPS function, but linux driver and firmware
+do not have any configuration related to ALDPS function for 8168g.
+So after rebooting the system into Linux and removing the NIC cable, the
+LAN enters ALDPS, and then LAN RX will be disabled.
+
+This issue can be easily reproduced on dual boot windows and linux
+system with RTL_GIGA_MAC_VER_40 chip.
+
+Realtek said, ALDPS function can be disabled by configuring to PHY,
+switch to page 0x0A43, reg0x10 bit2=0.
+
+Signed-off-by: David Chang <dchang@suse.com>
+Acked-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3456,6 +3456,11 @@ static void rtl8168g_1_hw_phy_config(str
+ rtl_writephy(tp, 0x14, 0x9065);
+ rtl_writephy(tp, 0x14, 0x1065);
+
++ /* Check ALDPS bit, disable it if enabled */
++ rtl_writephy(tp, 0x1f, 0x0a43);
++ if (rtl_readphy(tp, 0x10) & 0x0004)
++ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
++
+ rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
--- /dev/null
+From 18cead822be9b93064205c4e4b55d7bb7d4c1efb Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Mon, 11 Nov 2013 12:20:32 +0100
+Subject: random32: fix off-by-one in seeding requirement
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 51c37a70aaa3f95773af560e6db3073520513912 ]
+
+For properly initialising the Tausworthe generator [1], we have
+a strict seeding requirement, that is, s1 > 1, s2 > 7, s3 > 15.
+
+Commit 697f8d0348 ("random32: seeding improvement") introduced
+a __seed() function that imposes boundary checks proposed by the
+errata paper [2] to properly ensure above conditions.
+
+However, we're off by one, as the function is implemented as:
+"return (x < m) ? x + m : x;", and called with __seed(X, 1),
+__seed(X, 7), __seed(X, 15). Thus, an unwanted seed of 1, 7, 15
+would be possible, whereas the lower boundary should actually
+be of at least 2, 8, 16, just as GSL does. Fix this, as otherwise
+an initialization with an unwanted seed could have the effect
+that Tausworthe's PRNG properties cannot be ensured.
+
+Note that this PRNG is *not* used for cryptography in the kernel.
+
+ [1] http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ [2] http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+
+Joint work with Hannes Frederic Sowa.
+
+Fixes: 697f8d0348a6 ("random32: seeding improvement")
+Cc: Stephen Hemminger <stephen@networkplumber.org>
+Cc: Florian Weimer <fweimer@redhat.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/random.h | 6 +++---
+ lib/random32.c | 14 +++++++-------
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,9 +50,9 @@ static inline void prandom_seed_state(st
+ {
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+- state->s1 = __seed(i, 1);
+- state->s2 = __seed(i, 7);
+- state->s3 = __seed(i, 15);
++ state->s1 = __seed(i, 2);
++ state->s2 = __seed(i, 8);
++ state->s3 = __seed(i, 16);
+ }
+
+ #ifdef CONFIG_ARCH_RANDOM
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -141,7 +141,7 @@ void prandom_seed(u32 entropy)
+ */
+ for_each_possible_cpu (i) {
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
+- state->s1 = __seed(state->s1 ^ entropy, 1);
++ state->s1 = __seed(state->s1 ^ entropy, 2);
+ }
+ }
+ EXPORT_SYMBOL(prandom_seed);
+@@ -158,9 +158,9 @@ static int __init prandom_init(void)
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+ #define LCG(x) ((x) * 69069) /* super-duper LCG */
+- state->s1 = __seed(LCG(i + jiffies), 1);
+- state->s2 = __seed(LCG(state->s1), 7);
+- state->s3 = __seed(LCG(state->s2), 15);
++ state->s1 = __seed(LCG(i + jiffies), 2);
++ state->s2 = __seed(LCG(state->s1), 8);
++ state->s3 = __seed(LCG(state->s2), 16);
+
+ /* "warm it up" */
+ prandom_u32_state(state);
+@@ -187,9 +187,9 @@ static int __init prandom_reseed(void)
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+- state->s1 = __seed(seeds[0], 1);
+- state->s2 = __seed(seeds[1], 7);
+- state->s3 = __seed(seeds[2], 15);
++ state->s1 = __seed(seeds[0], 2);
++ state->s2 = __seed(seeds[1], 8);
++ state->s3 = __seed(seeds[2], 16);
+
+ /* mix it in */
+ prandom_u32_state(state);
--- /dev/null
+ipv6-fix-headroom-calculation-in-udp6_ufo_fragment.patch
+net-mlx4_en-fixed-crash-when-port-type-is-changed.patch
+net-fix-ip-rule-delete-table-256.patch
+ipv6-use-rt6_get_dflt_router-to-get-default-router-in-rt6_route_rcv.patch
+ipv6-protect-for_each_sk_fl_rcu-in-mem_check-with-rcu_read_lock_bh.patch
+random32-fix-off-by-one-in-seeding-requirement.patch
+bonding-don-t-permit-to-use-arp-monitoring-in-802.3ad-mode.patch
+usbnet-fix-status-interrupt-urb-handling.patch
+6lowpan-uncompression-of-traffic-class-field-was-incorrect.patch
+tuntap-limit-head-length-of-skb-allocated.patch
+macvtap-limit-head-length-of-skb-allocated.patch
+tcp-tsq-restore-minimal-amount-of-queueing.patch
+bonding-fix-two-race-conditions-in-bond_store_updelay-downdelay.patch
+net-tcp-fix-panic-in-tcp_fastopen_cache_set.patch
+isdnloop-use-strlcpy-instead-of-strcpy.patch
+connector-improved-unaligned-access-error-fix.patch
+ipv4-fix-possible-seqlock-deadlock.patch
+inet-prevent-leakage-of-uninitialized-memory-to-user-in-recv-syscalls.patch
+net-rework-recvmsg-handler-msg_name-and-msg_namelen-logic.patch
+net-add-bug_on-if-kernel-advertises-msg_namelen-sizeof-struct-sockaddr_storage.patch
+inet-fix-addr_len-msg-msg_namelen-assignment-in-recv_error-and-rxpmtu-functions.patch
+net-clamp-msg_namelen-instead-of-returning-an-error.patch
+ipv6-fix-leaking-uninitialized-port-number-of-offender-sockaddr.patch
+ip6_output-fragment-outgoing-reassembled-skb-properly.patch
+netfilter-push-reasm-skb-through-instead-of-original-frag-skbs.patch
+xfrm-release-dst-if-this-dst-is-improper-for-vti-tunnel.patch
+atm-idt77252-fix-dev-refcnt-leak.patch
+tcp-don-t-update-snd_nxt-when-a-socket-is-switched-from-repair-mode.patch
+ipv4-fix-race-in-concurrent-ip_route_input_slow.patch
+net-core-always-propagate-flag-changes-to-interfaces.patch
+bridge-flush-br-s-address-entry-in-fdb-when-remove-the-bridge-dev.patch
+packet-fix-use-after-free-race-in-send-path-when-dev-is-released.patch
+af_packet-block-bh-in-prb_shutdown_retire_blk_timer.patch
+r8169-check-aldps-bit-and-disable-it-if-enabled-for-the-8168g.patch
+net-8139cp-fix-a-bug_on-triggered-by-wrong-bytes_compl.patch
+net-update-consumers-of-msg_more-to-recognize-msg_sendpage_notlast.patch
+team-fix-master-carrier-set-when-user-linkup-is-enabled.patch
+inet-fix-possible-seqlock-deadlocks.patch
+ipv6-fix-possible-seqlock-deadlock-in-ip6_finish_output2.patch
+pktgen-xfrm-update-ipv4-header-total-len-and-checksum-after-tranformation.patch
+tcp-gso-fix-truesize-tracking.patch
--- /dev/null
+From acbcf8b7cc753939940407a421c98dba9739aa67 Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Tue, 19 Nov 2013 22:10:06 +0400
+Subject: tcp: don't update snd_nxt, when a socket is switched from repair mode
+
+From: Andrey Vagin <avagin@openvz.org>
+
+[ Upstream commit dbde497966804e63a38fdedc1e3815e77097efc2 ]
+
+snd_nxt must be updated synchronously with sk_send_head. Otherwise
+tp->packets_out may be updated incorrectly, what may bring a kernel panic.
+
+Here is a kernel panic from my host.
+[ 103.043194] BUG: unable to handle kernel NULL pointer dereference at 0000000000000048
+[ 103.044025] IP: [<ffffffff815aaaaf>] tcp_rearm_rto+0xcf/0x150
+...
+[ 146.301158] Call Trace:
+[ 146.301158] [<ffffffff815ab7f0>] tcp_ack+0xcc0/0x12c0
+
+Before this panic a tcp socket was restored. This socket had sent and
+unsent data in the write queue. Sent data was restored in repair mode,
+then the socket was switched from reapair mode and unsent data was
+restored. After that the socket was switched back into repair mode.
+
+In that moment we had a socket where write queue looks like this:
+snd_una snd_nxt write_seq
+ |_________|________|
+ |
+ sk_send_head
+
+After a second switching from repair mode the state of socket was
+changed:
+
+snd_una snd_nxt, write_seq
+ |_________ ________|
+ |
+ sk_send_head
+
+This state is inconsistent, because snd_nxt and sk_send_head are not
+synchronized.
+
+Below you can find a call trace showing how packets_out can be incremented
+twice for one skb, if snd_nxt and sk_send_head are not synchronized.
+In this case packets_out will be always positive, even when
+sk_write_queue is empty.
+
+tcp_write_wakeup
+ skb = tcp_send_head(sk);
+ tcp_fragment
+ if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq))
+ tcp_adjust_pcount(sk, skb, diff);
+ tcp_event_new_data_sent
+ tp->packets_out += tcp_skb_pcount(skb);
+
+I think update of snd_nxt isn't required, when a socket is switched from
+repair mode. Because it's initialized in tcp_connect_init. Then when a
+write queue is restored, snd_nxt is incremented in tcp_event_new_data_sent,
+so it's always is in consistent state.
+
+I have checked, that the bug is not reproduced with this patch and
+all tests about restoring tcp connections work fine.
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Cc: Pavel Emelyanov <xemul@parallels.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Cc: James Morris <jmorris@namei.org>
+Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+Cc: Patrick McHardy <kaber@trash.net>
+Acked-by: Pavel Emelyanov <xemul@parallels.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3102,7 +3102,6 @@ void tcp_send_window_probe(struct sock *
+ {
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+- tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
+ tcp_xmit_probe_skb(sk, 0);
+ }
+ }
--- /dev/null
+From 934c8b7de7c0632cf0dbaa5d9c9dcdcdc23cb33c Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 25 Oct 2013 17:26:17 -0700
+Subject: tcp: gso: fix truesize tracking
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0d08c42cf9a71530fef5ebcfe368f38f2dd0476f ]
+
+commit 6ff50cd55545 ("tcp: gso: do not generate out of order packets")
+had an heuristic that can trigger a warning in skb_try_coalesce(),
+because skb->truesize of the gso segments were exactly set to mss.
+
+This breaks the requirement that
+
+skb->truesize >= skb->len + truesizeof(struct sk_buff);
+
+It can trivially be reproduced by :
+
+ifconfig lo mtu 1500
+ethtool -K lo tso off
+netperf
+
+As the skbs are looped into the TCP networking stack, skb_try_coalesce()
+warns us of these skb under-estimating their truesize.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Alexei Starovoitov <ast@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2899,6 +2899,7 @@ struct sk_buff *tcp_tso_segment(struct s
+ netdev_features_t features)
+ {
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
++ unsigned int sum_truesize = 0;
+ struct tcphdr *th;
+ unsigned int thlen;
+ unsigned int seq;
+@@ -2982,13 +2983,7 @@ struct sk_buff *tcp_tso_segment(struct s
+ if (copy_destructor) {
+ skb->destructor = gso_skb->destructor;
+ skb->sk = gso_skb->sk;
+- /* {tcp|sock}_wfree() use exact truesize accounting :
+- * sum(skb->truesize) MUST be exactly be gso_skb->truesize
+- * So we account mss bytes of 'true size' for each segment.
+- * The last segment will contain the remaining.
+- */
+- skb->truesize = mss;
+- gso_skb->truesize -= mss;
++ sum_truesize += skb->truesize;
+ }
+ skb = skb->next;
+ th = tcp_hdr(skb);
+@@ -3005,7 +3000,9 @@ struct sk_buff *tcp_tso_segment(struct s
+ if (copy_destructor) {
+ swap(gso_skb->sk, skb->sk);
+ swap(gso_skb->destructor, skb->destructor);
+- swap(gso_skb->truesize, skb->truesize);
++ sum_truesize += skb->truesize;
++ atomic_add(sum_truesize - gso_skb->truesize,
++ &skb->sk->sk_wmem_alloc);
+ }
+
+ delta = htonl(oldlen + (skb->tail - skb->transport_header) +
--- /dev/null
+From 4c851201dc603140381dd008c574062de6a38107 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 13 Nov 2013 06:32:54 -0800
+Subject: tcp: tsq: restore minimal amount of queueing
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 98e09386c0ef4dfd48af7ba60ff908f0d525cdee ]
+
+After commit c9eeec26e32e ("tcp: TSQ can use a dynamic limit"), several
+users reported throughput regressions, notably on mvneta and wifi
+adapters.
+
+802.11 AMPDU requires a fair amount of queueing to be effective.
+
+This patch partially reverts the change done in tcp_write_xmit()
+so that the minimal amount is sysctl_tcp_limit_output_bytes.
+
+It also removes the use of this sysctl while building skbs stored
+in the write queue, as TSO autosizing does the right thing anyway.
+
+Users with well-behaving NICs and a correct qdisc (like sch_fq)
+can then lower the default sysctl_tcp_limit_output_bytes value from
+128KB to 8KB.
+
+This new usage of sysctl_tcp_limit_output_bytes permits each driver
+authors to check how their driver performs when/if the value is set
+to a minimum of 4KB.
+
+Normally, line rate for a single TCP flow should be possible,
+but some drivers rely on timers to perform TX completion and
+too long TX completion delays prevent reaching full throughput.
+
+Fixes: c9eeec26e32e ("tcp: TSQ can use a dynamic limit")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Sujith Manoharan <sujith@msujith.org>
+Reported-by: Arnaud Ebalard <arno@natisbad.org>
+Tested-by: Sujith Manoharan <sujith@msujith.org>
+Cc: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/networking/ip-sysctl.txt | 3 ---
+ net/ipv4/tcp.c | 6 ------
+ net/ipv4/tcp_output.c | 6 +++++-
+ 3 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -571,9 +571,6 @@ tcp_limit_output_bytes - INTEGER
+ typical pfifo_fast qdiscs.
+ tcp_limit_output_bytes limits the number of bytes on qdisc
+ or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+- Note: For GSO/TSO enabled flows, we try to have at least two
+- packets in flight. Reducing tcp_limit_output_bytes might also
+- reduce the size of individual GSO packet (64KB being the max)
+ Default: 131072
+
+ tcp_challenge_ack_limit - INTEGER
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -807,12 +807,6 @@ static unsigned int tcp_xmit_size_goal(s
+ xmit_size_goal = min_t(u32, gso_size,
+ sk->sk_gso_max_size - 1 - hlen);
+
+- /* TSQ : try to have at least two segments in flight
+- * (one in NIC TX ring, another in Qdisc)
+- */
+- xmit_size_goal = min_t(u32, xmit_size_goal,
+- sysctl_tcp_limit_output_bytes >> 1);
+-
+ xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+
+ /* We try hard to avoid divides here */
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1866,8 +1866,12 @@ static bool tcp_write_xmit(struct sock *
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
++ * Alas, some drivers / subsystems require a fair amount
++ * of queued bytes to ensure line rate.
++ * One example is wifi aggregation (802.11 AMPDU)
+ */
+- limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
++ limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
++ sk->sk_pacing_rate >> 10);
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
--- /dev/null
+From c69b7d9b46b518012e9b61f4e3ee2b350895c00c Mon Sep 17 00:00:00 2001
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Thu, 28 Nov 2013 18:01:38 +0100
+Subject: team: fix master carrier set when user linkup is enabled
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit f5e0d34382e18f396d7673a84df8e3342bea7eb6 ]
+
+When user linkup is enabled and user sets linkup of individual port,
+we need to recompute linkup (carrier) of master interface so the change
+is reflected. Fix this by calling __team_carrier_check() which does the
+needed work.
+
+Please apply to all stable kernels as well. Thanks.
+
+Reported-by: Jan Tluka <jtluka@redhat.com>
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1217,6 +1217,8 @@ static int team_user_linkup_option_get(s
+ return 0;
+ }
+
++static void __team_carrier_check(struct team *team);
++
+ static int team_user_linkup_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+ {
+@@ -1224,6 +1226,7 @@ static int team_user_linkup_option_set(s
+
+ port->user.linkup = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
++ __team_carrier_check(port->team);
+ return 0;
+ }
+
+@@ -1243,6 +1246,7 @@ static int team_user_linkup_en_option_se
+
+ port->user.linkup_enabled = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
++ __team_carrier_check(port->team);
+ return 0;
+ }
+
--- /dev/null
+From c1999ca30fe2477d8cad6ee33ab40a86f11978da Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 13 Nov 2013 14:00:39 +0800
+Subject: tuntap: limit head length of skb allocated
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 96f8d9ecf227638c89f98ccdcdd50b569891976c ]
+
+We currently use hdr_len as a hint of the head length advertised by the
+guest. But when the guest advertises a very big value, it can lead to a
+64K+ kmalloc() allocation, which has a very high possibility of failure
+when host memory is fragmented or under heavy stress. A huge hdr_len also
+reduces the effect of zerocopy, or even disables it if a gso skb is linearized in the guest.
+
+To solve these issues, this patch introduces an upper limit (PAGE_SIZE) on the
+head, which guarantees an order 0 allocation each time.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Cc: Stefan Hajnoczi <stefanha@redhat.com>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tun.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1069,6 +1069,7 @@ static ssize_t tun_get_user(struct tun_s
+ struct sk_buff *skb;
+ size_t len = total_len, align = NET_SKB_PAD, linear;
+ struct virtio_net_hdr gso = { 0 };
++ int good_linear;
+ int offset = 0;
+ int copylen;
+ bool zerocopy = false;
+@@ -1109,12 +1110,16 @@ static ssize_t tun_get_user(struct tun_s
+ return -EINVAL;
+ }
+
++ good_linear = SKB_MAX_HEAD(align);
++
+ if (msg_control) {
+ /* There are 256 bytes to be copied in skb, so there is
+ * enough room for skb expand head in case it is used.
+ * The rest of the buffer is mapped from userspace.
+ */
+ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
++ if (copylen > good_linear)
++ copylen = good_linear;
+ linear = copylen;
+ if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+ zerocopy = true;
+@@ -1122,7 +1127,10 @@ static ssize_t tun_get_user(struct tun_s
+
+ if (!zerocopy) {
+ copylen = len;
+- linear = gso.hdr_len;
++ if (gso.hdr_len > good_linear)
++ linear = good_linear;
++ else
++ linear = gso.hdr_len;
+ }
+
+ skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
--- /dev/null
+From 2f8394327fc3f08e8142243e7d7b8699acb7d2b0 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Tue, 12 Nov 2013 16:34:41 +0100
+Subject: usbnet: fix status interrupt urb handling
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+[ Upstream commit 52f48d0d9aaa621ffa5e08d79da99a3f8c93b848 ]
+
+Since commit 7b0c5f21f348a66de495868b8df0284e8dfd6bbf
+"sierra_net: keep status interrupt URB active", sierra_net triggers
+status interrupt polling before the net_device is opened (in order to
+properly receive the sync message response).
+
+To be able to receive further interrupts, the interrupt urb needs to be
+re-submitted, so this patch removes the bogus check for netif_running().
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Tested-by: Dan Williams <dcbw@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -206,9 +206,6 @@ static void intr_complete (struct urb *u
+ break;
+ }
+
+- if (!netif_running (dev->net))
+- return;
+-
+ status = usb_submit_urb (urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
--- /dev/null
+From 11f81df10453542ec8928675d24cbd94c7c774af Mon Sep 17 00:00:00 2001
+From: "fan.du" <fan.du@windriver.com>
+Date: Tue, 19 Nov 2013 16:53:28 +0800
+Subject: xfrm: Release dst if this dst is improper for vti tunnel
+
+From: "fan.du" <fan.du@windriver.com>
+
+[ Upstream commit 236c9f84868534c718b6889aa624de64763281f9 ]
+
+After looking up rt by the vti tunnel dst/src parameters,
+if this rt is neither attached to any transformation
+nor is the transformation tunnel oriented, this rt
+should be released back to the ip layer.
+
+Otherwise, dst memory is leaked.
+
+Signed-off-by: Fan Du <fan.du@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_vti.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -350,6 +350,7 @@ static netdev_tx_t vti_tunnel_xmit(struc
+ if (!rt->dst.xfrm ||
+ rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+ dev->stats.tx_carrier_errors++;
++ ip_rt_put(rt);
+ goto tx_error_icmp;
+ }
+ tdev = rt->dst.dev;