--- /dev/null
+From 1bb0e966ae02704e5a2f5915bde190f2ce2b32fc Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Sat, 28 Jan 2012 16:11:03 +0000
+Subject: af_unix: fix EPOLLET regression for stream sockets
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit 6f01fd6e6f6809061b56e78f1e8d143099716d70 ]
+
+Commit 0884d7aa24 (AF_UNIX: Fix poll blocking problem when reading from
+a stream socket) added a regression for epoll() in Edge Triggered mode
+(EPOLLET).
+
+The appropriate fix is to use skb_peek()/skb_unlink() instead of
+skb_dequeue(), and to only call skb_unlink() when the skb is fully
+consumed.
+
+This removes the need to requeue a partial skb at the head of
+sk_receive_queue, and removes the extra sk->sk_data_ready() calls that
+added the regression.
+
+This is safe because once an skb is given to sk_receive_queue, it is
+not modified by a writer, and readers are serialized by the u->readlock
+mutex.
+
+This also reduces the number of spinlock acquisitions for small reads
+or MSG_PEEK users, so it should improve overall performance.
+
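+The receive path then boils down, roughly, to the following simplified
+sketch (non-MSG_PEEK case; credential checks, fd passing and error
+handling omitted):
+
+        mutex_lock(&u->readlock);
+        do {
+                skb = skb_peek(&sk->sk_receive_queue); /* skb stays queued */
+                if (skb == NULL)
+                        break;          /* or wait for more data */
+
+                chunk = min_t(unsigned int, skb->len, size);
+                /* ... copy chunk bytes to the user iovec ... */
+                skb_pull(skb, chunk);   /* consume what was copied */
+
+                if (skb->len)           /* partially read skb stays queued */
+                        break;
+
+                skb_unlink(skb, &sk->sk_receive_queue);
+                consume_skb(skb);       /* fully consumed */
+        } while (size);
+        mutex_unlock(&u->readlock);
+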
+Reported-by: Nick Mathewson <nickm@freehaven.net>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Alexey Moiseytsev <himeraster@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/unix/af_unix.c | 19 ++++---------------
+ 1 file changed, 4 insertions(+), 15 deletions(-)
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1915,7 +1915,7 @@ static int unix_stream_recvmsg(struct ki
+ struct sk_buff *skb;
+
+ unix_state_lock(sk);
+- skb = skb_dequeue(&sk->sk_receive_queue);
++ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb == NULL) {
+ unix_sk(sk)->recursion_level = 0;
+ if (copied >= target)
+@@ -1955,11 +1955,8 @@ static int unix_stream_recvmsg(struct ki
+ if (check_creds) {
+ /* Never glue messages from different writers */
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+- (UNIXCB(skb).cred != siocb->scm->cred)) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
++ (UNIXCB(skb).cred != siocb->scm->cred))
+ break;
+- }
+ } else {
+ /* Copy credentials */
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+@@ -1974,8 +1971,6 @@ static int unix_stream_recvmsg(struct ki
+
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+@@ -1990,13 +1985,10 @@ static int unix_stream_recvmsg(struct ki
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(siocb->scm, skb);
+
+- /* put the skb back if we didn't use it up.. */
+- if (skb->len) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
++ if (skb->len)
+ break;
+- }
+
++ skb_unlink(skb, &sk->sk_receive_queue);
+ consume_skb(skb);
+
+ if (siocb->scm->fp)
+@@ -2007,9 +1999,6 @@ static int unix_stream_recvmsg(struct ki
+ if (UNIXCB(skb).fp)
+ siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+- /* put message back and return */
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+ break;
+ }
+ } while (size);
--- /dev/null
+From 9506b00a7ba8a8043dd7eb7d4efd0667f28c5d37 Mon Sep 17 00:00:00 2001
+From: Jiri Bohac <jbohac@suse.cz>
+Date: Wed, 18 Jan 2012 12:24:54 +0000
+Subject: bonding: fix enslaving in alb mode when link down
+
+
+From: Jiri Bohac <jbohac@suse.cz>
+
+[ Upstream commit b924551bed09f61b64f21bffe241afc5526b091a ]
+
+bond_alb_init_slave() is called from bond_enslave() and sets the slave's MAC
+address. This is done differently for TLB and ALB modes.
+bond->alb_info.rlb_enabled is used to discriminate between the two modes, but
+this flag may be uninitialized if the slave is being enslaved prior to calling
+bond_open() -> bond_alb_initialize() on the master.
+
+It turns out all the callers of alb_set_slave_mac_addr() pass
+bond->alb_info.rlb_enabled as the hw parameter.
+
+This patch cleans up the unnecessary parameter of alb_set_slave_mac_addr() and
+makes the function decide based on the bonding mode instead, which fixes the
+above problem.
+
+Reported-by: Narendra K <Narendra_K@Dell.com>
+Signed-off-by: Jiri Bohac <jbohac@suse.cz>
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/bonding/bond_alb.c | 27 +++++++++------------------
+ 1 file changed, 9 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -871,16 +871,12 @@ static void alb_send_learning_packets(st
+ }
+ }
+
+-/* hw is a boolean parameter that determines whether we should try and
+- * set the hw address of the device as well as the hw address of the
+- * net_device
+- */
+-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
++static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
+ {
+ struct net_device *dev = slave->dev;
+ struct sockaddr s_addr;
+
+- if (!hw) {
++ if (slave->bond->params.mode == BOND_MODE_TLB) {
+ memcpy(dev->dev_addr, addr, dev->addr_len);
+ return 0;
+ }
+@@ -910,8 +906,8 @@ static void alb_swap_mac_addr(struct bon
+ u8 tmp_mac_addr[ETH_ALEN];
+
+ memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+- alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
+- alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
++ alb_set_slave_mac_addr(slave2, tmp_mac_addr);
+
+ }
+
+@@ -1058,8 +1054,7 @@ static int alb_handle_addr_collision_on_
+
+ /* Try setting slave mac to bond address and fall-through
+ to code handling that situation below... */
+- alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
+ }
+
+ /* The slave's address is equal to the address of the bond.
+@@ -1095,8 +1090,7 @@ static int alb_handle_addr_collision_on_
+ }
+
+ if (free_mac_slave) {
+- alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
+
+ pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+@@ -1451,8 +1445,7 @@ int bond_alb_init_slave(struct bonding *
+ {
+ int res;
+
+- res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+- bond->alb_info.rlb_enabled);
++ res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
+ if (res) {
+ return res;
+ }
+@@ -1603,8 +1596,7 @@ void bond_alb_handle_active_change(struc
+ alb_swap_mac_addr(bond, swap_slave, new_slave);
+ } else {
+ /* set the new_slave to the bond mac address */
+- alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
+ }
+
+ if (swap_slave) {
+@@ -1664,8 +1656,7 @@ int bond_alb_set_mac_address(struct net_
+ alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+ alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+ } else {
+- alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
+
+ read_lock(&bond->lock);
+ alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
--- /dev/null
+From 08c17640230d98994299e281d490682534eda609 Mon Sep 17 00:00:00 2001
+From: James Chapman <jchapman@katalix.com>
+Date: Wed, 25 Jan 2012 02:39:05 +0000
+Subject: l2tp: l2tp_ip - fix possible oops on packet receive
+
+
+From: James Chapman <jchapman@katalix.com>
+
+[ Upstream commit 68315801dbf3ab2001679fd2074c9dc5dcf87dfa ]
+
+When a packet is received on an L2TP IP socket (L2TPv3 IP link
+encapsulation), the l2tpip socket's backlog_rcv function calls
+xfrm4_policy_check(). This is not necessary, since it was called
+before the skb was added to the backlog. With CONFIG_NET_NS enabled,
+xfrm4_policy_check() will oops if skb->dev is null, so this trivial
+patch removes the call.
+
+This bug has always been present, but only when CONFIG_NET_NS is
+enabled does it cause problems. Most users are probably using UDP
+encapsulation for L2TP, hence the problem has only recently
+surfaced.
+
+EIP: 0060:[<c12bb62b>] EFLAGS: 00210246 CPU: 0
+EIP is at l2tp_ip_recvmsg+0xd4/0x2a7
+EAX: 00000001 EBX: d77b5180 ECX: 00000000 EDX: 00200246
+ESI: 00000000 EDI: d63cbd30 EBP: d63cbd18 ESP: d63cbcf4
+ DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
+Call Trace:
+ [<c1218568>] sock_common_recvmsg+0x31/0x46
+ [<c1215c92>] __sock_recvmsg_nosec+0x45/0x4d
+ [<c12163a1>] __sock_recvmsg+0x31/0x3b
+ [<c1216828>] sock_recvmsg+0x96/0xab
+ [<c10b2693>] ? might_fault+0x47/0x81
+ [<c10b2693>] ? might_fault+0x47/0x81
+ [<c1167fd0>] ? _copy_from_user+0x31/0x115
+ [<c121e8c8>] ? copy_from_user+0x8/0xa
+ [<c121ebd6>] ? verify_iovec+0x3e/0x78
+ [<c1216604>] __sys_recvmsg+0x10a/0x1aa
+ [<c1216792>] ? sock_recvmsg+0x0/0xab
+ [<c105a99b>] ? __lock_acquire+0xbdf/0xbee
+ [<c12d5a99>] ? do_page_fault+0x193/0x375
+ [<c10d1200>] ? fcheck_files+0x9b/0xca
+ [<c10d1259>] ? fget_light+0x2a/0x9c
+ [<c1216bbb>] sys_recvmsg+0x2b/0x43
+ [<c1218145>] sys_socketcall+0x16d/0x1a5
+ [<c11679f0>] ? trace_hardirqs_on_thunk+0xc/0x10
+ [<c100305f>] sysenter_do_call+0x12/0x38
+Code: c6 05 8c ea a8 c1 01 e8 0c d4 d9 ff 85 f6 74 07 3e ff 86 80 00 00 00 b9 17 b6 2b c1 ba 01 00 00 00 b8 78 ed 48 c1 e8 23 f6 d9 ff <ff> 76 0c 68 28 e3 30 c1 68 2d 44 41 c1 e8 89 57 01 00 83 c4 0c
+
+Signed-off-by: James Chapman <jchapman@katalix.com>
+Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/l2tp/l2tp_ip.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct s
+ {
+ int rc;
+
+- if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+- goto drop;
+-
+- nf_reset(skb);
+-
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
--- /dev/null
+From 33cdb9c9c280982858a98c47233912050209aaa5 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Mon, 23 Jan 2012 05:38:59 +0000
+Subject: macvlan: fix a possible use after free
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit 4ec7ac1203bcf21f5e3d977c9818b1a56c9ef40d ]
+
+Commit bc416d9768 (macvlan: handle fragmented multicast frames) added a
+possible use after free in macvlan_handle_frame(), since
+ip_check_defrag() uses pskb_may_pull(): the skb header can be reallocated.
+
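+In short, any pointer cached into the skb header must be reloaded after
+a call that may reallocate that header (simplified sketch of the
+receive path):
+
+        eth = eth_hdr(skb);
+        /* ip_check_defrag() -> pskb_may_pull() may reallocate the header */
+        skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+        if (!skb)
+                return RX_HANDLER_CONSUMED;
+        eth = eth_hdr(skb);     /* reload, the old pointer may be stale */
+        src = macvlan_hash_lookup(port, eth->h_source);
+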
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Ben Greear <greearb@candelatech.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/macvlan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -172,6 +172,7 @@ static rx_handler_result_t macvlan_handl
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
++ eth = eth_hdr(skb);
+ src = macvlan_hash_lookup(port, eth->h_source);
+ if (!src)
+ /* frame comes from an external address */
--- /dev/null
+From 0cba1a2f31a6b48fafd76fbea218094af5f25922 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Wed, 18 Jan 2012 07:21:42 +0000
+Subject: net: bpf_jit: fix divide by 0 generation
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit d00a9dd21bdf7908b70866794c8313ee8a5abd5c ]
+
+Several problems fixed in this patch :
+
+1) The target of the conditional jump taken when a BPF filter performs
+   a divide by 0 is wrong.
+
+2) The full function prologue/epilogue must be 'generated' at pass=0,
+   or else we can stop too early in pass=1 if the proglen doesn't
+   change (if the increase of the prologue/epilogue equals the decrease
+   in total instruction length because some jumps are converted to near
+   jumps).
+
+3) Change the wrong-length detection at the end of code generation to
+   issue a more explicit message; there is no need for a full stack
+   trace.
+
+Reported-by: Phil Oester <kernel@linuxace.com>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/net/bpf_jit_comp.c | 36 ++++++++++++++++++++++--------------
+ 1 file changed, 22 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *f
+ cleanup_addr = proglen; /* epilogue address */
+
+ for (pass = 0; pass < 10; pass++) {
++ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+ /* no prologue/epilogue for trivial filters (RET something) */
+ proglen = 0;
+ prog = temp;
+
+- if (seen) {
++ if (seen_or_pass0) {
+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
+ /* note : must save %rbx in case bpf_error is hit */
+- if (seen & (SEEN_XREG | SEEN_DATAREF))
++ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ CLEAR_X(); /* make sure we dont leek kernel memory */
+
+ /*
+@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *f
+ * r9 = skb->len - skb->data_len
+ * r8 = skb->data
+ */
+- if (seen & SEEN_DATAREF) {
++ if (seen_or_pass0 & SEEN_DATAREF) {
+ if (offsetof(struct sk_buff, len) <= 127)
+ /* mov off8(%rdi),%r9d */
+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
+@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *f
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ seen |= SEEN_XREG;
+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
+- if (pc_ret0 != -1)
+- EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
+- else {
++ if (pc_ret0 > 0) {
++ /* addrs[pc_ret0 - 1] is start address of target
++ * (addrs[i] - 4) is the address following this jmp
++ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
++ */
++ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
++ (addrs[i] - 4));
++ } else {
+ EMIT_COND_JMP(X86_JNE, 2 + 5);
+ CLEAR_A();
+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
+@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *f
+ }
+ /* fallinto */
+ case BPF_S_RET_A:
+- if (seen) {
++ if (seen_or_pass0) {
+ if (i != flen - 1) {
+ EMIT_JMP(cleanup_addr - addrs[i]);
+ break;
+ }
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
+ EMIT1(0xc9); /* leaveq */
+ }
+@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
+ goto common_load;
+ case BPF_S_LDX_B_MSH:
+ if ((int)K < 0) {
+- if (pc_ret0 != -1) {
+- EMIT_JMP(addrs[pc_ret0] - addrs[i]);
++ if (pc_ret0 > 0) {
++ /* addrs[pc_ret0 - 1] is the start address */
++ EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
+ break;
+ }
+ CLEAR_A();
+@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filt
+ * use it to give the cleanup instruction(s) addr
+ */
+ cleanup_addr = proglen - 1; /* ret */
+- if (seen)
++ if (seen_or_pass0)
+ cleanup_addr -= 1; /* leaveq */
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+
+ if (image) {
+- WARN_ON(proglen != oldproglen);
++ if (proglen != oldproglen)
++ pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+ break;
+ }
+ if (proglen == oldproglen) {
--- /dev/null
+From 42f7400894971bf3e6fc47898a6040a8d414ef16 Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Thu, 26 Jan 2012 14:04:53 +0000
+Subject: net caif: Register properly as a pernet subsystem.
+
+
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+
+[ Upstream commit 8a8ee9aff6c3077dd9c2c7a77478e8ed362b96c6 ]
+
+caif is a subsystem and as such it needs to register with
+register_pernet_subsys instead of register_pernet_device.
+
+Among other problems, using register_pernet_device resulted in
+net_generic being called before the caif_net structure was allocated,
+which caused net_generic to fail with either BUG_ONs or NULL pointer
+returns.
+
+An uglier problem that could result is packets still in flight while
+the subsystem is shutting down.
+
+To remove confusion, also remove the cruft caused by inappropriately
+trying to fix this bug.
+
+With the aid of the previous patch I have tested this patch and
+confirmed that using register_pernet_subsys makes the failure go away as
+it should.
+
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+Acked-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
+Tested-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/caif/caif_dev.c | 11 ++++-------
+ net/caif/cfcnfg.c | 1 -
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -53,7 +53,6 @@ struct cfcnfg *get_cfcnfg(struct net *ne
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
+ return caifn->cfg;
+ }
+ EXPORT_SYMBOL(get_cfcnfg);
+@@ -63,7 +62,6 @@ static struct caif_device_entry_list *ca
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
+ return &caifn->caifdevs;
+ }
+
+@@ -92,7 +90,6 @@ static struct caif_device_entry *caif_de
+ struct caif_device_entry *caifd;
+
+ caifdevs = caif_device_list(dev_net(dev));
+- BUG_ON(!caifdevs);
+
+ caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
+ if (!caifd)
+@@ -112,7 +109,7 @@ static struct caif_device_entry *caif_ge
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+- BUG_ON(!caifdevs);
++
+ list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+@@ -353,7 +350,7 @@ static struct notifier_block caif_device
+ static int caif_init_net(struct net *net)
+ {
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
++
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ mutex_init(&caifn->caifdevs.lock);
+
+@@ -418,7 +415,7 @@ static int __init caif_device_init(void)
+ {
+ int result;
+
+- result = register_pernet_device(&caif_net_ops);
++ result = register_pernet_subsys(&caif_net_ops);
+
+ if (result)
+ return result;
+@@ -431,7 +428,7 @@ static int __init caif_device_init(void)
+
+ static void __exit caif_device_exit(void)
+ {
+- unregister_pernet_device(&caif_net_ops);
++ unregister_pernet_subsys(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_packet_type);
+ }
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net,
+ int err;
+ struct cfctrl_link_param param;
+ struct cfcnfg *cfg = get_cfcnfg(net);
+- caif_assert(cfg != NULL);
+
+ rcu_read_lock();
+ err = caif_connect_req_to_link_param(cfg, conn_req, ¶m);
--- /dev/null
+From b9c96307df992e5aa3fbc9dc1e3675f8d36af201 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Thu, 12 Jan 2012 04:41:32 +0000
+Subject: net: reintroduce missing rcu_assign_pointer() calls
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit cf778b00e96df6d64f8e21b8395d1f8a859ecdc7 ]
+
+Commit a9b3cd7f32 (rcu: convert uses of rcu_assign_pointer(x, NULL) to
+RCU_INIT_POINTER) made a lot of incorrect changes, since it blindly
+converted every rcu_assign_pointer(x, y) to RCU_INIT_POINTER(x, y).
+
+This loses needed memory barriers, even on x86, when y is not NULL.
+
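+A minimal sketch of the distinction (gp and p are placeholder names):
+
+        p = kmalloc(sizeof(*p), GFP_KERNEL);
+        if (!p)
+                return -ENOMEM;
+        p->val = 1;                     /* initialise before publishing */
+        rcu_assign_pointer(gp, p);      /* barrier orders init vs. publish */
+
+        RCU_INIT_POINTER(gp, NULL);     /* NULL needs no barrier */
+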
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+CC: Stephen Hemminger <shemminger@vyatta.com>
+CC: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/net-sysfs.c | 6 +++---
+ net/core/netpoll.c | 2 +-
+ net/decnet/dn_dev.c | 4 ++--
+ net/ipv4/devinet.c | 2 +-
+ net/ipv4/fib_trie.c | 10 +++++-----
+ net/ipv4/igmp.c | 8 ++++----
+ net/ipv4/ipip.c | 8 ++++----
+ net/ipv4/ipmr.c | 2 +-
+ net/ipv6/addrconf.c | 2 +-
+ net/ipv6/ip6_tunnel.c | 8 ++++----
+ net/ipv6/raw.c | 2 +-
+ net/ipv6/sit.c | 10 +++++-----
+ net/mac80211/agg-rx.c | 2 +-
+ net/mac80211/cfg.c | 4 ++--
+ net/mac80211/ibss.c | 2 +-
+ net/mac80211/sta_info.c | 6 +++---
+ net/netfilter/nf_conntrack_core.c | 2 +-
+ net/netfilter/nf_conntrack_ecache.c | 4 ++--
+ net/netfilter/nf_conntrack_extend.c | 2 +-
+ net/netfilter/nf_conntrack_helper.c | 2 +-
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_log.c | 6 +++---
+ net/netfilter/nf_queue.c | 2 +-
+ net/netfilter/nfnetlink.c | 4 ++--
+ net/netlabel/netlabel_domainhash.c | 4 ++--
+ net/netlabel/netlabel_unlabeled.c | 6 ++----
+ net/phonet/af_phonet.c | 2 +-
+ net/phonet/pn_dev.c | 2 +-
+ net/phonet/socket.c | 2 +-
+ net/socket.c | 2 +-
+ net/sunrpc/auth_gss/auth_gss.c | 2 +-
+ net/xfrm/xfrm_user.c | 2 +-
+ 32 files changed, 61 insertions(+), 63 deletions(-)
+
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -990,9 +990,9 @@ static ssize_t store_xps_map(struct netd
+ nonempty = 1;
+ }
+
+- if (nonempty)
+- RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
+- else {
++ if (nonempty) {
++ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
++ } else {
+ kfree(new_dev_maps);
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ }
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -763,7 +763,7 @@ int __netpoll_setup(struct netpoll *np)
+ }
+
+ /* last thing to do is link it to the net device structure */
+- RCU_INIT_POINTER(ndev->npinfo, npinfo);
++ rcu_assign_pointer(ndev->npinfo, npinfo);
+
+ return 0;
+
+--- a/net/decnet/dn_dev.c
++++ b/net/decnet/dn_dev.c
+@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_d
+ }
+
+ ifa->ifa_next = dn_db->ifa_list;
+- RCU_INIT_POINTER(dn_db->ifa_list, ifa);
++ rcu_assign_pointer(dn_db->ifa_list, ifa);
+
+ dn_ifaddr_notify(RTM_NEWADDR, ifa);
+ blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
+@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(stru
+
+ memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
+
+- RCU_INIT_POINTER(dev->dn_ptr, dn_db);
++ rcu_assign_pointer(dev->dn_ptr, dn_db);
+ dn_db->dev = dev;
+ init_timer(&dn_db->timer);
+
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(st
+ ip_mc_up(in_dev);
+
+ /* we can receive as soon as ip_ptr is set -- do this last */
+- RCU_INIT_POINTER(dev->ip_ptr, in_dev);
++ rcu_assign_pointer(dev->ip_ptr, in_dev);
+ out:
+ return in_dev;
+ out_kfree:
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -205,7 +205,7 @@ static inline struct tnode *node_parent_
+ return (struct tnode *)(parent & ~NODE_TYPE_MASK);
+ }
+
+-/* Same as RCU_INIT_POINTER
++/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
+ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
+@@ -529,7 +529,7 @@ static void tnode_put_child_reorg(struct
+ if (n)
+ node_set_parent(n, tn);
+
+- RCU_INIT_POINTER(tn->child[i], n);
++ rcu_assign_pointer(tn->child[i], n);
+ }
+
+ #define MAX_WORK 10
+@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *
+
+ tp = node_parent((struct rt_trie_node *) tn);
+ if (!tp)
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+
+ tnode_free_flush();
+ if (!tp)
+@@ -1027,7 +1027,7 @@ static void trie_rebalance(struct trie *
+ if (IS_TNODE(tn))
+ tn = (struct tnode *)resize(t, (struct tnode *)tn);
+
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ tnode_free_flush();
+ }
+
+@@ -1164,7 +1164,7 @@ static struct list_head *fib_insert_node
+ put_child(t, (struct tnode *)tp, cindex,
+ (struct rt_trie_node *)tn);
+ } else {
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ tp = tn;
+ }
+ }
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1244,7 +1244,7 @@ void ip_mc_inc_group(struct in_device *i
+
+ im->next_rcu = in_dev->mc_list;
+ in_dev->mc_count++;
+- RCU_INIT_POINTER(in_dev->mc_list, im);
++ rcu_assign_pointer(in_dev->mc_list, im);
+
+ #ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, im->multiaddr);
+@@ -1816,7 +1816,7 @@ int ip_mc_join_group(struct sock *sk , s
+ iml->next_rcu = inet->mc_list;
+ iml->sflist = NULL;
+ iml->sfmode = MCAST_EXCLUDE;
+- RCU_INIT_POINTER(inet->mc_list, iml);
++ rcu_assign_pointer(inet->mc_list, iml);
+ ip_mc_inc_group(in_dev, addr);
+ err = 0;
+ done:
+@@ -2003,7 +2003,7 @@ int ip_mc_source(int add, int omode, str
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ kfree_rcu(psl, rcu);
+ }
+- RCU_INIT_POINTER(pmc->sflist, newpsl);
++ rcu_assign_pointer(pmc->sflist, newpsl);
+ psl = newpsl;
+ }
+ rv = 1; /* > 0 for insert logic below if sl_count is 0 */
+@@ -2106,7 +2106,7 @@ int ip_mc_msfilter(struct sock *sk, stru
+ } else
+ (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
+ 0, NULL, 0);
+- RCU_INIT_POINTER(pmc->sflist, newpsl);
++ rcu_assign_pointer(pmc->sflist, newpsl);
+ pmc->sfmode = msf->imsf_fmode;
+ err = 0;
+ done:
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ip
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip
+ {
+ struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
+
+- RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+@@ -792,7 +792,7 @@ static int __net_init ipip_fb_tunnel_ini
+ return -ENOMEM;
+
+ dev_hold(dev);
+- RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
++ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
+ }
+
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1225,7 +1225,7 @@ int ip_mroute_setsockopt(struct sock *sk
+
+ ret = ip_ra_control(sk, 1, mrtsock_destruct);
+ if (ret == 0) {
+- RCU_INIT_POINTER(mrt->mroute_sk, sk);
++ rcu_assign_pointer(mrt->mroute_sk, sk);
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
+ }
+ rtnl_unlock();
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -429,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(s
+ ndev->tstamp = jiffies;
+ addrconf_sysctl_register(ndev);
+ /* protected by rtnl_lock */
+- RCU_INIT_POINTER(dev->ip6_ptr, ndev);
++ rcu_assign_pointer(dev->ip6_ptr, ndev);
+
+ /* Join all-node multicast group */
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, s
+ {
+ struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
+
+- RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next , rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ /**
+@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n,
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -1450,7 +1450,7 @@ static int __net_init ip6_fb_tnl_dev_ini
+
+ t->parms.proto = IPPROTO_IPV6;
+ dev_hold(dev);
+- RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
++ rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ return 0;
+ }
+
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -131,7 +131,7 @@ static mh_filter_t __rcu *mh_filter __re
+
+ int rawv6_mh_filter_register(mh_filter_t filter)
+ {
+- RCU_INIT_POINTER(mh_filter, filter);
++ rcu_assign_pointer(mh_filter, filter);
+ return 0;
+ }
+ EXPORT_SYMBOL(rawv6_mh_filter_register);
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct s
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit
+ {
+ struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
+
+- RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
+@@ -393,7 +393,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t
+ p->addr = a->addr;
+ p->flags = a->flags;
+ t->prl_count++;
+- RCU_INIT_POINTER(t->prl, p);
++ rcu_assign_pointer(t->prl, p);
+ out:
+ return err;
+ }
+@@ -1177,7 +1177,7 @@ static int __net_init ipip6_fb_tunnel_in
+ if (!dev->tstats)
+ return -ENOMEM;
+ dev_hold(dev);
+- RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
++ rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+ return 0;
+ }
+
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -326,7 +326,7 @@ void ieee80211_process_addba_request(str
+ status = WLAN_STATUS_SUCCESS;
+
+ /* activate it for RX */
+- RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
++ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+
+ if (timeout)
+ mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -575,7 +575,7 @@ static int ieee80211_config_beacon(struc
+
+ sdata->vif.bss_conf.dtim_period = new->dtim_period;
+
+- RCU_INIT_POINTER(sdata->u.ap.beacon, new);
++ rcu_assign_pointer(sdata->u.ap.beacon, new);
+
+ synchronize_rcu();
+
+@@ -922,7 +922,7 @@ static int ieee80211_change_station(stru
+ return -EBUSY;
+ }
+
+- RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
++ rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ }
+
+ sta->sdata = vlansdata;
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(st
+ *pos++ = 0; /* U-APSD no in use */
+ }
+
+- RCU_INIT_POINTER(ifibss->presp, skb);
++ rcu_assign_pointer(ifibss->presp, skb);
+
+ sdata->vif.bss_conf.beacon_int = beacon_int;
+ sdata->vif.bss_conf.basic_rates = basic_rates;
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -73,7 +73,7 @@ static int sta_info_hash_del(struct ieee
+ if (!s)
+ return -ENOENT;
+ if (s == sta) {
+- RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
++ rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
+ s->hnext);
+ return 0;
+ }
+@@ -83,7 +83,7 @@ static int sta_info_hash_del(struct ieee
+ s = rcu_dereference_protected(s->hnext,
+ lockdep_is_held(&local->sta_lock));
+ if (rcu_access_pointer(s->hnext)) {
+- RCU_INIT_POINTER(s->hnext, sta->hnext);
++ rcu_assign_pointer(s->hnext, sta->hnext);
+ return 0;
+ }
+
+@@ -232,7 +232,7 @@ static void sta_info_hash_add(struct iee
+ struct sta_info *sta)
+ {
+ sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
+- RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
++ rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+ }
+
+ static void sta_unblock(struct work_struct *wk)
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -776,7 +776,7 @@ init_conntrack(struct net *net, struct n
+ if (exp->helper) {
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ if (help)
+- RCU_INIT_POINTER(help->helper, exp->helper);
++ rcu_assign_pointer(help->helper, exp->helper);
+ }
+
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -91,7 +91,7 @@ int nf_conntrack_register_notifier(struc
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+- RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
++ rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
+ mutex_unlock(&nf_ct_ecache_mutex);
+ return ret;
+
+@@ -128,7 +128,7 @@ int nf_ct_expect_register_notifier(struc
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+- RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
++ rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
+ mutex_unlock(&nf_ct_ecache_mutex);
+ return ret;
+
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_e
+ before updating alloc_size */
+ type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
+ + type->len;
+- RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
++ rcu_assign_pointer(nf_ct_ext_types[type->id], type);
+ update_alloc_size(type);
+ out:
+ mutex_unlock(&nf_ct_ext_type_mutex);
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_
+ memset(&help->help, 0, sizeof(help->help));
+ }
+
+- RCU_INIT_POINTER(help->helper, helper);
++ rcu_assign_pointer(help->helper, helper);
+ out:
+ return ret;
+ }
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *
+ return -EOPNOTSUPP;
+ }
+
+- RCU_INIT_POINTER(help->helper, helper);
++ rcu_assign_pointer(help->helper, helper);
+
+ return 0;
+ }
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct
+ llog = rcu_dereference_protected(nf_loggers[pf],
+ lockdep_is_held(&nf_log_mutex));
+ if (llog == NULL)
+- RCU_INIT_POINTER(nf_loggers[pf], logger);
++ rcu_assign_pointer(nf_loggers[pf], logger);
+ }
+
+ mutex_unlock(&nf_log_mutex);
+@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const st
+ mutex_unlock(&nf_log_mutex);
+ return -ENOENT;
+ }
+- RCU_INIT_POINTER(nf_loggers[pf], logger);
++ rcu_assign_pointer(nf_loggers[pf], logger);
+ mutex_unlock(&nf_log_mutex);
+ return 0;
+ }
+@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_tabl
+ mutex_unlock(&nf_log_mutex);
+ return -ENOENT;
+ }
+- RCU_INIT_POINTER(nf_loggers[tindex], logger);
++ rcu_assign_pointer(nf_loggers[tindex], logger);
+ mutex_unlock(&nf_log_mutex);
+ } else {
+ mutex_lock(&nf_log_mutex);
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t p
+ else if (old)
+ ret = -EBUSY;
+ else {
+- RCU_INIT_POINTER(queue_handler[pf], qh);
++ rcu_assign_pointer(queue_handler[pf], qh);
+ ret = 0;
+ }
+ mutex_unlock(&queue_handler_mutex);
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const stru
+ nfnl_unlock();
+ return -EBUSY;
+ }
+- RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
++ rcu_assign_pointer(subsys_table[n->subsys_id], n);
+ nfnl_unlock();
+
+ return 0;
+@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init
+ if (!nfnl)
+ return -ENOMEM;
+ net->nfnl_stash = nfnl;
+- RCU_INIT_POINTER(net->nfnl, nfnl);
++ rcu_assign_pointer(net->nfnl, nfnl);
+ return 0;
+ }
+
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
+ INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
+
+ spin_lock(&netlbl_domhsh_lock);
+- RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
++ rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
+ spin_unlock(&netlbl_domhsh_lock);
+
+ return 0;
+@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_
+ &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
+ } else {
+ INIT_LIST_HEAD(&entry->list);
+- RCU_INIT_POINTER(netlbl_domhsh_def, entry);
++ rcu_assign_pointer(netlbl_domhsh_def, entry);
+ }
+
+ if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlb
+ INIT_LIST_HEAD(&iface->list);
+ if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
+ goto add_iface_failure;
+- RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
++ rcu_assign_pointer(netlbl_unlhsh_def, iface);
+ }
+ spin_unlock(&netlbl_unlhsh_lock);
+
+@@ -1447,11 +1447,9 @@ int __init netlbl_unlabel_init(u32 size)
+ for (iter = 0; iter < hsh_tbl->size; iter++)
+ INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
+
+- rcu_read_lock();
+ spin_lock(&netlbl_unlhsh_lock);
+- RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
++ rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
+ spin_unlock(&netlbl_unlhsh_lock);
+- rcu_read_unlock();
+
+ register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);
+
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_regist
+ if (proto_tab[protocol])
+ err = -EBUSY;
+ else
+- RCU_INIT_POINTER(proto_tab[protocol], pp);
++ rcu_assign_pointer(proto_tab[protocol], pp);
+ mutex_unlock(&proto_tab_lock);
+
+ return err;
+--- a/net/phonet/pn_dev.c
++++ b/net/phonet/pn_dev.c
+@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *
+ daddr = daddr >> 2;
+ mutex_lock(&routes->lock);
+ if (routes->table[daddr] == NULL) {
+- RCU_INIT_POINTER(routes->table[daddr], dev);
++ rcu_assign_pointer(routes->table[daddr], dev);
+ dev_hold(dev);
+ err = 0;
+ }
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -680,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == NULL) {
+ sock_hold(sk);
+- RCU_INIT_POINTER(pnres.sk[res], sk);
++ rcu_assign_pointer(pnres.sk[res], sk);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto
+ lockdep_is_held(&net_family_lock)))
+ err = -EEXIST;
+ else {
+- RCU_INIT_POINTER(net_families[ops->family], ops);
++ rcu_assign_pointer(net_families[ops->family], ops);
+ err = 0;
+ }
+ spin_unlock(&net_family_lock);
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred,
+ if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
+ return;
+ gss_get_ctx(ctx);
+- RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
++ rcu_assign_pointer(gss_cred->gc_ctx, ctx);
+ set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ smp_mb__before_clear_bit();
+ clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init
+ if (nlsk == NULL)
+ return -ENOMEM;
+ net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
+- RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
++ rcu_assign_pointer(net->xfrm.nlsk, nlsk);
+ return 0;
+ }
+
--- /dev/null
+From edfae7e59c985de33d2f09e4a260a0d26b376f7d Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Thu, 26 Jan 2012 14:02:55 +0000
+Subject: netns: Fail conspicously if someone uses net_generic at an inappropriate time.
+
+
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+
+[ Upstream commit 5ee4433efe99b9f39f6eff5052a177bbcfe72cea ]
+
+By definition, net_generic() should never be called when it can return
+NULL. Fail conspicuously with a BUG_ON to make it clear, when people
+mess up, that a NULL return should never happen.
+
+Recently there was a bug in the CAIF subsystem where it was registered
+with register_pernet_device instead of register_pernet_subsys. It was
+erroneously concluded that net_generic could validly return NULL and
+that net_assign_generic was buggy (when it was just inefficient).
+Hopefully this BUG_ON will prevent people from coming to similar
+erroneous conclusions in the future.
+
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+Tested-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ include/net/netns/generic.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/net/netns/generic.h
++++ b/include/net/netns/generic.h
+@@ -41,6 +41,7 @@ static inline void *net_generic(const st
+ ptr = ng->ptr[id - 1];
+ rcu_read_unlock();
+
++ BUG_ON(!ptr);
+ return ptr;
+ }
+ #endif
--- /dev/null
+From ee79d4fcc2da181b8d1822c9eae912998c9aea3a Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Thu, 26 Jan 2012 00:41:38 +0000
+Subject: netns: fix net_alloc_generic()
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit 073862ba5d249c20bd5c49fc6d904ff0e1f6a672 ]
+
+When a new net namespace is created, we should attach to it a "struct
+net_generic" with enough slots (even if they are empty), or we can hit
+the following BUG_ON():
+
+[ 200.752016] kernel BUG at include/net/netns/generic.h:40!
+...
+[ 200.752016] [<ffffffff825c3cea>] ? get_cfcnfg+0x3a/0x180
+[ 200.752016] [<ffffffff821cf0b0>] ? lockdep_rtnl_is_held+0x10/0x20
+[ 200.752016] [<ffffffff825c41be>] caif_device_notify+0x2e/0x530
+[ 200.752016] [<ffffffff810d61b7>] notifier_call_chain+0x67/0x110
+[ 200.752016] [<ffffffff810d67c1>] raw_notifier_call_chain+0x11/0x20
+[ 200.752016] [<ffffffff821bae82>] call_netdevice_notifiers+0x32/0x60
+[ 200.752016] [<ffffffff821c2b26>] register_netdevice+0x196/0x300
+[ 200.752016] [<ffffffff821c2ca9>] register_netdev+0x19/0x30
+[ 200.752016] [<ffffffff81c1c67a>] loopback_net_init+0x4a/0xa0
+[ 200.752016] [<ffffffff821b5e62>] ops_init+0x42/0x180
+[ 200.752016] [<ffffffff821b600b>] setup_net+0x6b/0x100
+[ 200.752016] [<ffffffff821b6466>] copy_net_ns+0x86/0x110
+[ 200.752016] [<ffffffff810d5789>] create_new_namespaces+0xd9/0x190
+
+net_alloc_generic() should take into account the maximum index into the
+ptr array, as a subsystem might use net_generic() anytime.
+
+This also reduces the number of reallocations in net_assign_generic().
+
+Reported-by: Sasha Levin <levinsasha928@gmail.com>
+Tested-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Sjur Brændeland <sjur.brandeland@stericsson.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Pavel Emelyanov <xemul@openvz.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/net_namespace.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
+
+ #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+
++static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
++
++static struct net_generic *net_alloc_generic(void)
++{
++ struct net_generic *ng;
++ size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
++
++ ng = kzalloc(generic_size, GFP_KERNEL);
++ if (ng)
++ ng->len = max_gen_ptrs;
++
++ return ng;
++}
++
+ static int net_assign_generic(struct net *net, int id, void *data)
+ {
+ struct net_generic *ng, *old_ng;
+@@ -43,8 +57,7 @@ static int net_assign_generic(struct net
+ if (old_ng->len >= id)
+ goto assign;
+
+- ng = kzalloc(sizeof(struct net_generic) +
+- id * sizeof(void *), GFP_KERNEL);
++ ng = net_alloc_generic();
+ if (ng == NULL)
+ return -ENOMEM;
+
+@@ -59,7 +72,6 @@ static int net_assign_generic(struct net
+ * the old copy for kfree after a grace period.
+ */
+
+- ng->len = id;
+ memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
+
+ rcu_assign_pointer(net->gen, ng);
+@@ -161,18 +173,6 @@ out_undo:
+ goto out;
+ }
+
+-static struct net_generic *net_alloc_generic(void)
+-{
+- struct net_generic *ng;
+- size_t generic_size = sizeof(struct net_generic) +
+- INITIAL_NET_GEN_PTRS * sizeof(void *);
+-
+- ng = kzalloc(generic_size, GFP_KERNEL);
+- if (ng)
+- ng->len = INITIAL_NET_GEN_PTRS;
+-
+- return ng;
+-}
+
+ #ifdef CONFIG_NET_NS
+ static struct kmem_cache *net_cachep;
+@@ -483,6 +483,7 @@ again:
+ }
+ return error;
+ }
++ max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
+ }
+ error = __register_pernet_operations(list, ops);
+ if (error) {
--- /dev/null
+From f4f47f433289346cbcc999282313a2969e959d64 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 24 Jan 2012 17:03:44 -0500
+Subject: rds: Make rds_sock_lock BH rather than IRQ safe.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit efc3dbc37412c027e363736b4f4c74ee5e8ecffc ]
+
+rds_sock_info() triggers locking warnings because we try to perform a
+local_bh_enable() (via sock_i_ino()) while hardware interrupts are
+disabled (via taking rds_sock_lock).
+
+There is no reason for rds_sock_lock to be a hardware IRQ disabling
+lock, none of these access paths run in hardware interrupt context.
+
+Therefore making it a BH disabling lock is safe and sufficient to
+fix this bug.
+
+Reported-by: Kumar Sanghvi <kumaras@chelsio.com>
+Reported-by: Josh Boyer <jwboyer@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/rds/af_rds.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -68,7 +68,6 @@ static int rds_release(struct socket *so
+ {
+ struct sock *sk = sock->sk;
+ struct rds_sock *rs;
+- unsigned long flags;
+
+ if (!sk)
+ goto out;
+@@ -94,10 +93,10 @@ static int rds_release(struct socket *so
+ rds_rdma_drop_keys(rs);
+ rds_notify_queue_get(rs, NULL);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+ list_del_init(&rs->rs_item);
+ rds_sock_count--;
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ rds_trans_put(rs->rs_transport);
+
+@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_
+
+ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
+ {
+- unsigned long flags;
+ struct rds_sock *rs;
+
+ sock_init_data(sock, sk);
+@@ -426,10 +424,10 @@ static int __rds_create(struct socket *s
+ spin_lock_init(&rs->rs_rdma_lock);
+ rs->rs_rdma_keys = RB_ROOT;
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+ list_add_tail(&rs->rs_item, &rds_sock_list);
+ rds_sock_count++;
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ return 0;
+ }
+@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct soc
+ {
+ struct rds_sock *rs;
+ struct rds_incoming *inc;
+- unsigned long flags;
+ unsigned int total = 0;
+
+ len /= sizeof(struct rds_info_message);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+
+ list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ read_lock(&rs->rs_recv_lock);
+@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct soc
+ read_unlock(&rs->rs_recv_lock);
+ }
+
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ lens->nr = total;
+ lens->each = sizeof(struct rds_info_message);
+@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket
+ {
+ struct rds_info_socket sinfo;
+ struct rds_sock *rs;
+- unsigned long flags;
+
+ len /= sizeof(struct rds_info_socket);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+
+ if (len < rds_sock_count)
+ goto out;
+@@ -529,7 +525,7 @@ out:
+ lens->nr = rds_sock_count;
+ lens->each = sizeof(struct rds_info_socket);
+
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+ }
+
+ static void rds_exit(void)
usb-cdc-wdm-call-wake_up_all-to-allow-driver-to-shutdown-on-device-removal.patch
usb-cdc-wdm-better-allocate-a-buffer-that-is-at-least-as-big-as-we-tell-the-usb-core.patch
usb-cdc-wdm-avoid-hanging-on-interface-with-no-usb_cdc_dmm_type.patch
+netns-fix-net_alloc_generic.patch
+netns-fail-conspicously-if-someone-uses-net_generic-at-an-inappropriate-time.patch
+net-caif-register-properly-as-a-pernet-subsystem.patch
+af_unix-fix-epollet-regression-for-stream-sockets.patch
+bonding-fix-enslaving-in-alb-mode-when-link-down.patch
+l2tp-l2tp_ip-fix-possible-oops-on-packet-receive.patch
+macvlan-fix-a-possible-use-after-free.patch
+net-bpf_jit-fix-divide-by-0-generation.patch
+net-reintroduce-missing-rcu_assign_pointer-calls.patch
+rds-make-rds_sock_lock-bh-rather-than-irq-safe.patch
+tcp-fix-tcp_trim_head-to-adjust-segment-count-with-skb-mss.patch
+tcp-md5-using-remote-adress-for-md5-lookup-in-rst-packet.patch
--- /dev/null
+From 1ed66f688642437530a70f34e0c2756cc04f723e Mon Sep 17 00:00:00 2001
+From: Neal Cardwell <ncardwell@google.com>
+Date: Sat, 28 Jan 2012 17:29:46 +0000
+Subject: tcp: fix tcp_trim_head() to adjust segment count with skb MSS
+
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 5b35e1e6e9ca651e6b291c96d1106043c9af314a ]
+
+This commit fixes tcp_trim_head() to recalculate the number of
+segments in the skb with the skb's existing MSS, so trimming the head
+causes the skb segment count to be monotonically non-increasing - it
+should stay the same or go down, but not increase.
+
+Previously tcp_trim_head() used the current MSS of the connection. But
+if there was a decrease in MSS between original transmission and ACK
+(e.g. due to PMTUD), this could cause tcp_trim_head() to
+counter-intuitively increase the segment count when trimming bytes off
+the head of an skb. This violated assumptions in tcp_tso_acked() that
+tcp_trim_head() only decreases the packet count, so that packets_acked
+in tcp_tso_acked() could underflow, leading tcp_clean_rtx_queue() to
+pass u32 pkts_acked values as large as 0xffffffff to
+ca_ops->pkts_acked().
+
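+A worked example with hypothetical numbers: a 2800-byte skb sent with
+an MSS of 1400 has a segment count of 2. Suppose the peer ACKs the
+first 1400 bytes and PMTUD has meanwhile lowered the MSS to 600.
+Trimming with the current MSS gave DIV_ROUND_UP(1400, 600) = 3
+segments, so tcp_tso_acked() computed 2 - 3 and packets_acked
+underflowed. Trimming with the skb's own MSS gives
+DIV_ROUND_UP(1400, 1400) = 1 segment and packets_acked = 1 as expected.
+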
+As an aside, if tcp_trim_head() had really wanted the skb to reflect
+the current MSS, it should have called tcp_set_skb_tso_segs()
+unconditionally, since a decrease in MSS would mean that a
+single-packet skb should now be sliced into multiple segments.
+
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Nandita Dukkipati <nanditad@google.com>
+Acked-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/tcp_output.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1138,11 +1138,9 @@ int tcp_trim_head(struct sock *sk, struc
+ sk_mem_uncharge(sk, len);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+
+- /* Any change of skb->len requires recalculation of tso
+- * factor and mss.
+- */
++ /* Any change of skb->len requires recalculation of tso factor. */
+ if (tcp_skb_pcount(skb) > 1)
+- tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
++ tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+
+ return 0;
+ }
--- /dev/null
+From c337bb50fb05b394229f9c95dce2e4fe79b7852c Mon Sep 17 00:00:00 2001
+From: shawnlu <shawn.lu@ericsson.com>
+Date: Fri, 20 Jan 2012 12:22:04 +0000
+Subject: tcp: md5: using remote adress for md5 lookup in rst packet
+
+
+From: shawnlu <shawn.lu@ericsson.com>
+
+[ Upstream commit 8a622e71f58ec9f092fc99eacae0e6cf14f6e742 ]
+
+The MD5 key is added to the socket through the remote address. The
+remote address should therefore be used to find the MD5 key when
+sending out a reset packet.
+
+Signed-off-by: shawnlu <shawn.lu@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv6/tcp_ipv6.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -630,7 +630,7 @@ static void tcp_v4_send_reset(struct soc
+ arg.iov[0].iov_len = sizeof(rep.th);
+
+ #ifdef CONFIG_TCP_MD5SIG
+- key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
++ key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
+ if (key) {
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1084,7 +1084,7 @@ static void tcp_v6_send_reset(struct soc
+
+ #ifdef CONFIG_TCP_MD5SIG
+ if (sk)
+- key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
++ key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
+ #endif
+
+ if (th->ack)