--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Wilson Kok <wkok@cumulusnetworks.com>
+Date: Tue, 22 Sep 2015 21:40:22 -0700
+Subject: fib_rules: fix fib rule dumps across multiple skbs
+
+From: Wilson Kok <wkok@cumulusnetworks.com>
+
+[ Upstream commit 41fc014332d91ee90c32840bf161f9685b7fbf2b ]
+
+dump_rules() returns the skb length, not an error code. But when
+family == AF_UNSPEC, the caller of dump_rules() assumes that it
+returns an error code. Hence, when family == AF_UNSPEC, we keep
+trying to dump on -EMSGSIZE errors, which results in an incorrect
+dump idx being carried between skbs belonging to the same dump. The
+net effect is that a fib rule dump only ever returns the rules that
+fit into the first skb.
+
+This patch changes dump_rules() to return an error so that we exit
+correctly and idx is maintained correctly between skbs that are part
+of the same dump.
+
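+For context, the AF_UNSPEC path in fib_nl_dumprule() walks every
+registered rules_ops and takes a negative return from dump_rules() to
+mean "this skb is full, resume here on the next one". A trimmed sketch
+of that caller (not part of this change):
+
+	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
+		if (idx < cb->args[0] || !try_module_get(ops->owner))
+			goto skip;
+
+		if (dump_rules(skb, cb, ops) < 0)
+			break;	/* skb full: continue in the next skb */
+
+		cb->args[1] = 0;	/* reset per-ops rule index */
+skip:
+		idx++;
+	}
+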
+Signed-off-by: Wilson Kok <wkok@cumulusnetworks.com>
+Signed-off-by: Roopa Prabhu <roopa@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/fib_rules.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -621,15 +621,17 @@ static int dump_rules(struct sk_buff *sk
+ {
+ int idx = 0;
+ struct fib_rule *rule;
++ int err = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rule, &ops->rules_list, list) {
+ if (idx < cb->args[1])
+ goto skip;
+
+- if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+- cb->nlh->nlmsg_seq, RTM_NEWRULE,
+- NLM_F_MULTI, ops) < 0)
++ err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, RTM_NEWRULE,
++ NLM_F_MULTI, ops);
++ if (err)
+ break;
+ skip:
+ idx++;
+@@ -638,7 +640,7 @@ skip:
+ cb->args[1] = idx;
+ rules_ops_put(ops);
+
+- return skb->len;
++ return err;
+ }
+
+ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -654,7 +656,9 @@ static int fib_nl_dumprule(struct sk_buf
+ if (ops == NULL)
+ return -EAFNOSUPPORT;
+
+- return dump_rules(skb, cb, ops);
++ dump_rules(skb, cb, ops);
++
++ return skb->len;
+ }
+
+ rcu_read_lock();
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: huaibin Wang <huaibin.wang@6wind.com>
+Date: Tue, 25 Aug 2015 16:20:34 +0200
+Subject: ip6_gre: release cached dst on tunnel removal
+
+From: huaibin Wang <huaibin.wang@6wind.com>
+
+[ Upstream commit d4257295ba1b389c693b79de857a96e4b7cd8ac0 ]
+
+When a tunnel is deleted, the cached dst entry should be released.
+
+This problem may prevent the removal of a netns (seen with an x-netns
+IPv6 gre tunnel):
+ unregister_netdevice: waiting for lo to become free. Usage count = 3
+
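+For reference, ip6_tnl_dst_reset() at this point in time simply drops
+the cached entry (a sketch from net/ipv6/ip6_tunnel.c, before the
+per-cpu dst cache rework):
+
+	void ip6_tnl_dst_reset(struct ip6_tnl *t)
+	{
+		dst_release(t->dst_cache);	/* the dst holds the dev ref */
+		t->dst_cache = NULL;
+	}
+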
+CC: Dmitry Kozlov <xeb@mail.ru>
+Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
+Signed-off-by: huaibin Wang <huaibin.wang@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_gre.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -357,6 +357,7 @@ static void ip6gre_tunnel_uninit(struct
+ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+ ip6gre_tunnel_unlink(ign, netdev_priv(dev));
++ ip6_tnl_dst_reset(netdev_priv(dev));
+ dev_put(dev);
+ }
+
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 3 Sep 2015 00:29:07 +0200
+Subject: ipv6: fix exthdrs offload registration in out_rt path
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit e41b0bedba0293b9e1e8d1e8ed553104b9693656 ]
+
+We previously registered the IPPROTO_ROUTING offload via
+inet6_add_offload(), but in the error path we try to unregister it
+with inet_del_offload(). This is not correct; it should be
+inet6_del_offload(). ipv6_exthdrs_offload_exit() from that commit was
+also incorrect (it used rthdr_offload twice), but it got removed
+entirely later on.
+
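+For reference, the IPv6 offload helpers come in a matched pair
+(declared in include/net/protocol.h, roughly):
+
+	int inet6_add_offload(const struct net_offload *prot,
+			      unsigned char protocol);
+	int inet6_del_offload(const struct net_offload *prot,
+			      unsigned char protocol);
+
+so an offload registered with the inet6_ variant must be unregistered
+with the inet6_ variant as well; inet_del_offload() touches the IPv4
+offload table instead.
+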
+Fixes: 3336288a9fea ("ipv6: Switch to using new offload infrastructure.")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/exthdrs_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -36,6 +36,6 @@ out:
+ return ret;
+
+ out_rt:
+- inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
++ inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+ goto out;
+ }
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Richard Laing <richard.laing@alliedtelesis.co.nz>
+Date: Thu, 3 Sep 2015 13:52:31 +1200
+Subject: net/ipv6: Correct PIM6 mrt_lock handling
+
+From: Richard Laing <richard.laing@alliedtelesis.co.nz>
+
+[ Upstream commit 25b4a44c19c83d98e8c0807a7ede07c1f28eab8b ]
+
+In the IPv6 multicast routing code the mrt_lock was not being released
+correctly in the MFC iterator: seq_stop() compared it->cache against
+the base of mfc6_cache_array, which only matches bucket 0, so
+read_unlock(&mrt_lock) was skipped whenever iteration stopped in any
+other bucket. As a result, adding or deleting a MIF would hang because
+the mrt_lock could not be acquired.
+
+This fix is a copy of the code for the IPv4 case and ensures that the
+lock is released correctly.
+
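+A trimmed sketch of the iterator invariant (not part of this change):
+while walking resolved entries, the iterator sets
+
+	it->cache = &mrt->mfc6_cache_array[it->ct];
+
+so seq_stop() must compare against the same expression; the bare array
+base only equals &mrt->mfc6_cache_array[0].
+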
+Signed-off-by: Richard Laing <richard.laing@alliedtelesis.co.nz>
+Acked-by: Cong Wang <cwang@twopensource.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6mr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -552,7 +552,7 @@ static void ipmr_mfc_seq_stop(struct seq
+
+ if (it->cache == &mrt->mfc6_unres_queue)
+ spin_unlock_bh(&mfc_unres_lock);
+- else if (it->cache == mrt->mfc6_cache_array)
++ else if (it->cache == &mrt->mfc6_cache_array[it->ct])
+ read_unlock(&mrt_lock);
+ }
+
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 10 Sep 2015 20:05:46 +0200
+Subject: netlink, mmap: transform mmap skb into full skb on taps
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 1853c949646005b5959c483becde86608f548f24 ]
+
+Ken-ichirou reported that running netlink in mmap mode for receive in
+combination with nlmon will throw a NULL pointer dereference in
+__kfree_skb() on nlmon_xmit(); in my case I can also trigger an "unable
+to handle kernel paging request". The problem is the skb_clone() in
+__netlink_deliver_tap_skb() for skbs that are mmaped.
+
+I.e. the cloned skb doesn't have a destructor, whereas the mmap netlink
+skb has its destructor pointed to netlink_skb_destructor(), set in the
+handler netlink_ring_setup_skb(). That destructor sets skb->head to
+NULL, so that for such skbs __kfree_skb() doesn't perform a
+skb_release_data() via skb_release_all(), where skb->head would
+otherwise be freed through kfree(head) into the slab allocator even
+though the netlink mmap skb->head points into the mmap buffer. The
+clone lacks this protection. Similarly, the same has to be done for
+large netlink skbs where the data area is vmalloced. Therefore, as
+discussed, make a copy for these rather rare cases for now. This fixes
+the issue on my and Ken-ichirou's test-cases.
+
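+In short, the hazard is (a sketch, not part of this change):
+
+	nskb = skb_clone(skb, GFP_ATOMIC);	/* nskb->head == skb->head */
+	...
+	kfree_skb(nskb);
+	  -> skb_release_all()
+	    -> skb_release_data()	/* kfree(skb->head), but head points
+					 * into the mmap ring or vmalloc area */
+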
+Reference: http://thread.gmane.org/gmane.linux.network/371129
+Fixes: bcbde0d449ed ("net: netlink: virtual tap device management")
+Reported-by: Ken-ichirou MATSUZAWA <chamaken@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Ken-ichirou MATSUZAWA <chamaken@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 30 +++++++++++++++++++++++-------
+ net/netlink/af_netlink.h | 9 +++++++++
+ 2 files changed, 32 insertions(+), 7 deletions(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -115,6 +115,24 @@ static inline struct hlist_head *nl_port
+ return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
+ }
+
++static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
++ gfp_t gfp_mask)
++{
++ unsigned int len = skb_end_offset(skb);
++ struct sk_buff *new;
++
++ new = alloc_skb(len, gfp_mask);
++ if (new == NULL)
++ return NULL;
++
++ NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
++ NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
++ NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
++
++ memcpy(skb_put(new, len), skb->data, len);
++ return new;
++}
++
+ int netlink_add_tap(struct netlink_tap *nt)
+ {
+ if (unlikely(nt->dev->type != ARPHRD_NETLINK))
+@@ -199,7 +217,11 @@ static int __netlink_deliver_tap_skb(str
+ int ret = -ENOMEM;
+
+ dev_hold(dev);
+- nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
++ nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
++ else
++ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb) {
+ nskb->dev = dev;
+ nskb->protocol = htons((u16) sk->sk_protocol);
+@@ -271,11 +293,6 @@ static void netlink_rcv_wake(struct sock
+ }
+
+ #ifdef CONFIG_NETLINK_MMAP
+-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+-{
+- return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+-}
+-
+ static bool netlink_rx_is_mmaped(struct sock *sk)
+ {
+ return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+@@ -827,7 +844,6 @@ static void netlink_ring_set_copied(stru
+ }
+
+ #else /* CONFIG_NETLINK_MMAP */
+-#define netlink_skb_is_mmaped(skb) false
+ #define netlink_rx_is_mmaped(sk) false
+ #define netlink_tx_is_mmaped(sk) false
+ #define netlink_mmap sock_no_mmap
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -65,6 +65,15 @@ struct nl_portid_hash {
+ u32 rnd;
+ };
+
++static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
++{
++#ifdef CONFIG_NETLINK_MMAP
++ return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
++#else
++ return false;
++#endif /* CONFIG_NETLINK_MMAP */
++}
++
+ struct netlink_table {
+ struct nl_portid_hash hash;
+ struct hlist_head mc_list;
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Jesse Gross <jesse@nicira.com>
+Date: Mon, 21 Sep 2015 20:21:20 -0700
+Subject: openvswitch: Zero flows on allocation.
+
+From: Jesse Gross <jesse@nicira.com>
+
+[ Upstream commit ae5f2fb1d51fa128a460bcfbe3c56d7ab8bf6a43 ]
+
+When support for megaflows was introduced, OVS needed to start
+installing flows with a mask applied to them. Since masking is an
+expensive operation, OVS also had an optimization that would only
+take the parts of the flow keys that were covered by a non-zero
+mask. The values stored in the remaining pieces should not matter
+because they are masked out.
+
+While this works fine for the purposes of matching (which must always
+look at the mask), serialization to netlink can be problematic. Since
+the flow and the mask are serialized separately, the uninitialized
+portions of the flow can be encoded with whatever values happen to be
+present.
+
+In terms of functionality, this has little effect since these fields
+will be masked out by definition. However, it leaks kernel memory to
+userspace, which is a potential security vulnerability. It is also
+possible that other code paths could look at the masked key and get
+uninitialized data, although this does not currently appear to be an
+issue in practice.
+
+This removes the mask optimization for flows that are being installed.
+This was always intended to be the case, as the mask optimizations were
+really targeting per-packet flow operations.
+
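+The masked copy itself stays a word-wise AND; 'full' only widens the
+region it covers (sketch matching the hunk below):
+
+	int start = full ? 0 : mask->range.start;
+	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
+	...
+	for (i = 0; i < len; i += sizeof(long))
+		*d++ = *s++ & *m++;
+
+Flow installs now pass full == true so the whole key is initialized
+before it can be serialized back to userspace; the per-packet lookup
+path keeps full == false and the old optimization.
+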
+Fixes: 03f0d916 ("openvswitch: Mega flow implementation")
+Signed-off-by: Jesse Gross <jesse@nicira.com>
+Acked-by: Pravin B Shelar <pshelar@nicira.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/datapath.c | 2 +-
+ net/openvswitch/flow_table.c | 21 ++++++++++++---------
+ net/openvswitch/flow_table.h | 2 +-
+ 3 files changed, 14 insertions(+), 11 deletions(-)
+
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -803,7 +803,7 @@ static int ovs_flow_cmd_new_or_set(struc
+ if (IS_ERR(acts))
+ goto error;
+
+- ovs_flow_mask_key(&masked_key, &key, &mask);
++ ovs_flow_mask_key(&masked_key, &key, true, &mask);
+ error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ &masked_key, 0, &acts);
+ if (error) {
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -55,18 +55,21 @@ static u16 range_n_bytes(const struct sw
+ }
+
+ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+- const struct sw_flow_mask *mask)
++ bool full, const struct sw_flow_mask *mask)
+ {
+- const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+- const long *s = (long *)((u8 *)src + mask->range.start);
+- long *d = (long *)((u8 *)dst + mask->range.start);
++ int start = full ? 0 : mask->range.start;
++ int len = full ? sizeof *dst : range_n_bytes(&mask->range);
++ const long *m = (const long *)((const u8 *)&mask->key + start);
++ const long *s = (const long *)((const u8 *)src + start);
++ long *d = (long *)((u8 *)dst + start);
+ int i;
+
+- /* The memory outside of the 'mask->range' are not set since
+- * further operations on 'dst' only uses contents within
+- * 'mask->range'.
++ /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
++ * if 'full' is false the memory outside of the 'mask->range' is left
++ * uninitialized. This can be used as an optimization when further
++ * operations on 'dst' only use contents within 'mask->range'.
+ */
+- for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
++ for (i = 0; i < len; i += sizeof(long))
+ *d++ = *s++ & *m++;
+ }
+
+@@ -436,7 +439,7 @@ static struct sw_flow *masked_flow_looku
+ u32 hash;
+ struct sw_flow_key masked_key;
+
+- ovs_flow_mask_key(&masked_key, unmasked, mask);
++ ovs_flow_mask_key(&masked_key, unmasked, false, mask);
+ hash = flow_hash(&masked_key, key_start, key_end);
+ head = find_bucket(ti, hash);
+ hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
+--- a/net/openvswitch/flow_table.h
++++ b/net/openvswitch/flow_table.h
+@@ -79,5 +79,5 @@ bool ovs_flow_cmp_unmasked_key(const str
+ struct sw_flow_match *match);
+
+ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+- const struct sw_flow_mask *mask);
++ bool full, const struct sw_flow_mask *mask);
+ #endif /* flow_table.h */
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Date: Thu, 10 Sep 2015 17:31:15 -0300
+Subject: sctp: fix race on protocol/netns initialization
+
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+
+[ Upstream commit 8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4 ]
+
+Consider that the sctp module is unloaded and is being requested
+because a user is creating an SCTP socket.
+
+During initialization, sctp will add the new protocol type and then
+initialize pernet subsys:
+
+ status = sctp_v4_protosw_init();
+ if (status)
+ goto err_protosw_init;
+
+ status = sctp_v6_protosw_init();
+ if (status)
+ goto err_v6_protosw_init;
+
+ status = register_pernet_subsys(&sctp_net_ops);
+
+The problem is that after those calls to sctp_v{4,6}_protosw_init(), it
+is possible for userspace to create SCTP sockets as if the module were
+already fully loaded. If that happens, one of the possible effects is
+that we will have readers of the net->sctp.local_addr_list list earlier
+than expected, and sctp_net_init() does not take precautions while
+dealing with that list, leading to a potential panic. It is not limited
+to that, as sctp_sock_init() will also copy a bunch of blank/partially
+initialized values from net->sctp.
+
+The race happens like this:
+
+ CPU 0 | CPU 1
+ socket() |
+ __sock_create | socket()
+ inet_create | __sock_create
+ list_for_each_entry_rcu( |
+ answer, &inetsw[sock->type], |
+ list) { | inet_create
+ /* no hits */ |
+ if (unlikely(err)) { |
+ ... |
+ request_module() |
+ /* socket creation is blocked |
+ * the module is fully loaded |
+ */ |
+ sctp_init |
+ sctp_v4_protosw_init |
+ inet_register_protosw |
+ list_add_rcu(&p->list, |
+ last_perm); |
+ | list_for_each_entry_rcu(
+ | answer, &inetsw[sock->type],
+ sctp_v6_protosw_init | list) {
+ | /* hit, so assumes protocol
+ | * is already loaded
+ | */
+ | /* socket creation continues
+ | * before netns is initialized
+ | */
+ register_pernet_subsys |
+
+Simply inverting the initialization order between
+register_pernet_subsys() and sctp_v4_protosw_init() is not possible,
+because register_pernet_subsys() will create a control sctp socket, so
+the protocol must already be visible by then. Deferring the socket
+creation to a workqueue is not good either, especially because we lose
+the ability to handle its errors.
+
+So, as suggested by Vlad, the fix is to split netns initialization into
+two stages, defaults and control socket, so that the defaults are
+already loaded by the time we register the protocol, while the control
+socket initialization is kept at the same moment it happens today.
+
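+With the fix applied, sctp_init() registers in this order (taken from
+the diff below):
+
+	status = register_pernet_subsys(&sctp_defaults_ops);
+	...
+	status = sctp_v4_protosw_init();
+	...
+	status = sctp_v6_protosw_init();
+	...
+	status = register_pernet_subsys(&sctp_ctrlsock_ops);
+
+so the pernet defaults exist before the protosw makes SCTP reachable,
+and the control socket is still created last.
+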
+Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace")
+Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/protocol.c | 64 +++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 41 insertions(+), 23 deletions(-)
+
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1167,7 +1167,7 @@ static void sctp_v4_del_protocol(void)
+ unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+ }
+
+-static int __net_init sctp_net_init(struct net *net)
++static int __net_init sctp_defaults_init(struct net *net)
+ {
+ int status;
+
+@@ -1260,12 +1260,6 @@ static int __net_init sctp_net_init(stru
+
+ sctp_dbg_objcnt_init(net);
+
+- /* Initialize the control inode/socket for handling OOTB packets. */
+- if ((status = sctp_ctl_sock_init(net))) {
+- pr_err("Failed to initialize the SCTP control sock\n");
+- goto err_ctl_sock_init;
+- }
+-
+ /* Initialize the local address list. */
+ INIT_LIST_HEAD(&net->sctp.local_addr_list);
+ spin_lock_init(&net->sctp.local_addr_lock);
+@@ -1281,9 +1275,6 @@ static int __net_init sctp_net_init(stru
+
+ return 0;
+
+-err_ctl_sock_init:
+- sctp_dbg_objcnt_exit(net);
+- sctp_proc_exit(net);
+ err_init_proc:
+ cleanup_sctp_mibs(net);
+ err_init_mibs:
+@@ -1292,15 +1283,12 @@ err_sysctl_register:
+ return status;
+ }
+
+-static void __net_exit sctp_net_exit(struct net *net)
++static void __net_exit sctp_defaults_exit(struct net *net)
+ {
+ /* Free the local address list */
+ sctp_free_addr_wq(net);
+ sctp_free_local_addr_list(net);
+
+- /* Free the control endpoint. */
+- inet_ctl_sock_destroy(net->sctp.ctl_sock);
+-
+ sctp_dbg_objcnt_exit(net);
+
+ sctp_proc_exit(net);
+@@ -1308,9 +1296,32 @@ static void __net_exit sctp_net_exit(str
+ sctp_sysctl_net_unregister(net);
+ }
+
+-static struct pernet_operations sctp_net_ops = {
+- .init = sctp_net_init,
+- .exit = sctp_net_exit,
++static struct pernet_operations sctp_defaults_ops = {
++ .init = sctp_defaults_init,
++ .exit = sctp_defaults_exit,
++};
++
++static int __net_init sctp_ctrlsock_init(struct net *net)
++{
++ int status;
++
++ /* Initialize the control inode/socket for handling OOTB packets. */
++ status = sctp_ctl_sock_init(net);
++ if (status)
++ pr_err("Failed to initialize the SCTP control sock\n");
++
++ return status;
++}
++
++static void __net_init sctp_ctrlsock_exit(struct net *net)
++{
++ /* Free the control endpoint. */
++ inet_ctl_sock_destroy(net->sctp.ctl_sock);
++}
++
++static struct pernet_operations sctp_ctrlsock_ops = {
++ .init = sctp_ctrlsock_init,
++ .exit = sctp_ctrlsock_exit,
+ };
+
+ /* Initialize the universe into something sensible. */
+@@ -1444,8 +1455,11 @@ static __init int sctp_init(void)
+ sctp_v4_pf_init();
+ sctp_v6_pf_init();
+
+- status = sctp_v4_protosw_init();
++ status = register_pernet_subsys(&sctp_defaults_ops);
++ if (status)
++ goto err_register_defaults;
+
++ status = sctp_v4_protosw_init();
+ if (status)
+ goto err_protosw_init;
+
+@@ -1453,9 +1467,9 @@ static __init int sctp_init(void)
+ if (status)
+ goto err_v6_protosw_init;
+
+- status = register_pernet_subsys(&sctp_net_ops);
++ status = register_pernet_subsys(&sctp_ctrlsock_ops);
+ if (status)
+- goto err_register_pernet_subsys;
++ goto err_register_ctrlsock;
+
+ status = sctp_v4_add_protocol();
+ if (status)
+@@ -1471,12 +1485,14 @@ out:
+ err_v6_add_protocol:
+ sctp_v4_del_protocol();
+ err_add_protocol:
+- unregister_pernet_subsys(&sctp_net_ops);
+-err_register_pernet_subsys:
++ unregister_pernet_subsys(&sctp_ctrlsock_ops);
++err_register_ctrlsock:
+ sctp_v6_protosw_exit();
+ err_v6_protosw_init:
+ sctp_v4_protosw_exit();
+ err_protosw_init:
++ unregister_pernet_subsys(&sctp_defaults_ops);
++err_register_defaults:
+ sctp_v4_pf_exit();
+ sctp_v6_pf_exit();
+ sctp_sysctl_unregister();
+@@ -1509,12 +1525,14 @@ static __exit void sctp_exit(void)
+ sctp_v6_del_protocol();
+ sctp_v4_del_protocol();
+
+- unregister_pernet_subsys(&sctp_net_ops);
++ unregister_pernet_subsys(&sctp_ctrlsock_ops);
+
+ /* Free protosw registrations */
+ sctp_v6_protosw_exit();
+ sctp_v4_protosw_exit();
+
++ unregister_pernet_subsys(&sctp_defaults_ops);
++
+ /* Unregister with socket layer. */
+ sctp_v6_pf_exit();
+ sctp_v4_pf_exit();
inet-frags-fix-defragmented-packet-s-ip-header-for-af_packet.patch
netlink-don-t-hold-mutex-in-rcu-callback-when-releasing-mmapd-ring.patch
net-mlx4_core-fix-wrong-index-in-propagating-port-change-event-to-vfs.patch
+ip6_gre-release-cached-dst-on-tunnel-removal.patch
+usbnet-get-event_no_runtime_pm-bit-before-it-is-cleared.patch
+ipv6-fix-exthdrs-offload-registration-in-out_rt-path.patch
+net-ipv6-correct-pim6-mrt_lock-handling.patch
+netlink-mmap-transform-mmap-skb-into-full-skb-on-taps.patch
+sctp-fix-race-on-protocol-netns-initialization.patch
+openvswitch-zero-flows-on-allocation.patch
+fib_rules-fix-fib-rule-dumps-across-multiple-skbs.patch
packet-missing-dev_put-in-packet_do_bind.patch
rds-fix-an-integer-overflow-test-in-rds_info_getsockopt.patch
udp-fix-dst-races-with-multicast-early-demux.patch
From foo@baz Sat Sep 26 11:19:08 PDT 2015
From: Florian Westphal <fw@strlen.de>
Date: Wed, 26 Aug 2015 22:17:39 -0700
-Subject: Subject: [PATCH 3.14-stable] net: gso: use feature flag argument in all protocol gso handlers
+Subject: net: gso: use feature flag argument in all protocol gso handlers
From: Florian Westphal <fw@strlen.de>
--- /dev/null
+From foo@baz Tue Sep 29 14:20:37 CEST 2015
+From: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+Date: Mon, 24 Aug 2015 23:13:42 +0300
+Subject: usbnet: Get EVENT_NO_RUNTIME_PM bit before it is cleared
+
+From: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+
+[ Upstream commit f50791ac1aca1ac1b0370d62397b43e9f831421a ]
+
+We need to check the EVENT_NO_RUNTIME_PM bit of dev->flags in
+usbnet_stop(), but its value must be read before it is cleared when
+dev->flags is set to 0.
+
+The problem was spotted and the fix was provided by
+Oliver Neukum <oneukum@suse.de>.
+
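+The pattern is simply to latch the bit into a local before the flags
+word is wiped (sketch matching the diff below):
+
+	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+	...
+	dev->flags = 0;		/* would otherwise discard the bit */
+	...
+	if (info->manage_power && mpn)
+		info->manage_power(dev, 0);
+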
+Signed-off-by: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
+ {
+ struct usbnet *dev = netdev_priv(net);
+ struct driver_info *info = dev->driver_info;
+- int retval, pm;
++ int retval, pm, mpn;
+
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ netif_stop_queue (net);
+@@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)
+
+ usbnet_purge_paused_rxq(dev);
+
++ mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
++
+ /* deferred work (task, timer, softirq) must also stop.
+ * can't flush_scheduled_work() until we drop rtnl (later),
+ * else workers could deadlock; so make workers a NOP.
+@@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
+- if (info->manage_power &&
+- !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
++ if (info->manage_power && mpn)
+ info->manage_power(dev, 0);
+ else
+ usb_autopm_put_interface(dev->intf);