--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:50 -0700
+Subject: inet: frags: add a pointer to struct netns_frags
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-3-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+In order to simplify the API, add to struct netns_frags a pointer to
+the struct inet_frags it is registered with: helpers can then recover
+it from the queue itself instead of taking an extra parameter.
+
+These functions no longer have a struct inet_frags parameter:
+
+inet_frag_destroy(struct inet_frag_queue *q /*, struct inet_frags *f */)
+inet_frag_put(struct inet_frag_queue *q /*, struct inet_frags *f */)
+inet_frag_kill(struct inet_frag_queue *q /*, struct inet_frags *f */)
+inet_frags_exit_net(struct netns_frags *nf /*, struct inet_frags *f */)
+ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
+
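+A sketch of the resulting pattern (assembled from the hunks below; the
+bodies are elided, not verbatim):
+
+	struct netns_frags {
+		...
+		struct inet_frags *f;	/* set once at netns init time */
+	};
+
+	void inet_frag_destroy(struct inet_frag_queue *q)
+	{
+		struct inet_frags *f = q->net->f; /* recovered from the queue */
+		...
+	}
+
+Each per-netns init path stores the back-pointer once, e.g.
+net->ipv4.frags.f = &ip4_frags;
+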
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 093ba72914b696521e4885756a68a3332782c8de)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 11 ++++++-----
+ include/net/ipv6.h | 3 +--
+ net/ieee802154/6lowpan/reassembly.c | 13 +++++++------
+ net/ipv4/inet_fragment.c | 17 ++++++++++-------
+ net/ipv4/ip_fragment.c | 9 +++++----
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 16 +++++++++-------
+ net/ipv6/reassembly.c | 20 ++++++++++----------
+ 7 files changed, 48 insertions(+), 41 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -9,6 +9,7 @@ struct netns_frags {
+ int high_thresh;
+ int low_thresh;
+ int max_dist;
++ struct inet_frags *f;
+ };
+
+ /**
+@@ -108,20 +109,20 @@ static inline int inet_frags_init_net(st
+ atomic_set(&nf->mem, 0);
+ return 0;
+ }
+-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
++void inet_frags_exit_net(struct netns_frags *nf);
+
+-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
+-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
++void inet_frag_kill(struct inet_frag_queue *q);
++void inet_frag_destroy(struct inet_frag_queue *q);
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key, unsigned int hash);
+
+ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+ const char *prefix);
+
+-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
++static inline void inet_frag_put(struct inet_frag_queue *q)
+ {
+ if (atomic_dec_and_test(&q->refcnt))
+- inet_frag_destroy(q, f);
++ inet_frag_destroy(q);
+ }
+
+ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -559,8 +559,7 @@ struct frag_queue {
+ u8 ecn;
+ };
+
+-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+- struct inet_frags *frags);
++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
+
+ static inline bool ipv6_addr_any(const struct in6_addr *a)
+ {
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -93,10 +93,10 @@ static void lowpan_frag_expire(unsigned
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+- inet_frag_kill(&fq->q, &lowpan_frags);
++ inet_frag_kill(&fq->q);
+ out:
+ spin_unlock(&fq->q.lock);
+- inet_frag_put(&fq->q, &lowpan_frags);
++ inet_frag_put(&fq->q);
+ }
+
+ static inline struct lowpan_frag_queue *
+@@ -229,7 +229,7 @@ static int lowpan_frag_reasm(struct lowp
+ struct sk_buff *fp, *head = fq->q.fragments;
+ int sum_truesize;
+
+- inet_frag_kill(&fq->q, &lowpan_frags);
++ inet_frag_kill(&fq->q);
+
+ /* Make the one we just received the head. */
+ if (prev) {
+@@ -437,7 +437,7 @@ int lowpan_frag_rcv(struct sk_buff *skb,
+ ret = lowpan_frag_queue(fq, skb, frag_type);
+ spin_unlock(&fq->q.lock);
+
+- inet_frag_put(&fq->q, &lowpan_frags);
++ inet_frag_put(&fq->q);
+ return ret;
+ }
+
+@@ -585,13 +585,14 @@ static int __net_init lowpan_frags_init_
+ ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
++ ieee802154_lowpan->frags.f = &lowpan_frags;
+
+ res = inet_frags_init_net(&ieee802154_lowpan->frags);
+ if (res < 0)
+ return res;
+ res = lowpan_frags_ns_sysctl_register(net);
+ if (res < 0)
+- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
++ inet_frags_exit_net(&ieee802154_lowpan->frags);
+ return res;
+ }
+
+@@ -601,7 +602,7 @@ static void __net_exit lowpan_frags_exit
+ net_ieee802154_lowpan(net);
+
+ lowpan_frags_ns_sysctl_unregister(net);
+- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
++ inet_frags_exit_net(&ieee802154_lowpan->frags);
+ }
+
+ static struct pernet_operations lowpan_frags_ops = {
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -219,8 +219,9 @@ void inet_frags_fini(struct inet_frags *
+ }
+ EXPORT_SYMBOL(inet_frags_fini);
+
+-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
++void inet_frags_exit_net(struct netns_frags *nf)
+ {
++ struct inet_frags *f = nf->f;
+ unsigned int seq;
+ int i;
+
+@@ -264,33 +265,34 @@ __acquires(hb->chain_lock)
+ return hb;
+ }
+
+-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
++static inline void fq_unlink(struct inet_frag_queue *fq)
+ {
+ struct inet_frag_bucket *hb;
+
+- hb = get_frag_bucket_locked(fq, f);
++ hb = get_frag_bucket_locked(fq, fq->net->f);
+ hlist_del(&fq->list);
+ fq->flags |= INET_FRAG_COMPLETE;
+ spin_unlock(&hb->chain_lock);
+ }
+
+-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
++void inet_frag_kill(struct inet_frag_queue *fq)
+ {
+ if (del_timer(&fq->timer))
+ atomic_dec(&fq->refcnt);
+
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+- fq_unlink(fq, f);
++ fq_unlink(fq);
+ atomic_dec(&fq->refcnt);
+ }
+ }
+ EXPORT_SYMBOL(inet_frag_kill);
+
+-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
++void inet_frag_destroy(struct inet_frag_queue *q)
+ {
+ struct sk_buff *fp;
+ struct netns_frags *nf;
+ unsigned int sum, sum_truesize = 0;
++ struct inet_frags *f;
+
+ WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
+ WARN_ON(del_timer(&q->timer) != 0);
+@@ -298,6 +300,7 @@ void inet_frag_destroy(struct inet_frag_
+ /* Release all fragment data. */
+ fp = q->fragments;
+ nf = q->net;
++ f = nf->f;
+ while (fp) {
+ struct sk_buff *xp = fp->next;
+
+@@ -333,7 +336,7 @@ static struct inet_frag_queue *inet_frag
+ atomic_inc(&qp->refcnt);
+ spin_unlock(&hb->chain_lock);
+ qp_in->flags |= INET_FRAG_COMPLETE;
+- inet_frag_put(qp_in, f);
++ inet_frag_put(qp_in);
+ return qp;
+ }
+ }
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -167,7 +167,7 @@ static void ip4_frag_free(struct inet_fr
+
+ static void ipq_put(struct ipq *ipq)
+ {
+- inet_frag_put(&ipq->q, &ip4_frags);
++ inet_frag_put(&ipq->q);
+ }
+
+ /* Kill ipq entry. It is not destroyed immediately,
+@@ -175,7 +175,7 @@ static void ipq_put(struct ipq *ipq)
+ */
+ static void ipq_kill(struct ipq *ipq)
+ {
+- inet_frag_kill(&ipq->q, &ip4_frags);
++ inet_frag_kill(&ipq->q);
+ }
+
+ static bool frag_expire_skip_icmp(u32 user)
+@@ -875,20 +875,21 @@ static int __net_init ipv4_frags_init_ne
+ net->ipv4.frags.timeout = IP_FRAG_TIME;
+
+ net->ipv4.frags.max_dist = 64;
++ net->ipv4.frags.f = &ip4_frags;
+
+ res = inet_frags_init_net(&net->ipv4.frags);
+ if (res < 0)
+ return res;
+ res = ip4_frags_ns_ctl_register(net);
+ if (res < 0)
+- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
++ inet_frags_exit_net(&net->ipv4.frags);
+ return res;
+ }
+
+ static void __net_exit ipv4_frags_exit_net(struct net *net)
+ {
+ ip4_frags_ns_ctl_unregister(net);
+- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
++ inet_frags_exit_net(&net->ipv4.frags);
+ }
+
+ static struct pernet_operations ip4_frags_ops = {
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -177,7 +177,7 @@ static void nf_ct_frag6_expire(unsigned
+ fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+ net = container_of(fq->q.net, struct net, nf_frag.frags);
+
+- ip6_expire_frag_queue(net, fq, &nf_frags);
++ ip6_expire_frag_queue(net, fq);
+ }
+
+ /* Creation primitives. */
+@@ -263,7 +263,7 @@ static int nf_ct_frag6_queue(struct frag
+ * this case. -DaveM
+ */
+ pr_debug("end of fragment not rounded to 8 bytes.\n");
+- inet_frag_kill(&fq->q, &nf_frags);
++ inet_frag_kill(&fq->q);
+ return -EPROTO;
+ }
+ if (end > fq->q.len) {
+@@ -356,7 +356,7 @@ found:
+ return 0;
+
+ discard_fq:
+- inet_frag_kill(&fq->q, &nf_frags);
++ inet_frag_kill(&fq->q);
+ err:
+ return -EINVAL;
+ }
+@@ -378,7 +378,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq,
+ int payload_len;
+ u8 ecn;
+
+- inet_frag_kill(&fq->q, &nf_frags);
++ inet_frag_kill(&fq->q);
+
+ WARN_ON(head == NULL);
+ WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
+@@ -623,7 +623,7 @@ int nf_ct_frag6_gather(struct net *net,
+
+ out_unlock:
+ spin_unlock_bh(&fq->q.lock);
+- inet_frag_put(&fq->q, &nf_frags);
++ inet_frag_put(&fq->q);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
+@@ -635,19 +635,21 @@ static int nf_ct_net_init(struct net *ne
+ net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
++ net->nf_frag.frags.f = &nf_frags;
++
+ res = inet_frags_init_net(&net->nf_frag.frags);
+ if (res < 0)
+ return res;
+ res = nf_ct_frag6_sysctl_register(net);
+ if (res < 0)
+- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
++ inet_frags_exit_net(&net->nf_frag.frags);
+ return res;
+ }
+
+ static void nf_ct_net_exit(struct net *net)
+ {
+ nf_ct_frags6_sysctl_unregister(net);
+- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
++ inet_frags_exit_net(&net->nf_frag.frags);
+ }
+
+ static struct pernet_operations nf_ct_net_ops = {
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -128,8 +128,7 @@ void ip6_frag_init(struct inet_frag_queu
+ }
+ EXPORT_SYMBOL(ip6_frag_init);
+
+-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+- struct inet_frags *frags)
++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
+ {
+ struct net_device *dev = NULL;
+
+@@ -138,7 +137,7 @@ void ip6_expire_frag_queue(struct net *n
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+- inet_frag_kill(&fq->q, frags);
++ inet_frag_kill(&fq->q);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, fq->iif);
+@@ -166,7 +165,7 @@ out_rcu_unlock:
+ rcu_read_unlock();
+ out:
+ spin_unlock(&fq->q.lock);
+- inet_frag_put(&fq->q, frags);
++ inet_frag_put(&fq->q);
+ }
+ EXPORT_SYMBOL(ip6_expire_frag_queue);
+
+@@ -178,7 +177,7 @@ static void ip6_frag_expire(unsigned lon
+ fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+ net = container_of(fq->q.net, struct net, ipv6.frags);
+
+- ip6_expire_frag_queue(net, fq, &ip6_frags);
++ ip6_expire_frag_queue(net, fq);
+ }
+
+ static struct frag_queue *
+@@ -359,7 +358,7 @@ found:
+ return -1;
+
+ discard_fq:
+- inet_frag_kill(&fq->q, &ip6_frags);
++ inet_frag_kill(&fq->q);
+ err:
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASMFAILS);
+@@ -386,7 +385,7 @@ static int ip6_frag_reasm(struct frag_qu
+ int sum_truesize;
+ u8 ecn;
+
+- inet_frag_kill(&fq->q, &ip6_frags);
++ inet_frag_kill(&fq->q);
+
+ ecn = ip_frag_ecn_table[fq->ecn];
+ if (unlikely(ecn == 0xff))
+@@ -563,7 +562,7 @@ static int ipv6_frag_rcv(struct sk_buff
+ ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+
+ spin_unlock(&fq->q.lock);
+- inet_frag_put(&fq->q, &ip6_frags);
++ inet_frag_put(&fq->q);
+ return ret;
+ }
+
+@@ -714,6 +713,7 @@ static int __net_init ipv6_frags_init_ne
+ net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
++ net->ipv6.frags.f = &ip6_frags;
+
+ res = inet_frags_init_net(&net->ipv6.frags);
+ if (res < 0)
+@@ -721,14 +721,14 @@ static int __net_init ipv6_frags_init_ne
+
+ res = ip6_frags_ns_sysctl_register(net);
+ if (res < 0)
+- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
++ inet_frags_exit_net(&net->ipv6.frags);
+ return res;
+ }
+
+ static void __net_exit ipv6_frags_exit_net(struct net *net)
+ {
+ ip6_frags_ns_sysctl_unregister(net);
+- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
++ inet_frags_exit_net(&net->ipv6.frags);
+ }
+
+ static struct pernet_operations ip6_frags_ops = {
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:00 -0700
+Subject: inet: frags: break the 2GB limit for frags storage
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-13-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Some users are willing to provision huge amounts of memory to be able
+to perform reassembly reasonably well under pressure.
+
+Current memory tracking is using one atomic_t and integers.
+
+Switch to atomic_long_t so that 64bit arches can use more than 2GB,
+without any cost for 32bit arches.
+
+Note that this patch also fixes an overflow: if high_thresh was set to
+~2GB, this test in inet_frag_alloc() was never true:
+
+if (... || frag_mem_limit(nf) > nf->high_thresh)
+
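+To see the old failure mode (a minimal illustration, not kernel code):
+with int accounting both sides of that comparison are 32-bit, so a
+high_thresh near INT_MAX can never be exceeded before the counter
+wraps:
+
+	int mem = 0x7ffff000;	/* ~2GB already charged */
+	mem += 65536;		/* next fragment: value wraps negative */
+	/* mem > high_thresh is now false; the limit is never enforced */
+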
+Tested:
+
+$ echo 16000000000 >/proc/sys/net/ipv4/ipfrag_high_thresh
+
+<frag DDOS>
+
+$ grep FRAG /proc/net/sockstat
+FRAG: inuse 14705885 memory 16000002880
+
+$ nstat -n ; sleep 1 ; nstat | grep Reas
+IpReasmReqds 3317150 0.0
+IpReasmFails 3317112 0.0
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 3e67f106f619dcfaf6f4e2039599bdb69848c714)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/networking/ip-sysctl.txt | 4 ++--
+ include/net/inet_frag.h | 20 ++++++++++----------
+ net/ieee802154/6lowpan/reassembly.c | 10 +++++-----
+ net/ipv4/ip_fragment.c | 10 +++++-----
+ net/ipv4/proc.c | 2 +-
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 10 +++++-----
+ net/ipv6/proc.c | 2 +-
+ net/ipv6/reassembly.c | 6 +++---
+ 8 files changed, 32 insertions(+), 32 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -122,10 +122,10 @@ min_adv_mss - INTEGER
+
+ IP Fragmentation:
+
+-ipfrag_high_thresh - INTEGER
++ipfrag_high_thresh - LONG INTEGER
+ Maximum memory used to reassemble IP fragments.
+
+-ipfrag_low_thresh - INTEGER
++ipfrag_low_thresh - LONG INTEGER
+ (Obsolete since linux-4.17)
+ Maximum memory used to reassemble IP fragments before the kernel
+ begins to remove incomplete fragment queues to free up resources.
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -7,11 +7,11 @@ struct netns_frags {
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+- atomic_t mem ____cacheline_aligned_in_smp;
++ atomic_long_t mem ____cacheline_aligned_in_smp;
+ /* sysctls */
++ long high_thresh;
++ long low_thresh;
+ int timeout;
+- int high_thresh;
+- int low_thresh;
+ int max_dist;
+ struct inet_frags *f;
+ };
+@@ -101,7 +101,7 @@ void inet_frags_fini(struct inet_frags *
+
+ static inline int inet_frags_init_net(struct netns_frags *nf)
+ {
+- atomic_set(&nf->mem, 0);
++ atomic_long_set(&nf->mem, 0);
+ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
+ }
+ void inet_frags_exit_net(struct netns_frags *nf);
+@@ -118,19 +118,19 @@ static inline void inet_frag_put(struct
+
+ /* Memory Tracking Functions. */
+
+-static inline int frag_mem_limit(struct netns_frags *nf)
++static inline long frag_mem_limit(const struct netns_frags *nf)
+ {
+- return atomic_read(&nf->mem);
++ return atomic_long_read(&nf->mem);
+ }
+
+-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
++static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
+ {
+- atomic_sub(i, &nf->mem);
++ atomic_long_sub(val, &nf->mem);
+ }
+
+-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
++static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
+ {
+- atomic_add(i, &nf->mem);
++ atomic_long_add(val, &nf->mem);
+ }
+
+ /* RFC 3168 support :
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -410,23 +410,23 @@ err:
+ }
+
+ #ifdef CONFIG_SYSCTL
+-static int zero;
++static long zero;
+
+ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
+ {
+ .procname = "6lowpanfrag_high_thresh",
+ .data = &init_net.ieee802154_lowpan.frags.high_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
+ },
+ {
+ .procname = "6lowpanfrag_low_thresh",
+ .data = &init_net.ieee802154_lowpan.frags.low_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
+ },
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -681,23 +681,23 @@ struct sk_buff *ip_check_defrag(struct n
+ EXPORT_SYMBOL(ip_check_defrag);
+
+ #ifdef CONFIG_SYSCTL
+-static int zero;
++static long zero;
+
+ static struct ctl_table ip4_frags_ns_ctl_table[] = {
+ {
+ .procname = "ipfrag_high_thresh",
+ .data = &init_net.ipv4.frags.high_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &init_net.ipv4.frags.low_thresh
+ },
+ {
+ .procname = "ipfrag_low_thresh",
+ .data = &init_net.ipv4.frags.low_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &init_net.ipv4.frags.high_thresh
+ },
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -73,7 +73,7 @@ static int sockstat_seq_show(struct seq_
+ sock_prot_inuse_get(net, &udplite_prot));
+ seq_printf(seq, "RAW: inuse %d\n",
+ sock_prot_inuse_get(net, &raw_prot));
+- seq_printf(seq, "FRAG: inuse %u memory %u\n",
++ seq_printf(seq, "FRAG: inuse %u memory %lu\n",
+ atomic_read(&net->ipv4.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv4.frags));
+ return 0;
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -63,7 +63,7 @@ struct nf_ct_frag6_skb_cb
+ static struct inet_frags nf_frags;
+
+ #ifdef CONFIG_SYSCTL
+-static int zero;
++static long zero;
+
+ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
+ {
+@@ -76,18 +76,18 @@ static struct ctl_table nf_ct_frag6_sysc
+ {
+ .procname = "nf_conntrack_frag6_low_thresh",
+ .data = &init_net.nf_frag.frags.low_thresh,
+- .maxlen = sizeof(unsigned int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &init_net.nf_frag.frags.high_thresh
+ },
+ {
+ .procname = "nf_conntrack_frag6_high_thresh",
+ .data = &init_net.nf_frag.frags.high_thresh,
+- .maxlen = sizeof(unsigned int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &init_net.nf_frag.frags.low_thresh
+ },
+ { }
+--- a/net/ipv6/proc.c
++++ b/net/ipv6/proc.c
+@@ -47,7 +47,7 @@ static int sockstat6_seq_show(struct seq
+ sock_prot_inuse_get(net, &udplitev6_prot));
+ seq_printf(seq, "RAW6: inuse %d\n",
+ sock_prot_inuse_get(net, &rawv6_prot));
+- seq_printf(seq, "FRAG6: inuse %u memory %u\n",
++ seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
+ atomic_read(&net->ipv6.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv6.frags));
+ return 0;
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -546,15 +546,15 @@ static struct ctl_table ip6_frags_ns_ctl
+ {
+ .procname = "ip6frag_high_thresh",
+ .data = &init_net.ipv6.frags.high_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &init_net.ipv6.frags.low_thresh
+ },
+ {
+ .procname = "ip6frag_low_thresh",
+ .data = &init_net.ipv6.frags.low_thresh,
+- .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:49 -0700
+Subject: inet: frags: change inet_frags_init_net() return value
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-2-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+We will soon initialize one rhashtable per struct netns_frags
+in inet_frags_init_net().
+
+This patch changes the return value so that an error can eventually be
+propagated.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 787bea7748a76130566f881c2342a0be4127d182)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 3 ++-
+ net/ieee802154/6lowpan/reassembly.c | 11 ++++++++---
+ net/ipv4/ip_fragment.c | 12 +++++++++---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 12 +++++++++---
+ net/ipv6/reassembly.c | 11 +++++++++--
+ 5 files changed, 37 insertions(+), 12 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -103,9 +103,10 @@ struct inet_frags {
+ int inet_frags_init(struct inet_frags *);
+ void inet_frags_fini(struct inet_frags *);
+
+-static inline void inet_frags_init_net(struct netns_frags *nf)
++static inline int inet_frags_init_net(struct netns_frags *nf)
+ {
+ atomic_set(&nf->mem, 0);
++ return 0;
+ }
+ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -580,14 +580,19 @@ static int __net_init lowpan_frags_init_
+ {
+ struct netns_ieee802154_lowpan *ieee802154_lowpan =
+ net_ieee802154_lowpan(net);
++ int res;
+
+ ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+
+- inet_frags_init_net(&ieee802154_lowpan->frags);
+-
+- return lowpan_frags_ns_sysctl_register(net);
++ res = inet_frags_init_net(&ieee802154_lowpan->frags);
++ if (res < 0)
++ return res;
++ res = lowpan_frags_ns_sysctl_register(net);
++ if (res < 0)
++ inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
++ return res;
+ }
+
+ static void __net_exit lowpan_frags_exit_net(struct net *net)
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -849,6 +849,8 @@ static void __init ip4_frags_ctl_registe
+
+ static int __net_init ipv4_frags_init_net(struct net *net)
+ {
++ int res;
++
+ /* Fragment cache limits.
+ *
+ * The fragment memory accounting code, (tries to) account for
+@@ -874,9 +876,13 @@ static int __net_init ipv4_frags_init_ne
+
+ net->ipv4.frags.max_dist = 64;
+
+- inet_frags_init_net(&net->ipv4.frags);
+-
+- return ip4_frags_ns_ctl_register(net);
++ res = inet_frags_init_net(&net->ipv4.frags);
++ if (res < 0)
++ return res;
++ res = ip4_frags_ns_ctl_register(net);
++ if (res < 0)
++ inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
++ return res;
+ }
+
+ static void __net_exit ipv4_frags_exit_net(struct net *net)
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -630,12 +630,18 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
+
+ static int nf_ct_net_init(struct net *net)
+ {
++ int res;
++
+ net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
+- inet_frags_init_net(&net->nf_frag.frags);
+-
+- return nf_ct_frag6_sysctl_register(net);
++ res = inet_frags_init_net(&net->nf_frag.frags);
++ if (res < 0)
++ return res;
++ res = nf_ct_frag6_sysctl_register(net);
++ if (res < 0)
++ inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
++ return res;
+ }
+
+ static void nf_ct_net_exit(struct net *net)
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -709,13 +709,20 @@ static void ip6_frags_sysctl_unregister(
+
+ static int __net_init ipv6_frags_init_net(struct net *net)
+ {
++ int res;
++
+ net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+
+- inet_frags_init_net(&net->ipv6.frags);
++ res = inet_frags_init_net(&net->ipv6.frags);
++ if (res < 0)
++ return res;
+
+- return ip6_frags_ns_sysctl_register(net);
++ res = ip6_frags_ns_sysctl_register(net);
++ if (res < 0)
++ inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
++ return res;
+ }
+
+ static void __net_exit ipv6_frags_exit_net(struct net *net)
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:01 -0700
+Subject: inet: frags: do not clone skb in ip_expire()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-14-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+An skb_clone() was added in commit ec4fbd64751d ("inet: frag: release
+spinlock before calling icmp_send()")
+
+While that fixed the bug at the time, it also added a very high cost
+under frag DDOS, as the ICMP rate limit is applied after this
+expensive operation (skb_clone() + consume_skb(), implying memory
+allocations, a copy, and freeing).
+
+We can use skb_get(head) here; all we want is to make sure the skb
+won't be freed by another cpu.
+
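+For scale, the cheap path is a single refcount bump (the 4.9-era helper,
+quoted from memory for illustration):
+
+	static inline struct sk_buff *skb_get(struct sk_buff *skb)
+	{
+		atomic_inc(&skb->users);
+		return skb;
+	}
+
+whereas skb_clone() allocates a second struct sk_buff from a slab and
+copies header state, all before the ICMP rate limit has been consulted.
+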
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 1eec5d5670084ee644597bd26c25e22c69b9f748)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -141,8 +141,8 @@ static bool frag_expire_skip_icmp(u32 us
+ */
+ static void ip_expire(unsigned long arg)
+ {
+- struct sk_buff *clone, *head;
+ const struct iphdr *iph;
++ struct sk_buff *head;
+ struct net *net;
+ struct ipq *qp;
+ int err;
+@@ -185,16 +185,12 @@ static void ip_expire(unsigned long arg)
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;
+
+- clone = skb_clone(head, GFP_ATOMIC);
++ skb_get(head);
++ spin_unlock(&qp->q.lock);
++ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
++ kfree_skb(head);
++ goto out_rcu_unlock;
+
+- /* Send an ICMP "Fragment Reassembly Timeout" message. */
+- if (clone) {
+- spin_unlock(&qp->q.lock);
+- icmp_send(clone, ICMP_TIME_EXCEEDED,
+- ICMP_EXC_FRAGTIME, 0);
+- consume_skb(clone);
+- goto out_rcu_unlock;
+- }
+ out:
+ spin_unlock(&qp->q.lock);
+ out_rcu_unlock:
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:06 -0700
+Subject: inet: frags: fix ip6frag_low_thresh boundary
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-19-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Giving an integer to proc_doulongvec_minmax() is dangerous on 64bit arches,
+since the linker might place a non-zero value next to it, preventing any
+change to ip6frag_low_thresh.
+
+ip6frag_low_thresh is not used anymore in the kernel, but we do not
+want to prematurely break user scripts wanting to change it.
+
+Since specifying a minimal value of 0 for proc_doulongvec_minmax()
+is moot, let's remove these zero values in all defrag units.
+
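+Spelled out (illustrative only): proc_doulongvec_minmax() dereferences
+extra1 as an unsigned long, so on a 64-bit arch
+
+	static int zero;	/* only 4 bytes guaranteed to be zero */
+	...
+	.extra1 = &zero,	/* but 8 bytes are read from here */
+
+silently includes whatever the linker placed in the adjacent 4 bytes,
+and the enforced minimum may not be 0 at all.
+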
+Fixes: 6e00f7dd5e4e ("ipv6: frags: fix /proc/sys/net/ipv6/ip6frag_low_thresh")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Maciej Żenczykowski <maze@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 3d23401283e80ceb03f765842787e0e79ff598b7)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ieee802154/6lowpan/reassembly.c | 2 -
+ net/ipv4/ip_fragment.c | 40 ++++++++++++--------------------
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 2 -
+ net/ipv6/reassembly.c | 4 ---
+ 4 files changed, 17 insertions(+), 31 deletions(-)
+
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -410,7 +410,6 @@ err:
+ }
+
+ #ifdef CONFIG_SYSCTL
+-static long zero;
+
+ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
+ {
+@@ -427,7 +426,6 @@ static struct ctl_table lowpan_frags_ns_
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+- .extra1 = &zero,
+ .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
+ },
+ {
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -56,14 +56,6 @@
+ */
+ static const char ip_frag_cache_name[] = "ip4-frags";
+
+-struct ipfrag_skb_cb
+-{
+- struct inet_skb_parm h;
+- int offset;
+-};
+-
+-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+-
+ /* Describe an entry in the "incomplete datagrams" queue. */
+ struct ipq {
+ struct inet_frag_queue q;
+@@ -351,13 +343,13 @@ static int ip_frag_queue(struct ipq *qp,
+ * this fragment, right?
+ */
+ prev = qp->q.fragments_tail;
+- if (!prev || FRAG_CB(prev)->offset < offset) {
++ if (!prev || prev->ip_defrag_offset < offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = qp->q.fragments; next != NULL; next = next->next) {
+- if (FRAG_CB(next)->offset >= offset)
++ if (next->ip_defrag_offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+@@ -368,7 +360,7 @@ found:
+ * any overlaps are eliminated.
+ */
+ if (prev) {
+- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
++ int i = (prev->ip_defrag_offset + prev->len) - offset;
+
+ if (i > 0) {
+ offset += i;
+@@ -385,8 +377,8 @@ found:
+
+ err = -ENOMEM;
+
+- while (next && FRAG_CB(next)->offset < end) {
+- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
++ while (next && next->ip_defrag_offset < end) {
++ int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
+
+ if (i < next->len) {
+ int delta = -next->truesize;
+@@ -399,7 +391,7 @@ found:
+ delta += next->truesize;
+ if (delta)
+ add_frag_mem_limit(qp->q.net, delta);
+- FRAG_CB(next)->offset += i;
++ next->ip_defrag_offset += i;
+ qp->q.meat -= i;
+ if (next->ip_summed != CHECKSUM_UNNECESSARY)
+ next->ip_summed = CHECKSUM_NONE;
+@@ -423,7 +415,13 @@ found:
+ }
+ }
+
+- FRAG_CB(skb)->offset = offset;
++ /* Note : skb->ip_defrag_offset and skb->dev share the same location */
++ dev = skb->dev;
++ if (dev)
++ qp->iif = dev->ifindex;
++ /* Makes sure compiler wont do silly aliasing games */
++ barrier();
++ skb->ip_defrag_offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+@@ -434,11 +432,6 @@ found:
+ else
+ qp->q.fragments = skb;
+
+- dev = skb->dev;
+- if (dev) {
+- qp->iif = dev->ifindex;
+- skb->dev = NULL;
+- }
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+@@ -514,7 +507,7 @@ static int ip_frag_reasm(struct ipq *qp,
+ }
+
+ WARN_ON(!head);
+- WARN_ON(FRAG_CB(head)->offset != 0);
++ WARN_ON(head->ip_defrag_offset != 0);
+
+ /* Allocate a new buffer for the datagram. */
+ ihlen = ip_hdrlen(head);
+@@ -677,7 +670,7 @@ struct sk_buff *ip_check_defrag(struct n
+ EXPORT_SYMBOL(ip_check_defrag);
+
+ #ifdef CONFIG_SYSCTL
+-static long zero;
++static int dist_min;
+
+ static struct ctl_table ip4_frags_ns_ctl_table[] = {
+ {
+@@ -694,7 +687,6 @@ static struct ctl_table ip4_frags_ns_ctl
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+- .extra1 = &zero,
+ .extra2 = &init_net.ipv4.frags.high_thresh
+ },
+ {
+@@ -710,7 +702,7 @@ static struct ctl_table ip4_frags_ns_ctl
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+- .extra1 = &zero
++ .extra1 = &dist_min,
+ },
+ { }
+ };
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb
+ static struct inet_frags nf_frags;
+
+ #ifdef CONFIG_SYSCTL
+-static long zero;
+
+ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
+ {
+@@ -79,7 +78,6 @@ static struct ctl_table nf_ct_frag6_sysc
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+- .extra1 = &zero,
+ .extra2 = &init_net.nf_frag.frags.high_thresh
+ },
+ {
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -548,7 +548,6 @@ static const struct inet6_protocol frag_
+ };
+
+ #ifdef CONFIG_SYSCTL
+-static int zero;
+
+ static struct ctl_table ip6_frags_ns_ctl_table[] = {
+ {
+@@ -564,8 +563,7 @@ static struct ctl_table ip6_frags_ns_ctl
+ .data = &init_net.ipv6.frags.low_thresh,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
+- .extra1 = &zero,
++ .proc_handler = proc_doulongvec_minmax,
+ .extra2 = &init_net.ipv6.frags.high_thresh
+ },
+ {
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:05 -0700
+Subject: inet: frags: get rid of ipfrag_skb_cb/FRAG_CB
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-18-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+ip_defrag uses skb->cb[] to store the fragment offset, and unfortunately
+this integer is currently in a different cache line than skb->next,
+meaning that we use two cache lines per skb when finding the insertion point.
+
+By aliasing skb->ip_defrag_offset and skb->dev, we pack all the fields
+in a single cache line and save precious memory bandwidth.
+
+Note that after the fast path added by Changli Gao in commit
+d6bebca92c66 ("fragment: add fast path for in-order fragments")
+this change won't help the fast path, since we still need
+to access prev->len (2nd cache line), but will show great
+benefits when the slow path is entered, since we perform
+a linear scan of a potentially long list.
+
+Also, note that this potential long list is an attack vector,
+we might consider also using an rb-tree there eventually.
+
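+The store side has to be ordered carefully, since writing the offset
+clobbers skb->dev; the ip_fragment.c hunks (carried with an earlier
+patch in this queue) do it like this:
+
+	dev = skb->dev;			/* consume dev first */
+	if (dev)
+		qp->iif = dev->ifindex;
+	barrier();			/* no compiler reordering/aliasing games */
+	skb->ip_defrag_offset = offset;	/* now reuse the slot */
+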
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit bf66337140c64c27fa37222b7abca7e49d63fb57)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -645,6 +645,11 @@ struct sk_buff {
+ };
+ struct rb_node rbnode; /* used in netem & tcp stack */
+ };
++
++ union {
++ int ip_defrag_offset;
++ };
++
+ struct sock *sk;
+ struct net_device *dev;
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:58 -0700
+Subject: inet: frags: get rid of inet_frag_evicting()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-11-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+This refactors ip_expire(), removing one level of indentation.
+
+Note: in the future, we should try hard to avoid the skb_clone()
+since this is a serious performance cost.
+Under DDOS, the ICMP message won't be sent because of rate limits.
+
+The fact that ip6_expire_frag_queue() does not use skb_clone() is
+disturbing too. Presumably IPv6 has the same
+issue as the one we fixed in commit ec4fbd64751d
+("inet: frag: release spinlock before calling icmp_send()")
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 399d1404be660d355192ff4df5ccc3f4159ec1e4)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 5 ---
+ net/ipv4/ip_fragment.c | 65 +++++++++++++++++++++++-------------------------
+ net/ipv6/reassembly.c | 4 --
+ 3 files changed, 32 insertions(+), 42 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -118,11 +118,6 @@ static inline void inet_frag_put(struct
+ inet_frag_destroy(q);
+ }
+
+-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+-{
+- return false;
+-}
+-
+ /* Memory Tracking Functions. */
+
+ static inline int frag_mem_limit(struct netns_frags *nf)
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -141,8 +141,11 @@ static bool frag_expire_skip_icmp(u32 us
+ */
+ static void ip_expire(unsigned long arg)
+ {
+- struct ipq *qp;
++ struct sk_buff *clone, *head;
++ const struct iphdr *iph;
+ struct net *net;
++ struct ipq *qp;
++ int err;
+
+ qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
+ net = container_of(qp->q.net, struct net, ipv4.frags);
+@@ -156,45 +159,41 @@ static void ip_expire(unsigned long arg)
+ ipq_kill(qp);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+
+- if (!inet_frag_evicting(&qp->q)) {
+- struct sk_buff *clone, *head = qp->q.fragments;
+- const struct iphdr *iph;
+- int err;
++ head = qp->q.fragments;
+
+- __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
++ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
+
+- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+- goto out;
++ if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
++ goto out;
+
+- head->dev = dev_get_by_index_rcu(net, qp->iif);
+- if (!head->dev)
+- goto out;
++ head->dev = dev_get_by_index_rcu(net, qp->iif);
++ if (!head->dev)
++ goto out;
+
+
+- /* skb has no dst, perform route lookup again */
+- iph = ip_hdr(head);
+- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
++ /* skb has no dst, perform route lookup again */
++ iph = ip_hdr(head);
++ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+- if (err)
+- goto out;
++ if (err)
++ goto out;
++
++ /* Only an end host needs to send an ICMP
++ * "Fragment Reassembly Timeout" message, per RFC792.
++ */
++ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
++ (skb_rtable(head)->rt_type != RTN_LOCAL))
++ goto out;
++
++ clone = skb_clone(head, GFP_ATOMIC);
+
+- /* Only an end host needs to send an ICMP
+- * "Fragment Reassembly Timeout" message, per RFC792.
+- */
+- if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+- (skb_rtable(head)->rt_type != RTN_LOCAL))
+- goto out;
+-
+- clone = skb_clone(head, GFP_ATOMIC);
+-
+- /* Send an ICMP "Fragment Reassembly Timeout" message. */
+- if (clone) {
+- spin_unlock(&qp->q.lock);
+- icmp_send(clone, ICMP_TIME_EXCEEDED,
+- ICMP_EXC_FRAGTIME, 0);
+- consume_skb(clone);
+- goto out_rcu_unlock;
+- }
++ /* Send an ICMP "Fragment Reassembly Timeout" message. */
++ if (clone) {
++ spin_unlock(&qp->q.lock);
++ icmp_send(clone, ICMP_TIME_EXCEEDED,
++ ICMP_EXC_FRAGTIME, 0);
++ consume_skb(clone);
++ goto out_rcu_unlock;
+ }
+ out:
+ spin_unlock(&qp->q.lock);
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -106,10 +106,6 @@ void ip6_expire_frag_queue(struct net *n
+ goto out_rcu_unlock;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+-
+- if (inet_frag_evicting(&fq->q))
+- goto out_rcu_unlock;
+-
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:51 -0700
+Subject: inet: frags: refactor ipfrag_init()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-4-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+We need to call inet_frags_init() before register_pernet_subsys(),
+as a prerequisite for the following patch ("inet: frags: use rhashtables
+for reassembly units"): the per-netns init added there dereferences the
+global ip4_frags descriptor, which therefore must be set up first.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 483a6e4fa055123142d8956866fe2aa9c98d546d)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -899,8 +899,6 @@ static struct pernet_operations ip4_frag
+
+ void __init ipfrag_init(void)
+ {
+- ip4_frags_ctl_register();
+- register_pernet_subsys(&ip4_frags_ops);
+ ip4_frags.hashfn = ip4_hashfn;
+ ip4_frags.constructor = ip4_frag_init;
+ ip4_frags.destructor = ip4_frag_free;
+@@ -910,4 +908,6 @@ void __init ipfrag_init(void)
+ ip4_frags.frags_cache_name = ip_frag_cache_name;
+ if (inet_frags_init(&ip4_frags))
+ panic("IP: failed to allocate ip4_frags cache\n");
++ ip4_frags_ctl_register();
++ register_pernet_subsys(&ip4_frags_ops);
+ }
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:52 -0700
+Subject: inet: frags: refactor ipv6_frag_init()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-5-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+We want to call inet_frags_init() earlier.
+
+This is a prereq to "inet: frags: use rhashtables for reassembly units"
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 5b975bab23615cd0fdf67af6c9298eb01c4b9f61)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/reassembly.c | 25 ++++++++++++++-----------
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -740,10 +740,21 @@ int __init ipv6_frag_init(void)
+ {
+ int ret;
+
+- ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
++ ip6_frags.hashfn = ip6_hashfn;
++ ip6_frags.constructor = ip6_frag_init;
++ ip6_frags.destructor = NULL;
++ ip6_frags.qsize = sizeof(struct frag_queue);
++ ip6_frags.match = ip6_frag_match;
++ ip6_frags.frag_expire = ip6_frag_expire;
++ ip6_frags.frags_cache_name = ip6_frag_cache_name;
++ ret = inet_frags_init(&ip6_frags);
+ if (ret)
+ goto out;
+
++ ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
++ if (ret)
++ goto err_protocol;
++
+ ret = ip6_frags_sysctl_register();
+ if (ret)
+ goto err_sysctl;
+@@ -752,16 +763,6 @@ int __init ipv6_frag_init(void)
+ if (ret)
+ goto err_pernet;
+
+- ip6_frags.hashfn = ip6_hashfn;
+- ip6_frags.constructor = ip6_frag_init;
+- ip6_frags.destructor = NULL;
+- ip6_frags.qsize = sizeof(struct frag_queue);
+- ip6_frags.match = ip6_frag_match;
+- ip6_frags.frag_expire = ip6_frag_expire;
+- ip6_frags.frags_cache_name = ip6_frag_cache_name;
+- ret = inet_frags_init(&ip6_frags);
+- if (ret)
+- goto err_pernet;
+ out:
+ return ret;
+
+@@ -769,6 +770,8 @@ err_pernet:
+ ip6_frags_sysctl_unregister();
+ err_sysctl:
+ inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
++err_protocol:
++ inet_frags_fini(&ip6_frags);
+ goto out;
+ }
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:53 -0700
+Subject: inet: frags: refactor lowpan_net_frag_init()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-6-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+We want to call lowpan_net_frag_init() earlier.
+Similar to commit "inet: frags: refactor ipv6_frag_init()"
+
+This is a prereq to "inet: frags: use rhashtables for reassembly units"
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 807f1844df4ac23594268fa9f41902d0549e92aa)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ieee802154/6lowpan/reassembly.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -614,14 +614,6 @@ int __init lowpan_net_frag_init(void)
+ {
+ int ret;
+
+- ret = lowpan_frags_sysctl_register();
+- if (ret)
+- return ret;
+-
+- ret = register_pernet_subsys(&lowpan_frags_ops);
+- if (ret)
+- goto err_pernet;
+-
+ lowpan_frags.hashfn = lowpan_hashfn;
+ lowpan_frags.constructor = lowpan_frag_init;
+ lowpan_frags.destructor = NULL;
+@@ -631,11 +623,21 @@ int __init lowpan_net_frag_init(void)
+ lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+ ret = inet_frags_init(&lowpan_frags);
+ if (ret)
+- goto err_pernet;
++ goto out;
++
++ ret = lowpan_frags_sysctl_register();
++ if (ret)
++ goto err_sysctl;
+
++ ret = register_pernet_subsys(&lowpan_frags_ops);
++ if (ret)
++ goto err_pernet;
++out:
+ return ret;
+ err_pernet:
+ lowpan_frags_sysctl_unregister();
++err_sysctl:
++ inet_frags_fini(&lowpan_frags);
+ return ret;
+ }
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:59 -0700
+Subject: inet: frags: remove inet_frag_maybe_warn_overflow()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-12-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+This function is obsolete, after rhashtable addition to inet defrag.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 2d44ed22e607f9a285b049de2263e3840673a260)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 2 --
+ net/ieee802154/6lowpan/reassembly.c | 5 ++---
+ net/ipv4/inet_fragment.c | 11 -----------
+ net/ipv4/ip_fragment.c | 5 ++---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 5 ++---
+ net/ipv6/reassembly.c | 5 ++---
+ 6 files changed, 8 insertions(+), 25 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -109,8 +109,6 @@ void inet_frags_exit_net(struct netns_fr
+ void inet_frag_kill(struct inet_frag_queue *q);
+ void inet_frag_destroy(struct inet_frag_queue *q);
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+- const char *prefix);
+
+ static inline void inet_frag_put(struct inet_frag_queue *q)
+ {
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -83,10 +83,9 @@ fq_find(struct net *net, const struct lo
+ struct inet_frag_queue *q;
+
+ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+- if (IS_ERR_OR_NULL(q)) {
+- inet_frag_maybe_warn_overflow(q, pr_fmt());
++ if (!q)
+ return NULL;
+- }
++
+ return container_of(q, struct lowpan_frag_queue, q);
+ }
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -217,14 +217,3 @@ struct inet_frag_queue *inet_frag_find(s
+ return inet_frag_create(nf, key);
+ }
+ EXPORT_SYMBOL(inet_frag_find);
+-
+-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+- const char *prefix)
+-{
+- static const char msg[] = "inet_frag_find: Fragment hash bucket"
+- " list length grew over limit. Dropping fragment.\n";
+-
+- if (PTR_ERR(q) == -ENOBUFS)
+- net_dbg_ratelimited("%s%s", prefix, msg);
+-}
+-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -219,10 +219,9 @@ static struct ipq *ip_find(struct net *n
+ struct inet_frag_queue *q;
+
+ q = inet_frag_find(&net->ipv4.frags, &key);
+- if (IS_ERR_OR_NULL(q)) {
+- inet_frag_maybe_warn_overflow(q, pr_fmt());
++ if (!q)
+ return NULL;
+- }
++
+ return container_of(q, struct ipq, q);
+ }
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -177,10 +177,9 @@ static struct frag_queue *fq_find(struct
+ struct inet_frag_queue *q;
+
+ q = inet_frag_find(&net->nf_frag.frags, &key);
+- if (IS_ERR_OR_NULL(q)) {
+- inet_frag_maybe_warn_overflow(q, pr_fmt());
++ if (!q)
+ return NULL;
+- }
++
+ return container_of(q, struct frag_queue, q);
+ }
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -154,10 +154,9 @@ fq_find(struct net *net, __be32 id, cons
+ key.iif = 0;
+
+ q = inet_frag_find(&net->ipv6.frags, &key);
+- if (IS_ERR_OR_NULL(q)) {
+- inet_frag_maybe_warn_overflow(q, pr_fmt());
++ if (!q)
+ return NULL;
+- }
++
+ return container_of(q, struct frag_queue, q);
+ }
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:57 -0700
+Subject: inet: frags: remove some helpers
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-10-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Remove sum_frag_mem_limit(), ip_frag_mem() & ip6_frag_mem()
+
+Also, since we use a rhashtable, we can bring back the number of fragments
+in "grep FRAG /proc/net/sockstat /proc/net/sockstat6" that was
+removed in commit 434d305405ab ("inet: frag: don't account number
+of fragment queues")
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 6befe4a78b1553edb6eed3a78b4bcd9748526672)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 5 -----
+ include/net/ip.h | 1 -
+ include/net/ipv6.h | 7 -------
+ net/ipv4/ip_fragment.c | 5 -----
+ net/ipv4/proc.c | 6 +++---
+ net/ipv6/proc.c | 5 +++--
+ 6 files changed, 6 insertions(+), 23 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -140,11 +140,6 @@ static inline void add_frag_mem_limit(st
+ atomic_add(i, &nf->mem);
+ }
+
+-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+-{
+- return atomic_read(&nf->mem);
+-}
+-
+ /* RFC 3168 support :
+ * We want to check ECN values of all fragments, do detect invalid combinations.
+ * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -548,7 +548,6 @@ static inline struct sk_buff *ip_check_d
+ return skb;
+ }
+ #endif
+-int ip_frag_mem(struct net *net);
+
+ /*
+ * Functions provided by ip_forward.c
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -330,13 +330,6 @@ static inline bool ipv6_accept_ra(struct
+ idev->cnf.accept_ra;
+ }
+
+-#if IS_ENABLED(CONFIG_IPV6)
+-static inline int ip6_frag_mem(struct net *net)
+-{
+- return sum_frag_mem_limit(&net->ipv6.frags);
+-}
+-#endif
+-
+ #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
+ #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
+ #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -82,11 +82,6 @@ static u8 ip4_frag_ecn(u8 tos)
+
+ static struct inet_frags ip4_frags;
+
+-int ip_frag_mem(struct net *net)
+-{
+- return sum_frag_mem_limit(&net->ipv4.frags);
+-}
+-
+ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ struct net_device *dev);
+
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -54,7 +54,6 @@
+ static int sockstat_seq_show(struct seq_file *seq, void *v)
+ {
+ struct net *net = seq->private;
+- unsigned int frag_mem;
+ int orphans, sockets;
+
+ local_bh_disable();
+@@ -74,8 +73,9 @@ static int sockstat_seq_show(struct seq_
+ sock_prot_inuse_get(net, &udplite_prot));
+ seq_printf(seq, "RAW: inuse %d\n",
+ sock_prot_inuse_get(net, &raw_prot));
+- frag_mem = ip_frag_mem(net);
+- seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
++ seq_printf(seq, "FRAG: inuse %u memory %u\n",
++ atomic_read(&net->ipv4.frags.rhashtable.nelems),
++ frag_mem_limit(&net->ipv4.frags));
+ return 0;
+ }
+
+--- a/net/ipv6/proc.c
++++ b/net/ipv6/proc.c
+@@ -38,7 +38,6 @@
+ static int sockstat6_seq_show(struct seq_file *seq, void *v)
+ {
+ struct net *net = seq->private;
+- unsigned int frag_mem = ip6_frag_mem(net);
+
+ seq_printf(seq, "TCP6: inuse %d\n",
+ sock_prot_inuse_get(net, &tcpv6_prot));
+@@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq
+ sock_prot_inuse_get(net, &udplitev6_prot));
+ seq_printf(seq, "RAW6: inuse %d\n",
+ sock_prot_inuse_get(net, &rawv6_prot));
+- seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
++ seq_printf(seq, "FRAG6: inuse %u memory %u\n",
++ atomic_read(&net->ipv6.frags.rhashtable.nelems),
++ frag_mem_limit(&net->ipv6.frags));
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:04 -0700
+Subject: inet: frags: reorganize struct netns_frags
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-17-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Put the read-mostly fields in a separate cache line
+at the beginning of struct netns_frags, to reduce
+false sharing noticed in inet_frag_kill()
+
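+For reference, ____cacheline_aligned_in_smp starts the annotated member
+on its own cache line on SMP builds, so the layout below keeps the
+rarely-written sysctls (and the f pointer) on a line that stays clean in
+every CPU's cache, while the write-hot members get lines of their own.
+A compressed picture (comments are ours, not from the patch):
+
+	struct netns_frags {
+		long high_thresh;	/* read-mostly: stays cache-clean */
+		...
+		struct inet_frags *f;
+		struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+		atomic_long_t mem ____cacheline_aligned_in_smp; /* write-hot */
+	};
+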
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit c2615cf5a761b32bf74e85bddc223dfff3d9b9f0)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -4,16 +4,17 @@
+ #include <linux/rhashtable.h>
+
+ struct netns_frags {
+- struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+-
+- /* Keep atomic mem on separate cachelines in structs that include it */
+- atomic_long_t mem ____cacheline_aligned_in_smp;
+ /* sysctls */
+ long high_thresh;
+ long low_thresh;
+ int timeout;
+ int max_dist;
+ struct inet_frags *f;
++
++ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
++
++ /* Keep atomic mem on separate cachelines in structs that include it */
++ atomic_long_t mem ____cacheline_aligned_in_smp;
+ };
+
+ /**
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:56 -0700
+Subject: inet: frags: use rhashtables for reassembly units
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Kirill Tkhai <ktkhai@virtuozzo.com>, Herbert Xu <herbert@gondor.apana.org.au>, Florian Westphal <fw@strlen.de>, Jesper Dangaard Brouer <brouer@redhat.com>, Alexander Aring <alex.aring@gmail.com>, Stefan Schmidt <stefan@osg.samsung.com>
+Message-ID: <20181010193017.25221-9-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Some applications still rely on IP fragmentation, and to be fair, the
+linux reassembly unit is not working under any serious load.
+
+It uses static hash tables of 1024 buckets, and up to 128 items per bucket (!!!)
+
+A work queue is supposed to garbage-collect items when the host is under
+memory pressure, and to do a hash rebuild, changing the seed used in hash
+computations.
+
+This work queue blocks softirqs for up to 25 ms when doing a hash rebuild,
+occurring every 5 seconds if the host is under fire.
+
+Then there is the problem of sharing this hash table for all netns.
+
+It is time to switch to rhashtables, and to allocate one of them per netns
+to speed up netns dismantle, since this is a critical metric these days.
+
+Lookup is now using RCU. A followup patch will even remove
+the refcount hold/release left from prior implementation and save
+a couple of atomic operations.
+
+Before this patch, 16 cpus (16 RX queue NIC) could not handle more
+than a 1 Mpps frags DDOS.
+
+After the patch, I reach 9 Mpps without any tuning, and can use up to 2GB
+of storage for the fragments (the exact number depends on frags being
+evicted after the timeout).
+
+$ grep FRAG /proc/net/sockstat
+FRAG: inuse 1966916 memory 2140004608
+
+A followup patch will change the limits for 64bit arches.
+
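+For readers new to the rhashtable API, the pattern used throughout
+this patch reduces to the sketch below; demo_obj and its u32 key are
+made up for illustration, only the calls mirror the real code:
+
+	#include <linux/rhashtable.h>
+
+	struct demo_obj {
+		struct rhash_head node;	/* linkage inside the table */
+		u32 key;
+	};
+
+	static const struct rhashtable_params demo_params = {
+		.head_offset = offsetof(struct demo_obj, node),
+		.key_offset  = offsetof(struct demo_obj, key),
+		.key_len     = sizeof(u32),
+		.automatic_shrinking = true,	/* give memory back */
+	};
+
+	/* one table per netns, set up when the namespace is created */
+	static int demo_init(struct rhashtable *ht)
+	{
+		return rhashtable_init(ht, &demo_params);
+	}
+
+	/* lock-free lookup; the caller holds rcu_read_lock() */
+	static struct demo_obj *demo_lookup(struct rhashtable *ht, u32 *key)
+	{
+		return rhashtable_lookup_fast(ht, key, demo_params);
+	}
+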
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Jesper Dangaard Brouer <brouer@redhat.com>
+Cc: Alexander Aring <alex.aring@gmail.com>
+Cc: Stefan Schmidt <stefan@osg.samsung.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 648700f76b03b7e8149d13cc2bdb3355035258a9)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/networking/ip-sysctl.txt | 7
+ include/net/inet_frag.h | 81 +++----
+ include/net/ipv6.h | 16 -
+ net/ieee802154/6lowpan/6lowpan_i.h | 26 --
+ net/ieee802154/6lowpan/reassembly.c | 91 +++-----
+ net/ipv4/inet_fragment.c | 349 ++++++--------------------------
+ net/ipv4/ip_fragment.c | 112 ++++------
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 51 +---
+ net/ipv6/reassembly.c | 110 ++++------
+ 9 files changed, 267 insertions(+), 576 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -123,13 +123,10 @@ min_adv_mss - INTEGER
+ IP Fragmentation:
+
+ ipfrag_high_thresh - INTEGER
+- Maximum memory used to reassemble IP fragments. When
+- ipfrag_high_thresh bytes of memory is allocated for this purpose,
+- the fragment handler will toss packets until ipfrag_low_thresh
+- is reached. This also serves as a maximum limit to namespaces
+- different from the initial one.
++ Maximum memory used to reassemble IP fragments.
+
+ ipfrag_low_thresh - INTEGER
++ (Obsolete since linux-4.17)
+ Maximum memory used to reassemble IP fragments before the kernel
+ begins to remove incomplete fragment queues to free up resources.
+ The kernel still accepts new fragments for defragmentation.
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -1,7 +1,11 @@
+ #ifndef __NET_FRAG_H__
+ #define __NET_FRAG_H__
+
++#include <linux/rhashtable.h>
++
+ struct netns_frags {
++ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
++
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_t mem ____cacheline_aligned_in_smp;
+ /* sysctls */
+@@ -25,12 +29,30 @@ enum {
+ INET_FRAG_COMPLETE = BIT(2),
+ };
+
++struct frag_v4_compare_key {
++ __be32 saddr;
++ __be32 daddr;
++ u32 user;
++ u32 vif;
++ __be16 id;
++ u16 protocol;
++};
++
++struct frag_v6_compare_key {
++ struct in6_addr saddr;
++ struct in6_addr daddr;
++ u32 user;
++ __be32 id;
++ u32 iif;
++};
++
+ /**
+ * struct inet_frag_queue - fragment queue
+ *
+- * @lock: spinlock protecting the queue
++ * @node: rhash node
++ * @key: keys identifying this frag.
+ * @timer: queue expiration timer
+- * @list: hash bucket list
++ * @lock: spinlock protecting this frag
+ * @refcnt: reference count of the queue
+ * @fragments: received fragments head
+ * @fragments_tail: received fragments tail
+@@ -40,12 +62,16 @@ enum {
+ * @flags: fragment queue flags
+ * @max_size: maximum received fragment size
+ * @net: namespace that this frag belongs to
+- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
++ * @rcu: rcu head for freeing deferral
+ */
+ struct inet_frag_queue {
+- spinlock_t lock;
++ struct rhash_head node;
++ union {
++ struct frag_v4_compare_key v4;
++ struct frag_v6_compare_key v6;
++ } key;
+ struct timer_list timer;
+- struct hlist_node list;
++ spinlock_t lock;
+ atomic_t refcnt;
+ struct sk_buff *fragments;
+ struct sk_buff *fragments_tail;
+@@ -54,51 +80,20 @@ struct inet_frag_queue {
+ int meat;
+ __u8 flags;
+ u16 max_size;
+- struct netns_frags *net;
+- struct hlist_node list_evictor;
+-};
+-
+-#define INETFRAGS_HASHSZ 1024
+-
+-/* averaged:
+- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
+- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
+- * struct frag_queue))
+- */
+-#define INETFRAGS_MAXDEPTH 128
+-
+-struct inet_frag_bucket {
+- struct hlist_head chain;
+- spinlock_t chain_lock;
++ struct netns_frags *net;
++ struct rcu_head rcu;
+ };
+
+ struct inet_frags {
+- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
+-
+- struct work_struct frags_work;
+- unsigned int next_bucket;
+- unsigned long last_rebuild_jiffies;
+- bool rebuild;
+-
+- /* The first call to hashfn is responsible to initialize
+- * rnd. This is best done with net_get_random_once.
+- *
+- * rnd_seqlock is used to let hash insertion detect
+- * when it needs to re-lookup the hash chain to use.
+- */
+- u32 rnd;
+- seqlock_t rnd_seqlock;
+ int qsize;
+
+- unsigned int (*hashfn)(const struct inet_frag_queue *);
+- bool (*match)(const struct inet_frag_queue *q,
+- const void *arg);
+ void (*constructor)(struct inet_frag_queue *q,
+ const void *arg);
+ void (*destructor)(struct inet_frag_queue *);
+ void (*frag_expire)(unsigned long data);
+ struct kmem_cache *frags_cachep;
+ const char *frags_cache_name;
++ struct rhashtable_params rhash_params;
+ };
+
+ int inet_frags_init(struct inet_frags *);
+@@ -107,15 +102,13 @@ void inet_frags_fini(struct inet_frags *
+ static inline int inet_frags_init_net(struct netns_frags *nf)
+ {
+ atomic_set(&nf->mem, 0);
+- return 0;
++ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
+ }
+ void inet_frags_exit_net(struct netns_frags *nf);
+
+ void inet_frag_kill(struct inet_frag_queue *q);
+ void inet_frag_destroy(struct inet_frag_queue *q);
+-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+- struct inet_frags *f, void *key, unsigned int hash);
+-
++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+ const char *prefix);
+
+@@ -127,7 +120,7 @@ static inline void inet_frag_put(struct
+
+ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+ {
+- return !hlist_unhashed(&q->list_evictor);
++ return false;
+ }
+
+ /* Memory Tracking Functions. */
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -530,17 +530,8 @@ enum ip6_defrag_users {
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+ };
+
+-struct ip6_create_arg {
+- __be32 id;
+- u32 user;
+- const struct in6_addr *src;
+- const struct in6_addr *dst;
+- int iif;
+- u8 ecn;
+-};
+-
+ void ip6_frag_init(struct inet_frag_queue *q, const void *a);
+-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
++extern const struct rhashtable_params ip6_rhash_params;
+
+ /*
+ * Equivalent of ipv4 struct ip
+@@ -548,11 +539,6 @@ bool ip6_frag_match(const struct inet_fr
+ struct frag_queue {
+ struct inet_frag_queue q;
+
+- __be32 id; /* fragment id */
+- u32 user;
+- struct in6_addr saddr;
+- struct in6_addr daddr;
+-
+ int iif;
+ unsigned int csum;
+ __u16 nhoffset;
+--- a/net/ieee802154/6lowpan/6lowpan_i.h
++++ b/net/ieee802154/6lowpan/6lowpan_i.h
+@@ -16,37 +16,19 @@ typedef unsigned __bitwise__ lowpan_rx_r
+ #define LOWPAN_DISPATCH_FRAG1 0xc0
+ #define LOWPAN_DISPATCH_FRAGN 0xe0
+
+-struct lowpan_create_arg {
++struct frag_lowpan_compare_key {
+ u16 tag;
+ u16 d_size;
+- const struct ieee802154_addr *src;
+- const struct ieee802154_addr *dst;
++ const struct ieee802154_addr src;
++ const struct ieee802154_addr dst;
+ };
+
+-/* Equivalent of ipv4 struct ip
++/* Equivalent of ipv4 struct ipq
+ */
+ struct lowpan_frag_queue {
+ struct inet_frag_queue q;
+-
+- u16 tag;
+- u16 d_size;
+- struct ieee802154_addr saddr;
+- struct ieee802154_addr daddr;
+ };
+
+-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+-{
+- switch (a->mode) {
+- case IEEE802154_ADDR_LONG:
+- return (((__force u64)a->extended_addr) >> 32) ^
+- (((__force u64)a->extended_addr) & 0xffffffff);
+- case IEEE802154_ADDR_SHORT:
+- return (__force u32)(a->short_addr + (a->pan_id << 16));
+- default:
+- return 0;
+- }
+-}
+-
+ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+ void lowpan_net_frag_exit(void);
+ int lowpan_net_frag_init(void);
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -37,47 +37,15 @@ static struct inet_frags lowpan_frags;
+ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
+ struct sk_buff *prev, struct net_device *ldev);
+
+-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
+- const struct ieee802154_addr *saddr,
+- const struct ieee802154_addr *daddr)
+-{
+- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
+- return jhash_3words(ieee802154_addr_hash(saddr),
+- ieee802154_addr_hash(daddr),
+- (__force u32)(tag + (d_size << 16)),
+- lowpan_frags.rnd);
+-}
+-
+-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
+-{
+- const struct lowpan_frag_queue *fq;
+-
+- fq = container_of(q, struct lowpan_frag_queue, q);
+- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
+-}
+-
+-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
+-{
+- const struct lowpan_frag_queue *fq;
+- const struct lowpan_create_arg *arg = a;
+-
+- fq = container_of(q, struct lowpan_frag_queue, q);
+- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
+- ieee802154_addr_equal(&fq->saddr, arg->src) &&
+- ieee802154_addr_equal(&fq->daddr, arg->dst);
+-}
+-
+ static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
+ {
+- const struct lowpan_create_arg *arg = a;
++ const struct frag_lowpan_compare_key *key = a;
+ struct lowpan_frag_queue *fq;
+
+ fq = container_of(q, struct lowpan_frag_queue, q);
+
+- fq->tag = arg->tag;
+- fq->d_size = arg->d_size;
+- fq->saddr = *arg->src;
+- fq->daddr = *arg->dst;
++ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
++ memcpy(&q->key, key, sizeof(*key));
+ }
+
+ static void lowpan_frag_expire(unsigned long data)
+@@ -104,21 +72,17 @@ fq_find(struct net *net, const struct lo
+ const struct ieee802154_addr *src,
+ const struct ieee802154_addr *dst)
+ {
+- struct inet_frag_queue *q;
+- struct lowpan_create_arg arg;
+- unsigned int hash;
+ struct netns_ieee802154_lowpan *ieee802154_lowpan =
+ net_ieee802154_lowpan(net);
++ struct frag_lowpan_compare_key key = {
++ .tag = cb->d_tag,
++ .d_size = cb->d_size,
++ .src = *src,
++ .dst = *dst,
++ };
++ struct inet_frag_queue *q;
+
+- arg.tag = cb->d_tag;
+- arg.d_size = cb->d_size;
+- arg.src = src;
+- arg.dst = dst;
+-
+- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
+-
+- q = inet_frag_find(&ieee802154_lowpan->frags,
+- &lowpan_frags, &arg, hash);
++ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+@@ -610,17 +574,46 @@ static struct pernet_operations lowpan_f
+ .exit = lowpan_frags_exit_net,
+ };
+
++static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
++{
++ return jhash2(data,
++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
++}
++
++static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
++{
++ const struct inet_frag_queue *fq = data;
++
++ return jhash2((const u32 *)&fq->key,
++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
++}
++
++static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
++{
++ const struct frag_lowpan_compare_key *key = arg->key;
++ const struct inet_frag_queue *fq = ptr;
++
++ return !!memcmp(&fq->key, key, sizeof(*key));
++}
++
++static const struct rhashtable_params lowpan_rhash_params = {
++ .head_offset = offsetof(struct inet_frag_queue, node),
++ .hashfn = lowpan_key_hashfn,
++ .obj_hashfn = lowpan_obj_hashfn,
++ .obj_cmpfn = lowpan_obj_cmpfn,
++ .automatic_shrinking = true,
++};
++
+ int __init lowpan_net_frag_init(void)
+ {
+ int ret;
+
+- lowpan_frags.hashfn = lowpan_hashfn;
+ lowpan_frags.constructor = lowpan_frag_init;
+ lowpan_frags.destructor = NULL;
+ lowpan_frags.qsize = sizeof(struct frag_queue);
+- lowpan_frags.match = lowpan_frag_match;
+ lowpan_frags.frag_expire = lowpan_frag_expire;
+ lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
++ lowpan_frags.rhash_params = lowpan_rhash_params;
+ ret = inet_frags_init(&lowpan_frags);
+ if (ret)
+ goto out;
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -25,12 +25,6 @@
+ #include <net/inet_frag.h>
+ #include <net/inet_ecn.h>
+
+-#define INETFRAGS_EVICT_BUCKETS 128
+-#define INETFRAGS_EVICT_MAX 512
+-
+-/* don't rebuild inetfrag table with new secret more often than this */
+-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+-
+ /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
+ * Value : 0xff if frame should be dropped.
+ * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
+@@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
+ };
+ EXPORT_SYMBOL(ip_frag_ecn_table);
+
+-static unsigned int
+-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
+-{
+- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
+-}
+-
+-static bool inet_frag_may_rebuild(struct inet_frags *f)
+-{
+- return time_after(jiffies,
+- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+-}
+-
+-static void inet_frag_secret_rebuild(struct inet_frags *f)
+-{
+- int i;
+-
+- write_seqlock_bh(&f->rnd_seqlock);
+-
+- if (!inet_frag_may_rebuild(f))
+- goto out;
+-
+- get_random_bytes(&f->rnd, sizeof(u32));
+-
+- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+- struct inet_frag_bucket *hb;
+- struct inet_frag_queue *q;
+- struct hlist_node *n;
+-
+- hb = &f->hash[i];
+- spin_lock(&hb->chain_lock);
+-
+- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
+- unsigned int hval = inet_frag_hashfn(f, q);
+-
+- if (hval != i) {
+- struct inet_frag_bucket *hb_dest;
+-
+- hlist_del(&q->list);
+-
+- /* Relink to new hash chain. */
+- hb_dest = &f->hash[hval];
+-
+- /* This is the only place where we take
+- * another chain_lock while already holding
+- * one. As this will not run concurrently,
+- * we cannot deadlock on hb_dest lock below, if its
+- * already locked it will be released soon since
+- * other caller cannot be waiting for hb lock
+- * that we've taken above.
+- */
+- spin_lock_nested(&hb_dest->chain_lock,
+- SINGLE_DEPTH_NESTING);
+- hlist_add_head(&q->list, &hb_dest->chain);
+- spin_unlock(&hb_dest->chain_lock);
+- }
+- }
+- spin_unlock(&hb->chain_lock);
+- }
+-
+- f->rebuild = false;
+- f->last_rebuild_jiffies = jiffies;
+-out:
+- write_sequnlock_bh(&f->rnd_seqlock);
+-}
+-
+-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+-{
+- if (!hlist_unhashed(&q->list_evictor))
+- return false;
+-
+- return q->net->low_thresh == 0 ||
+- frag_mem_limit(q->net) >= q->net->low_thresh;
+-}
+-
+-static unsigned int
+-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+-{
+- struct inet_frag_queue *fq;
+- struct hlist_node *n;
+- unsigned int evicted = 0;
+- HLIST_HEAD(expired);
+-
+- spin_lock(&hb->chain_lock);
+-
+- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+- if (!inet_fragq_should_evict(fq))
+- continue;
+-
+- if (!del_timer(&fq->timer))
+- continue;
+-
+- hlist_add_head(&fq->list_evictor, &expired);
+- ++evicted;
+- }
+-
+- spin_unlock(&hb->chain_lock);
+-
+- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
+- f->frag_expire((unsigned long) fq);
+-
+- return evicted;
+-}
+-
+-static void inet_frag_worker(struct work_struct *work)
+-{
+- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
+- unsigned int i, evicted = 0;
+- struct inet_frags *f;
+-
+- f = container_of(work, struct inet_frags, frags_work);
+-
+- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
+-
+- local_bh_disable();
+-
+- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+- evicted += inet_evict_bucket(f, &f->hash[i]);
+- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
+- if (evicted > INETFRAGS_EVICT_MAX)
+- break;
+- }
+-
+- f->next_bucket = i;
+-
+- local_bh_enable();
+-
+- if (f->rebuild && inet_frag_may_rebuild(f))
+- inet_frag_secret_rebuild(f);
+-}
+-
+-static void inet_frag_schedule_worker(struct inet_frags *f)
+-{
+- if (unlikely(!work_pending(&f->frags_work)))
+- schedule_work(&f->frags_work);
+-}
+-
+ int inet_frags_init(struct inet_frags *f)
+ {
+- int i;
+-
+- INIT_WORK(&f->frags_work, inet_frag_worker);
+-
+- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+- struct inet_frag_bucket *hb = &f->hash[i];
+-
+- spin_lock_init(&hb->chain_lock);
+- INIT_HLIST_HEAD(&hb->chain);
+- }
+-
+- seqlock_init(&f->rnd_seqlock);
+- f->last_rebuild_jiffies = 0;
+ f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
+ NULL);
+ if (!f->frags_cachep)
+@@ -214,66 +59,42 @@ EXPORT_SYMBOL(inet_frags_init);
+
+ void inet_frags_fini(struct inet_frags *f)
+ {
+- cancel_work_sync(&f->frags_work);
++ /* We must wait until all inet_frag_destroy_rcu() calls have completed. */
++ rcu_barrier();
++
+ kmem_cache_destroy(f->frags_cachep);
++ f->frags_cachep = NULL;
+ }
+ EXPORT_SYMBOL(inet_frags_fini);
+
+-void inet_frags_exit_net(struct netns_frags *nf)
++static void inet_frags_free_cb(void *ptr, void *arg)
+ {
+- struct inet_frags *f =nf->f;
+- unsigned int seq;
+- int i;
+-
+- nf->low_thresh = 0;
+-
+-evict_again:
+- local_bh_disable();
+- seq = read_seqbegin(&f->rnd_seqlock);
+-
+- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
+- inet_evict_bucket(f, &f->hash[i]);
+-
+- local_bh_enable();
+- cond_resched();
++ struct inet_frag_queue *fq = ptr;
+
+- if (read_seqretry(&f->rnd_seqlock, seq) ||
+- sum_frag_mem_limit(nf))
+- goto evict_again;
+-}
+-EXPORT_SYMBOL(inet_frags_exit_net);
++ /* If we cannot cancel the timer, it means this frag_queue
++ * is already disappearing; we have nothing to do.
++ * Otherwise, we own a refcount until the end of this function.
++ */
++ if (!del_timer(&fq->timer))
++ return;
+
+-static struct inet_frag_bucket *
+-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
+-__acquires(hb->chain_lock)
+-{
+- struct inet_frag_bucket *hb;
+- unsigned int seq, hash;
+-
+- restart:
+- seq = read_seqbegin(&f->rnd_seqlock);
+-
+- hash = inet_frag_hashfn(f, fq);
+- hb = &f->hash[hash];
+-
+- spin_lock(&hb->chain_lock);
+- if (read_seqretry(&f->rnd_seqlock, seq)) {
+- spin_unlock(&hb->chain_lock);
+- goto restart;
++ spin_lock_bh(&fq->lock);
++ if (!(fq->flags & INET_FRAG_COMPLETE)) {
++ fq->flags |= INET_FRAG_COMPLETE;
++ atomic_dec(&fq->refcnt);
+ }
++ spin_unlock_bh(&fq->lock);
+
+- return hb;
++ inet_frag_put(fq);
+ }
+
+-static inline void fq_unlink(struct inet_frag_queue *fq)
++void inet_frags_exit_net(struct netns_frags *nf)
+ {
+- struct inet_frag_bucket *hb;
++ nf->low_thresh = 0; /* prevent creation of new frags */
+
+- hb = get_frag_bucket_locked(fq, fq->net->f);
+- hlist_del(&fq->list);
+- fq->flags |= INET_FRAG_COMPLETE;
+- spin_unlock(&hb->chain_lock);
++ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
+ }
++EXPORT_SYMBOL(inet_frags_exit_net);
+
+ void inet_frag_kill(struct inet_frag_queue *fq)
+ {
+@@ -281,12 +102,26 @@ void inet_frag_kill(struct inet_frag_que
+ atomic_dec(&fq->refcnt);
+
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+- fq_unlink(fq);
++ struct netns_frags *nf = fq->net;
++
++ fq->flags |= INET_FRAG_COMPLETE;
++ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
+ atomic_dec(&fq->refcnt);
+ }
+ }
+ EXPORT_SYMBOL(inet_frag_kill);
+
++static void inet_frag_destroy_rcu(struct rcu_head *head)
++{
++ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
++ rcu);
++ struct inet_frags *f = q->net->f;
++
++ if (f->destructor)
++ f->destructor(q);
++ kmem_cache_free(f->frags_cachep, q);
++}
++
+ void inet_frag_destroy(struct inet_frag_queue *q)
+ {
+ struct sk_buff *fp;
+@@ -310,55 +145,21 @@ void inet_frag_destroy(struct inet_frag_
+ }
+ sum = sum_truesize + f->qsize;
+
+- if (f->destructor)
+- f->destructor(q);
+- kmem_cache_free(f->frags_cachep, q);
++ call_rcu(&q->rcu, inet_frag_destroy_rcu);
+
+ sub_frag_mem_limit(nf, sum);
+ }
+ EXPORT_SYMBOL(inet_frag_destroy);
+
+-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
+- struct inet_frag_queue *qp_in,
+- struct inet_frags *f,
+- void *arg)
+-{
+- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
+- struct inet_frag_queue *qp;
+-
+-#ifdef CONFIG_SMP
+- /* With SMP race we have to recheck hash table, because
+- * such entry could have been created on other cpu before
+- * we acquired hash bucket lock.
+- */
+- hlist_for_each_entry(qp, &hb->chain, list) {
+- if (qp->net == nf && f->match(qp, arg)) {
+- atomic_inc(&qp->refcnt);
+- spin_unlock(&hb->chain_lock);
+- qp_in->flags |= INET_FRAG_COMPLETE;
+- inet_frag_put(qp_in);
+- return qp;
+- }
+- }
+-#endif
+- qp = qp_in;
+- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
+- atomic_inc(&qp->refcnt);
+-
+- atomic_inc(&qp->refcnt);
+- hlist_add_head(&qp->list, &hb->chain);
+-
+- spin_unlock(&hb->chain_lock);
+-
+- return qp;
+-}
+-
+ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
+ struct inet_frags *f,
+ void *arg)
+ {
+ struct inet_frag_queue *q;
+
++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
++ return NULL;
++
+ q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+ if (!q)
+ return NULL;
+@@ -369,64 +170,51 @@ static struct inet_frag_queue *inet_frag
+
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
+- atomic_set(&q->refcnt, 1);
++ atomic_set(&q->refcnt, 3);
+
+ return q;
+ }
+
+ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
+- struct inet_frags *f,
+ void *arg)
+ {
++ struct inet_frags *f = nf->f;
+ struct inet_frag_queue *q;
++ int err;
+
+ q = inet_frag_alloc(nf, f, arg);
+ if (!q)
+ return NULL;
+
+- return inet_frag_intern(nf, q, f, arg);
+-}
++ mod_timer(&q->timer, jiffies + nf->timeout);
+
+-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+- struct inet_frags *f, void *key,
+- unsigned int hash)
+-{
+- struct inet_frag_bucket *hb;
+- struct inet_frag_queue *q;
+- int depth = 0;
+-
+- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
+- inet_frag_schedule_worker(f);
++ err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
++ f->rhash_params);
++ if (err < 0) {
++ q->flags |= INET_FRAG_COMPLETE;
++ inet_frag_kill(q);
++ inet_frag_destroy(q);
+ return NULL;
+ }
++ return q;
++}
++EXPORT_SYMBOL(inet_frag_create);
+
+- if (frag_mem_limit(nf) > nf->low_thresh)
+- inet_frag_schedule_worker(f);
+-
+- hash &= (INETFRAGS_HASHSZ - 1);
+- hb = &f->hash[hash];
+-
+- spin_lock(&hb->chain_lock);
+- hlist_for_each_entry(q, &hb->chain, list) {
+- if (q->net == nf && f->match(q, key)) {
+- atomic_inc(&q->refcnt);
+- spin_unlock(&hb->chain_lock);
+- return q;
+- }
+- depth++;
+- }
+- spin_unlock(&hb->chain_lock);
+-
+- if (depth <= INETFRAGS_MAXDEPTH)
+- return inet_frag_create(nf, f, key);
++/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
++{
++ struct inet_frag_queue *fq;
+
+- if (inet_frag_may_rebuild(f)) {
+- if (!f->rebuild)
+- f->rebuild = true;
+- inet_frag_schedule_worker(f);
++ rcu_read_lock();
++ fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
++ if (fq) {
++ if (!atomic_inc_not_zero(&fq->refcnt))
++ fq = NULL;
++ rcu_read_unlock();
++ return fq;
+ }
+-
+- return ERR_PTR(-ENOBUFS);
++ rcu_read_unlock();
++ return inet_frag_create(nf, key);
+ }
+ EXPORT_SYMBOL(inet_frag_find);
+
+@@ -434,8 +222,7 @@ void inet_frag_maybe_warn_overflow(struc
+ const char *prefix)
+ {
+ static const char msg[] = "inet_frag_find: Fragment hash bucket"
+- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+- ". Dropping fragment.\n";
++ " list length grew over limit. Dropping fragment.\n";
+
+ if (PTR_ERR(q) == -ENOBUFS)
+ net_dbg_ratelimited("%s%s", prefix, msg);
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -68,15 +68,9 @@ struct ipfrag_skb_cb
+ struct ipq {
+ struct inet_frag_queue q;
+
+- u32 user;
+- __be32 saddr;
+- __be32 daddr;
+- __be16 id;
+- u8 protocol;
+ u8 ecn; /* RFC3168 support */
+ u16 max_df_size; /* largest frag with DF set seen */
+ int iif;
+- int vif; /* L3 master device index */
+ unsigned int rid;
+ struct inet_peer *peer;
+ };
+@@ -96,41 +90,6 @@ int ip_frag_mem(struct net *net)
+ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ struct net_device *dev);
+
+-struct ip4_create_arg {
+- struct iphdr *iph;
+- u32 user;
+- int vif;
+-};
+-
+-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
+-{
+- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
+- return jhash_3words((__force u32)id << 16 | prot,
+- (__force u32)saddr, (__force u32)daddr,
+- ip4_frags.rnd);
+-}
+-
+-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
+-{
+- const struct ipq *ipq;
+-
+- ipq = container_of(q, struct ipq, q);
+- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
+-}
+-
+-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
+-{
+- const struct ipq *qp;
+- const struct ip4_create_arg *arg = a;
+-
+- qp = container_of(q, struct ipq, q);
+- return qp->id == arg->iph->id &&
+- qp->saddr == arg->iph->saddr &&
+- qp->daddr == arg->iph->daddr &&
+- qp->protocol == arg->iph->protocol &&
+- qp->user == arg->user &&
+- qp->vif == arg->vif;
+-}
+
+ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
+ {
+@@ -139,17 +98,12 @@ static void ip4_frag_init(struct inet_fr
+ frags);
+ struct net *net = container_of(ipv4, struct net, ipv4);
+
+- const struct ip4_create_arg *arg = a;
++ const struct frag_v4_compare_key *key = a;
+
+- qp->protocol = arg->iph->protocol;
+- qp->id = arg->iph->id;
+- qp->ecn = ip4_frag_ecn(arg->iph->tos);
+- qp->saddr = arg->iph->saddr;
+- qp->daddr = arg->iph->daddr;
+- qp->vif = arg->vif;
+- qp->user = arg->user;
++ q->key.v4 = *key;
++ qp->ecn = 0;
+ qp->peer = q->net->max_dist ?
+- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
++ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
+ NULL;
+ }
+
+@@ -232,7 +186,7 @@ static void ip_expire(unsigned long arg)
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+- if (frag_expire_skip_icmp(qp->user) &&
++ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;
+
+@@ -260,17 +214,17 @@ out_rcu_unlock:
+ static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+ u32 user, int vif)
+ {
++ struct frag_v4_compare_key key = {
++ .saddr = iph->saddr,
++ .daddr = iph->daddr,
++ .user = user,
++ .vif = vif,
++ .id = iph->id,
++ .protocol = iph->protocol,
++ };
+ struct inet_frag_queue *q;
+- struct ip4_create_arg arg;
+- unsigned int hash;
+-
+- arg.iph = iph;
+- arg.user = user;
+- arg.vif = vif;
+
+- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+-
+- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
++ q = inet_frag_find(&net->ipv4.frags, &key);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+@@ -659,7 +613,7 @@ out_nomem:
+ err = -ENOMEM;
+ goto out_fail;
+ out_oversize:
+- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
++ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
+ out_fail:
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ return err;
+@@ -897,15 +851,47 @@ static struct pernet_operations ip4_frag
+ .exit = ipv4_frags_exit_net,
+ };
+
++
++static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
++{
++ return jhash2(data,
++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
++}
++
++static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
++{
++ const struct inet_frag_queue *fq = data;
++
++ return jhash2((const u32 *)&fq->key.v4,
++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
++}
++
++static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
++{
++ const struct frag_v4_compare_key *key = arg->key;
++ const struct inet_frag_queue *fq = ptr;
++
++ return !!memcmp(&fq->key, key, sizeof(*key));
++}
++
++static const struct rhashtable_params ip4_rhash_params = {
++ .head_offset = offsetof(struct inet_frag_queue, node),
++ .key_offset = offsetof(struct inet_frag_queue, key),
++ .key_len = sizeof(struct frag_v4_compare_key),
++ .hashfn = ip4_key_hashfn,
++ .obj_hashfn = ip4_obj_hashfn,
++ .obj_cmpfn = ip4_obj_cmpfn,
++ .automatic_shrinking = true,
++};
++
+ void __init ipfrag_init(void)
+ {
+- ip4_frags.hashfn = ip4_hashfn;
+ ip4_frags.constructor = ip4_frag_init;
+ ip4_frags.destructor = ip4_frag_free;
+ ip4_frags.qsize = sizeof(struct ipq);
+- ip4_frags.match = ip4_frag_match;
+ ip4_frags.frag_expire = ip_expire;
+ ip4_frags.frags_cache_name = ip_frag_cache_name;
++ ip4_frags.rhash_params = ip4_rhash_params;
+ if (inet_frags_init(&ip4_frags))
+ panic("IP: failed to allocate ip4_frags cache\n");
+ ip4_frags_ctl_register();
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -152,23 +152,6 @@ static inline u8 ip6_frag_ecn(const stru
+ return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
+ }
+
+-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
+- const struct in6_addr *daddr)
+-{
+- net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
+- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+- (__force u32)id, nf_frags.rnd);
+-}
+-
+-
+-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
+-{
+- const struct frag_queue *nq;
+-
+- nq = container_of(q, struct frag_queue, q);
+- return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
+-}
+-
+ static void nf_ct_frag6_expire(unsigned long data)
+ {
+ struct frag_queue *fq;
+@@ -181,26 +164,19 @@ static void nf_ct_frag6_expire(unsigned
+ }
+
+ /* Creation primitives. */
+-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+- u32 user, struct in6_addr *src,
+- struct in6_addr *dst, int iif, u8 ecn)
++static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
++ const struct ipv6hdr *hdr, int iif)
+ {
++ struct frag_v6_compare_key key = {
++ .id = id,
++ .saddr = hdr->saddr,
++ .daddr = hdr->daddr,
++ .user = user,
++ .iif = iif,
++ };
+ struct inet_frag_queue *q;
+- struct ip6_create_arg arg;
+- unsigned int hash;
+-
+- arg.id = id;
+- arg.user = user;
+- arg.src = src;
+- arg.dst = dst;
+- arg.iif = iif;
+- arg.ecn = ecn;
+-
+- local_bh_disable();
+- hash = nf_hash_frag(id, src, dst);
+
+- q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
+- local_bh_enable();
++ q = inet_frag_find(&net->nf_frag.frags, &key);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+@@ -592,8 +568,8 @@ int nf_ct_frag6_gather(struct net *net,
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
+ skb_orphan(skb);
+- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
++ fq = fq_find(net, fhdr->identification, user, hdr,
++ skb->dev ? skb->dev->ifindex : 0);
+ if (fq == NULL) {
+ pr_debug("Can't find and can't create new queue\n");
+ return -ENOMEM;
+@@ -661,13 +637,12 @@ int nf_ct_frag6_init(void)
+ {
+ int ret = 0;
+
+- nf_frags.hashfn = nf_hashfn;
+ nf_frags.constructor = ip6_frag_init;
+ nf_frags.destructor = NULL;
+ nf_frags.qsize = sizeof(struct frag_queue);
+- nf_frags.match = ip6_frag_match;
+ nf_frags.frag_expire = nf_ct_frag6_expire;
+ nf_frags.frags_cache_name = nf_frags_cache_name;
++ nf_frags.rhash_params = ip6_rhash_params;
+ ret = inet_frags_init(&nf_frags);
+ if (ret)
+ goto out;
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -79,52 +79,13 @@ static struct inet_frags ip6_frags;
+ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ struct net_device *dev);
+
+-/*
+- * callers should be careful not to use the hash value outside the ipfrag_lock
+- * as doing so could race with ipfrag_hash_rnd being recalculated.
+- */
+-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+- const struct in6_addr *daddr)
+-{
+- net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
+- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+- (__force u32)id, ip6_frags.rnd);
+-}
+-
+-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
+-{
+- const struct frag_queue *fq;
+-
+- fq = container_of(q, struct frag_queue, q);
+- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
+-}
+-
+-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
+-{
+- const struct frag_queue *fq;
+- const struct ip6_create_arg *arg = a;
+-
+- fq = container_of(q, struct frag_queue, q);
+- return fq->id == arg->id &&
+- fq->user == arg->user &&
+- ipv6_addr_equal(&fq->saddr, arg->src) &&
+- ipv6_addr_equal(&fq->daddr, arg->dst) &&
+- (arg->iif == fq->iif ||
+- !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+- IPV6_ADDR_LINKLOCAL)));
+-}
+-EXPORT_SYMBOL(ip6_frag_match);
+-
+ void ip6_frag_init(struct inet_frag_queue *q, const void *a)
+ {
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+- const struct ip6_create_arg *arg = a;
++ const struct frag_v6_compare_key *key = a;
+
+- fq->id = arg->id;
+- fq->user = arg->user;
+- fq->saddr = *arg->src;
+- fq->daddr = *arg->dst;
+- fq->ecn = arg->ecn;
++ q->key.v6 = *key;
++ fq->ecn = 0;
+ }
+ EXPORT_SYMBOL(ip6_frag_init);
+
+@@ -181,23 +142,22 @@ static void ip6_frag_expire(unsigned lon
+ }
+
+ static struct frag_queue *
+-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+- const struct in6_addr *dst, int iif, u8 ecn)
++fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
+ {
++ struct frag_v6_compare_key key = {
++ .id = id,
++ .saddr = hdr->saddr,
++ .daddr = hdr->daddr,
++ .user = IP6_DEFRAG_LOCAL_DELIVER,
++ .iif = iif,
++ };
+ struct inet_frag_queue *q;
+- struct ip6_create_arg arg;
+- unsigned int hash;
+-
+- arg.id = id;
+- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+- arg.src = src;
+- arg.dst = dst;
+- arg.iif = iif;
+- arg.ecn = ecn;
+
+- hash = inet6_hash_frag(id, src, dst);
++ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
++ IPV6_ADDR_LINKLOCAL)))
++ key.iif = 0;
+
+- q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
++ q = inet_frag_find(&net->ipv6.frags, &key);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+@@ -524,6 +484,7 @@ static int ipv6_frag_rcv(struct sk_buff
+ struct frag_queue *fq;
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct net *net = dev_net(skb_dst(skb)->dev);
++ int iif;
+
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+@@ -552,13 +513,14 @@ static int ipv6_frag_rcv(struct sk_buff
+ return 1;
+ }
+
+- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
++ iif = skb->dev ? skb->dev->ifindex : 0;
++ fq = fq_find(net, fhdr->identification, hdr, iif);
+ if (fq) {
+ int ret;
+
+ spin_lock(&fq->q.lock);
+
++ fq->iif = iif;
+ ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+
+ spin_unlock(&fq->q.lock);
+@@ -732,17 +694,47 @@ static struct pernet_operations ip6_frag
+ .exit = ipv6_frags_exit_net,
+ };
+
++static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
++{
++ return jhash2(data,
++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
++}
++
++static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
++{
++ const struct inet_frag_queue *fq = data;
++
++ return jhash2((const u32 *)&fq->key.v6,
++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
++}
++
++static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
++{
++ const struct frag_v6_compare_key *key = arg->key;
++ const struct inet_frag_queue *fq = ptr;
++
++ return !!memcmp(&fq->key, key, sizeof(*key));
++}
++
++const struct rhashtable_params ip6_rhash_params = {
++ .head_offset = offsetof(struct inet_frag_queue, node),
++ .hashfn = ip6_key_hashfn,
++ .obj_hashfn = ip6_obj_hashfn,
++ .obj_cmpfn = ip6_obj_cmpfn,
++ .automatic_shrinking = true,
++};
++EXPORT_SYMBOL(ip6_rhash_params);
++
+ int __init ipv6_frag_init(void)
+ {
+ int ret;
+
+- ip6_frags.hashfn = ip6_hashfn;
+ ip6_frags.constructor = ip6_frag_init;
+ ip6_frags.destructor = NULL;
+ ip6_frags.qsize = sizeof(struct frag_queue);
+- ip6_frags.match = ip6_frag_match;
+ ip6_frags.frag_expire = ip6_frag_expire;
+ ip6_frags.frags_cache_name = ip6_frag_cache_name;
++ ip6_frags.rhash_params = ip6_rhash_params;
+ ret = inet_frags_init(&ip6_frags);
+ if (ret)
+ goto out;
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:14 -0700
+Subject: ip: add helpers to process in-order fragments faster.
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Peter Oskolkov <posk@google.com>, Florian Westphal <fw@strlen.de>
+Message-ID: <20181010193017.25221-27-f.fainelli@gmail.com>
+
+From: Peter Oskolkov <posk@google.com>
+
+This patch introduces several helper functions/macros that will be
+used in the follow-up patch. No runtime changes yet.
+
+The new logic (fully implemented in the second patch) is as follows:
+
+* Nodes in the rb-tree will now contain not single fragments, but lists
+ of consecutive fragments ("runs").
+
+* At each point in time, the current "active" run at the tail is
+ maintained/tracked. Fragments that arrive in-order, adjacent
+ to the previous tail fragment, are added to this tail run without
+ triggering the re-balancing of the rb-tree.
+
+* If a fragment arrives out of order with the offset _before_ the tail run,
+ it is inserted into the rb-tree as a single fragment.
+
+* If a fragment arrives after the current tail fragment (with a gap),
+  it starts a new "tail" run, and is inserted into the rb-tree
+  at the end as the head of the new run.
+
+skb->cb is used to store additional information
+needed here (suggested by Eric Dumazet).
+
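+To make the run invariants concrete, here is a hedged walker, not
+upstream code: every rb-tree node is the head of a run, and the rest
+of the run hangs off the cb area (FRAG_CB and next_frag are the
+names introduced below):
+
+	/* Illustrative only: visit every fragment in offset order. */
+	static void walk_all_fragments(struct rb_root *root)
+	{
+		struct rb_node *p;
+
+		for (p = rb_first(root); p; p = rb_next(p)) {
+			struct sk_buff *skb;
+
+			skb = rb_entry(p, struct sk_buff, rbnode);
+			while (skb) {	/* run head, then its chain */
+				/* ... process skb here ... */
+				skb = FRAG_CB(skb)->next_frag;
+			}
+		}
+	}
+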
+Reported-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 353c9cb360874e737fb000545f783df756c06f9a)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 6 +++
+ net/ipv4/ip_fragment.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 79 insertions(+)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -56,7 +56,9 @@ struct frag_v6_compare_key {
+ * @lock: spinlock protecting this frag
+ * @refcnt: reference count of the queue
+ * @fragments: received fragments head
++ * @rb_fragments: received fragments rb-tree root
+ * @fragments_tail: received fragments tail
++ * @last_run_head: the head of the last "run". see ip_fragment.c
+ * @stamp: timestamp of the last received fragment
+ * @len: total length of the original datagram
+ * @meat: length of received fragments so far
+@@ -77,6 +79,7 @@ struct inet_frag_queue {
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
+ struct sk_buff *fragments_tail;
++ struct sk_buff *last_run_head;
+ ktime_t stamp;
+ int len;
+ int meat;
+@@ -112,6 +115,9 @@ void inet_frag_kill(struct inet_frag_que
+ void inet_frag_destroy(struct inet_frag_queue *q);
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+
++/* Free all skbs in the queue; return the sum of their truesizes. */
++unsigned int inet_frag_rbtree_purge(struct rb_root *root);
++
+ static inline void inet_frag_put(struct inet_frag_queue *q)
+ {
+ if (atomic_dec_and_test(&q->refcnt))
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -56,6 +56,57 @@
+ */
+ static const char ip_frag_cache_name[] = "ip4-frags";
+
++/* Use skb->cb to track consecutive/adjacent fragments coming at
++ * the end of the queue. Nodes in the rb-tree queue will
++ * contain "runs" of one or more adjacent fragments.
++ *
++ * Invariants:
++ * - next_frag is NULL at the tail of a "run";
++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
++ */
++struct ipfrag_skb_cb {
++ struct inet_skb_parm h;
++ struct sk_buff *next_frag;
++ int frag_run_len;
++};
++
++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
++
++static void ip4_frag_init_run(struct sk_buff *skb)
++{
++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
++
++ FRAG_CB(skb)->next_frag = NULL;
++ FRAG_CB(skb)->frag_run_len = skb->len;
++}
++
++/* Append skb to the last "run". */
++static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
++ struct sk_buff *skb)
++{
++ RB_CLEAR_NODE(&skb->rbnode);
++ FRAG_CB(skb)->next_frag = NULL;
++
++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
++ FRAG_CB(q->fragments_tail)->next_frag = skb;
++ q->fragments_tail = skb;
++}
++
++/* Create a new "run" with the skb. */
++static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
++{
++ if (q->last_run_head)
++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
++ &q->last_run_head->rbnode.rb_right);
++ else
++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
++
++ ip4_frag_init_run(skb);
++ q->fragments_tail = skb;
++ q->last_run_head = skb;
++}
++
+ /* Describe an entry in the "incomplete datagrams" queue. */
+ struct ipq {
+ struct inet_frag_queue q;
+@@ -652,6 +703,28 @@ struct sk_buff *ip_check_defrag(struct n
+ }
+ EXPORT_SYMBOL(ip_check_defrag);
+
++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
++{
++ struct rb_node *p = rb_first(root);
++ unsigned int sum = 0;
++
++ while (p) {
++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
++
++ p = rb_next(p);
++ rb_erase(&skb->rbnode, root);
++ while (skb) {
++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
++
++ sum += skb->truesize;
++ kfree_skb(skb);
++ skb = next;
++ }
++ }
++ return sum;
++}
++EXPORT_SYMBOL(inet_frag_rbtree_purge);
++
+ #ifdef CONFIG_SYSCTL
+ static int dist_min;
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:07 -0700
+Subject: ip: discard IPv4 datagrams with overlapping segments.
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Peter Oskolkov <posk@google.com>, Florian Westphal <fw@strlen.de>
+Message-ID: <20181010193017.25221-20-f.fainelli@gmail.com>
+
+From: Peter Oskolkov <posk@google.com>
+
+This behavior is required in IPv6, and there is little need
+to tolerate overlapping fragments in IPv4. This change
+simplifies the code and eliminates potential DDoS attack vectors.
+
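+The new policy reduces to two comparisons against the would-be
+neighbours; a hedged distillation of the check added below, where
+frag_overlaps() is a hypothetical helper:
+
+	static bool frag_overlaps(int offset, int end,
+				  const struct sk_buff *prev,
+				  const struct sk_buff *next)
+	{
+		/* does the previous fragment run past our start? */
+		if (prev && prev->ip_defrag_offset + prev->len > offset)
+			return true;
+		/* do we run into the start of the next fragment? */
+		if (next && next->ip_defrag_offset < end)
+			return true;
+		return false;
+	}
+
+Any hit now kills the whole queue instead of trimming the overlap
+away as the old code did.
+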
+Tested: ran ip_defrag selftest (not yet available upstream).
+
+Suggested-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Acked-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 7969e5c40dfd04799d4341f1b7cd266b6e47f227)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/snmp.h | 1
+ net/ipv4/ip_fragment.c | 77 +++++++++++-----------------------------------
+ net/ipv4/proc.c | 1
+ 3 files changed, 22 insertions(+), 57 deletions(-)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -55,6 +55,7 @@ enum
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
++ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ __IPSTATS_MIB_MAX
+ };
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -275,6 +275,7 @@ static int ip_frag_reinit(struct ipq *qp
+ /* Add new segment to existing queue. */
+ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ {
++ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ unsigned int fragsize;
+@@ -355,65 +356,23 @@ static int ip_frag_queue(struct ipq *qp,
+ }
+
+ found:
+- /* We found where to put this one. Check for overlap with
+- * preceding fragment, and, if needed, align things so that
+- * any overlaps are eliminated.
++ /* RFC5722, Section 4, amended by Errata ID : 3089
++ * When reassembling an IPv6 datagram, if
++ * one or more of its constituent fragments is determined to be an
++ * overlapping fragment, the entire datagram (and any constituent
++ * fragments) MUST be silently discarded.
++ *
++ * We do the same here for IPv4.
+ */
+- if (prev) {
+- int i = (prev->ip_defrag_offset + prev->len) - offset;
+
+- if (i > 0) {
+- offset += i;
+- err = -EINVAL;
+- if (end <= offset)
+- goto err;
+- err = -ENOMEM;
+- if (!pskb_pull(skb, i))
+- goto err;
+- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+- skb->ip_summed = CHECKSUM_NONE;
+- }
+- }
+-
+- err = -ENOMEM;
+-
+- while (next && next->ip_defrag_offset < end) {
+- int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
+-
+- if (i < next->len) {
+- int delta = -next->truesize;
+-
+- /* Eat head of the next overlapped fragment
+- * and leave the loop. The next ones cannot overlap.
+- */
+- if (!pskb_pull(next, i))
+- goto err;
+- delta += next->truesize;
+- if (delta)
+- add_frag_mem_limit(qp->q.net, delta);
+- next->ip_defrag_offset += i;
+- qp->q.meat -= i;
+- if (next->ip_summed != CHECKSUM_UNNECESSARY)
+- next->ip_summed = CHECKSUM_NONE;
+- break;
+- } else {
+- struct sk_buff *free_it = next;
+-
+- /* Old fragment is completely overridden with
+- * new one drop it.
+- */
+- next = next->next;
+-
+- if (prev)
+- prev->next = next;
+- else
+- qp->q.fragments = next;
+-
+- qp->q.meat -= free_it->len;
+- sub_frag_mem_limit(qp->q.net, free_it->truesize);
+- kfree_skb(free_it);
+- }
+- }
++ /* Is there an overlap with the previous fragment? */
++ if (prev &&
++ (prev->ip_defrag_offset + prev->len) > offset)
++ goto discard_qp;
++
++ /* Is there an overlap with the next fragment? */
++ if (next && next->ip_defrag_offset < end)
++ goto discard_qp;
+
+ /* Note : skb->ip_defrag_offset and skb->dev share the same location */
+ dev = skb->dev;
+@@ -461,6 +420,10 @@ found:
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
++discard_qp:
++ inet_frag_kill(&qp->q);
++ err = -EINVAL;
++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ err:
+ kfree_skb(skb);
+ return err;
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -134,6 +134,7 @@ static const struct snmp_mib snmp4_ipext
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
++ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
+ SNMP_MIB_SENTINEL
+ };
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:16 -0700
+Subject: ip: frags: fix crash in ip_do_fragment()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Taehee Yoo <ap420073@gmail.com>
+Message-ID: <20181010193017.25221-29-f.fainelli@gmail.com>
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+commit 5d407b071dc369c26a38398326ee2be53651cfe4 upstream
+
+A kernel crash occurs when a defragmented packet is fragmented
+in ip_do_fragment().
+In the defragment routine, skb_orphan() is called and
+skb->ip_defrag_offset is set, but skb->sk and
+skb->ip_defrag_offset are the same union member, so
+frag->sk is not NULL.
+Hence the crash occurs in the skb->sk check in ip_do_fragment() when
+a defragmented packet is fragmented.
+
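+The aliasing is easiest to see in isolation; a hedged model of the
+two sk_buff members involved (the real struct is far larger):
+
+	/* Not the real struct sk_buff: just the aliasing members.
+	 * A non-zero ip_defrag_offset makes the sk pointer read
+	 * back as non-NULL, which later trips the skb->sk check
+	 * in ip_do_fragment().
+	 */
+	struct skb_model {
+		union {
+			struct sock *sk;	/* owning socket */
+			int ip_defrag_offset;	/* reused in defrag */
+		};
+	};
+
+Clearing fp->sk during reassembly, as the fix below does, restores
+the invariant before the skb can reach the output path again.
+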
+test commands:
+ %iptables -t nat -I POSTROUTING -j MASQUERADE
+ %hping3 192.168.4.2 -s 1000 -p 2000 -d 60000
+
+splat looks like:
+[ 261.069429] kernel BUG at net/ipv4/ip_output.c:636!
+[ 261.075753] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
+[ 261.083854] CPU: 1 PID: 1349 Comm: hping3 Not tainted 4.19.0-rc2+ #3
+[ 261.100977] RIP: 0010:ip_do_fragment+0x1613/0x2600
+[ 261.106945] Code: e8 e2 38 e3 fe 4c 8b 44 24 18 48 8b 74 24 08 e9 92 f6 ff ff 80 3c 02 00 0f 85 da 07 00 00 48 8b b5 d0 00 00 00 e9 25 f6 ff ff <0f> 0b 0f 0b 44 8b 54 24 58 4c 8b 4c 24 18 4c 8b 5c 24 60 4c 8b 6c
+[ 261.127015] RSP: 0018:ffff8801031cf2c0 EFLAGS: 00010202
+[ 261.134156] RAX: 1ffff1002297537b RBX: ffffed0020639e6e RCX: 0000000000000004
+[ 261.142156] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff880114ba9bd8
+[ 261.150157] RBP: ffff880114ba8a40 R08: ffffed0022975395 R09: ffffed0022975395
+[ 261.158157] R10: 0000000000000001 R11: ffffed0022975394 R12: ffff880114ba9ca4
+[ 261.166159] R13: 0000000000000010 R14: ffff880114ba9bc0 R15: dffffc0000000000
+[ 261.174169] FS: 00007fbae2199700(0000) GS:ffff88011b400000(0000) knlGS:0000000000000000
+[ 261.183012] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 261.189013] CR2: 00005579244fe000 CR3: 0000000119bf4000 CR4: 00000000001006e0
+[ 261.198158] Call Trace:
+[ 261.199018] ? dst_output+0x180/0x180
+[ 261.205011] ? save_trace+0x300/0x300
+[ 261.209018] ? ip_copy_metadata+0xb00/0xb00
+[ 261.213034] ? sched_clock_local+0xd4/0x140
+[ 261.218158] ? kill_l4proto+0x120/0x120 [nf_conntrack]
+[ 261.223014] ? rt_cpu_seq_stop+0x10/0x10
+[ 261.227014] ? find_held_lock+0x39/0x1c0
+[ 261.233008] ip_finish_output+0x51d/0xb50
+[ 261.237006] ? ip_fragment.constprop.56+0x220/0x220
+[ 261.243011] ? nf_ct_l4proto_register_one+0x5b0/0x5b0 [nf_conntrack]
+[ 261.250152] ? rcu_is_watching+0x77/0x120
+[ 261.255010] ? nf_nat_ipv4_out+0x1e/0x2b0 [nf_nat_ipv4]
+[ 261.261033] ? nf_hook_slow+0xb1/0x160
+[ 261.265007] ip_output+0x1c7/0x710
+[ 261.269005] ? ip_mc_output+0x13f0/0x13f0
+[ 261.273002] ? __local_bh_enable_ip+0xe9/0x1b0
+[ 261.278152] ? ip_fragment.constprop.56+0x220/0x220
+[ 261.282996] ? nf_hook_slow+0xb1/0x160
+[ 261.287007] raw_sendmsg+0x21f9/0x4420
+[ 261.291008] ? dst_output+0x180/0x180
+[ 261.297003] ? sched_clock_cpu+0x126/0x170
+[ 261.301003] ? find_held_lock+0x39/0x1c0
+[ 261.306155] ? stop_critical_timings+0x420/0x420
+[ 261.311004] ? check_flags.part.36+0x450/0x450
+[ 261.315005] ? _raw_spin_unlock_irq+0x29/0x40
+[ 261.320995] ? _raw_spin_unlock_irq+0x29/0x40
+[ 261.326142] ? cyc2ns_read_end+0x10/0x10
+[ 261.330139] ? raw_bind+0x280/0x280
+[ 261.334138] ? sched_clock_cpu+0x126/0x170
+[ 261.338995] ? check_flags.part.36+0x450/0x450
+[ 261.342991] ? __lock_acquire+0x4500/0x4500
+[ 261.348994] ? inet_sendmsg+0x11c/0x500
+[ 261.352989] ? dst_output+0x180/0x180
+[ 261.357012] inet_sendmsg+0x11c/0x500
+[ ... ]
+
+v2:
+ - clear skb->sk in the reassembly routine. (Eric Dumazet)
+
+Fixes: fa0f527358bd ("ip: use rb trees for IP frag queue.")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c | 1 +
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -597,6 +597,7 @@ static int ip_frag_reasm(struct ipq *qp,
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
++ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -452,6 +452,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq,
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
++ fp->sk = NULL;
+ }
+ sub_frag_mem_limit(fq->q.net, head->truesize);
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:15 -0700
+Subject: ip: process in-order fragments efficiently
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Peter Oskolkov <posk@google.com>, Florian Westphal <fw@strlen.de>
+Message-ID: <20181010193017.25221-28-f.fainelli@gmail.com>
+
+From: Peter Oskolkov <posk@google.com>
+
+This patch changes the runtime behavior of the IP defrag queue:
+incoming in-order fragments are appended to the tail of the current
+list/"run" of in-order fragments.
+
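+In outline, the fast path keys off the current tail before falling
+back to a full tree insertion; a hedged summary of the new decision,
+using the ip4_frag_* helpers introduced two patches earlier
+(insert_before_tail_run() is hypothetical shorthand for the binary
+search in the patch, and the overlap checks are omitted for brevity):
+
+	static void queue_fragment(struct inet_frag_queue *q,
+				   struct sk_buff *skb, int offset)
+	{
+		struct sk_buff *prev_tail = q->fragments_tail;
+
+		if (!prev_tail) {
+			ip4_frag_create_run(q, skb);	/* first frag */
+		} else if (offset == prev_tail->ip_defrag_offset +
+				     prev_tail->len) {
+			/* in order and adjacent: O(1), no rebalance */
+			ip4_frag_append_to_last_run(q, skb);
+		} else if (offset > prev_tail->ip_defrag_offset +
+				    prev_tail->len) {
+			/* gap after the tail: start a new tail run */
+			ip4_frag_create_run(q, skb);
+		} else {
+			/* before the tail run: rb-tree insertion,
+			 * where any overlap is fatal to the queue
+			 */
+			insert_before_tail_run(q, skb);
+		}
+	}
+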
+On some workloads, UDP stream performance is substantially improved:
+
+RX: ./udp_stream -F 10 -T 2 -l 60
+TX: ./udp_stream -c -H <host> -F 10 -T 5 -l 60
+
+with this patchset applied on a 10Gbps receiver:
+
+ throughput=9524.18
+ throughput_units=Mbit/s
+
+upstream (net-next):
+
+ throughput=4608.93
+ throughput_units=Mbit/s
+
+Reported-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit a4fd284a1f8fd4b6c59aa59db2185b1e17c5c11c)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_fragment.c | 2
+ net/ipv4/ip_fragment.c | 110 +++++++++++++++++++++++++++++------------------
+ 2 files changed, 70 insertions(+), 42 deletions(-)
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -145,7 +145,7 @@ void inet_frag_destroy(struct inet_frag_
+ fp = xp;
+ } while (fp);
+ } else {
+- sum_truesize = skb_rbtree_purge(&q->rb_fragments);
++ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
+ }
+ sum = sum_truesize + f->qsize;
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -125,8 +125,8 @@ static u8 ip4_frag_ecn(u8 tos)
+
+ static struct inet_frags ip4_frags;
+
+-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+- struct net_device *dev);
++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
++ struct sk_buff *prev_tail, struct net_device *dev);
+
+
+ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
+@@ -217,7 +217,12 @@ static void ip_expire(unsigned long arg)
+ head = skb_rb_first(&qp->q.rb_fragments);
+ if (!head)
+ goto out;
+- rb_erase(&head->rbnode, &qp->q.rb_fragments);
++ if (FRAG_CB(head)->next_frag)
++ rb_replace_node(&head->rbnode,
++ &FRAG_CB(head)->next_frag->rbnode,
++ &qp->q.rb_fragments);
++ else
++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+@@ -318,7 +323,7 @@ static int ip_frag_reinit(struct ipq *qp
+ return -ETIMEDOUT;
+ }
+
+- sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments);
++ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp->q.flags = 0;
+@@ -327,6 +332,7 @@ static int ip_frag_reinit(struct ipq *qp
+ qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
+ qp->q.fragments_tail = NULL;
++ qp->q.last_run_head = NULL;
+ qp->iif = 0;
+ qp->ecn = 0;
+
+@@ -338,7 +344,7 @@ static int ip_frag_queue(struct ipq *qp,
+ {
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+- struct sk_buff *skb1;
++ struct sk_buff *skb1, *prev_tail;
+ struct net_device *dev;
+ unsigned int fragsize;
+ int flags, offset;
+@@ -416,38 +422,41 @@ static int ip_frag_queue(struct ipq *qp,
+ */
+
+ /* Find out where to put this fragment. */
+- skb1 = qp->q.fragments_tail;
+- if (!skb1) {
+- /* This is the first fragment we've received. */
+- rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node);
+- qp->q.fragments_tail = skb;
+- } else if ((skb1->ip_defrag_offset + skb1->len) < end) {
+- /* This is the common/special case: skb goes to the end. */
++ prev_tail = qp->q.fragments_tail;
++ if (!prev_tail)
++ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
++ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
++ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+- if (offset < (skb1->ip_defrag_offset + skb1->len))
++ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+ goto discard_qp;
+- /* Insert after skb1. */
+- rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right);
+- qp->q.fragments_tail = skb;
++ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
++ ip4_frag_append_to_last_run(&qp->q, skb);
++ else
++ ip4_frag_create_run(&qp->q, skb);
+ } else {
+- /* Binary search. Note that skb can become the first fragment, but
+- * not the last (covered above). */
++ /* Binary search. Note that skb can become the first fragment,
++ * but not the last (covered above).
++ */
+ rbn = &qp->q.rb_fragments.rb_node;
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+- else if (offset >= skb1->ip_defrag_offset + skb1->len)
++ else if (offset >= skb1->ip_defrag_offset +
++ FRAG_CB(skb1)->frag_run_len)
+ rbn = &parent->rb_right;
+ else /* Found an overlap with skb1. */
+ goto discard_qp;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+- * one of its NULL left/right children. Insert skb. */
++ * one of its NULL left/right children. Insert skb.
++ */
++ ip4_frag_init_run(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
++ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
+ }
+- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
+
+ if (dev)
+ qp->iif = dev->ifindex;
+@@ -474,7 +483,7 @@ static int ip_frag_queue(struct ipq *qp,
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+- err = ip_frag_reasm(qp, skb, dev);
++ err = ip_frag_reasm(qp, skb, prev_tail, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+@@ -493,7 +502,7 @@ err:
+
+ /* Build a new IP datagram from all its fragments. */
+ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+- struct net_device *dev)
++ struct sk_buff *prev_tail, struct net_device *dev)
+ {
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct iphdr *iph;
+@@ -517,10 +526,16 @@ static int ip_frag_reasm(struct ipq *qp,
+ fp = skb_clone(skb, GFP_ATOMIC);
+ if (!fp)
+ goto out_nomem;
+- rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments);
++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
++ if (RB_EMPTY_NODE(&skb->rbnode))
++ FRAG_CB(prev_tail)->next_frag = fp;
++ else
++ rb_replace_node(&skb->rbnode, &fp->rbnode,
++ &qp->q.rb_fragments);
+ if (qp->q.fragments_tail == skb)
+ qp->q.fragments_tail = fp;
+ skb_morph(skb, head);
++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &qp->q.rb_fragments);
+ consume_skb(head);
+@@ -556,7 +571,7 @@ static int ip_frag_reasm(struct ipq *qp,
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+- skb->truesize += clone->truesize;
++ head->truesize += clone->truesize;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(qp->q.net, clone->truesize);
+@@ -569,24 +584,36 @@ static int ip_frag_reasm(struct ipq *qp,
+ skb_push(head, head->data - skb_network_header(head));
+
+ /* Traverse the tree in order, to build frag_list. */
++ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+- while (rbn) {
+- struct rb_node *rbnext = rb_next(rbn);
+- fp = rb_to_skb(rbn);
+- rb_erase(rbn, &qp->q.rb_fragments);
+- rbn = rbnext;
+- *nextp = fp;
+- nextp = &fp->next;
+- fp->prev = NULL;
+- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+- head->data_len += fp->len;
+- head->len += fp->len;
+- if (head->ip_summed != fp->ip_summed)
+- head->ip_summed = CHECKSUM_NONE;
+- else if (head->ip_summed == CHECKSUM_COMPLETE)
+- head->csum = csum_add(head->csum, fp->csum);
+- head->truesize += fp->truesize;
++ while (rbn || fp) {
++ /* fp points to the next sk_buff in the current run;
++ * rbn points to the next run.
++ */
++ /* Go through the current run. */
++ while (fp) {
++ *nextp = fp;
++ nextp = &fp->next;
++ fp->prev = NULL;
++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
++ head->data_len += fp->len;
++ head->len += fp->len;
++ if (head->ip_summed != fp->ip_summed)
++ head->ip_summed = CHECKSUM_NONE;
++ else if (head->ip_summed == CHECKSUM_COMPLETE)
++ head->csum = csum_add(head->csum, fp->csum);
++ head->truesize += fp->truesize;
++ fp = FRAG_CB(fp)->next_frag;
++ }
++ /* Move to the next run. */
++ if (rbn) {
++ struct rb_node *rbnext = rb_next(rbn);
++
++ fp = rb_to_skb(rbn);
++ rb_erase(rbn, &qp->q.rb_fragments);
++ rbn = rbnext;
++ }
+ }
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+@@ -622,6 +649,7 @@ static int ip_frag_reasm(struct ipq *qp,
+ qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
+ qp->q.fragments_tail = NULL;
++ qp->q.last_run_head = NULL;
+ return 0;
+
+ out_nomem:
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:13 -0700
+Subject: ip: use rb trees for IP frag queue.
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Peter Oskolkov <posk@google.com>, Florian Westphal <fw@strlen.de>
+Message-ID: <20181010193017.25221-26-f.fainelli@gmail.com>
+
+From: Peter Oskolkov <posk@google.com>
+
+(commit fa0f527358bd900ef92f925878ed6bfbd51305cc upstream)
+
+Similar to TCP OOO RX queue, it makes sense to use rb trees to store
+IP fragments, so that OOO fragments are inserted faster.
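+
+A minimal sketch of the insertion logic, condensed from the
+ip_frag_queue() hunk below (the fast tail path and SNMP accounting
+are omitted here):
+
+	struct rb_node **rbn = &qp->q.rb_fragments.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*rbn) {
+		struct sk_buff *skb1;
+
+		parent = *rbn;
+		skb1 = rb_to_skb(parent);
+		if (end <= skb1->ip_defrag_offset)
+			rbn = &parent->rb_left;
+		else if (offset >= skb1->ip_defrag_offset + skb1->len)
+			rbn = &parent->rb_right;
+		else		/* overlap: RFC 5722, drop the queue */
+			goto discard_qp;
+	}
+	rb_link_node(&skb->rbnode, parent, rbn);
+	rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);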
+
+Tested:
+
+- a follow-up patch contains a rather comprehensive ip defrag
+ self-test (functional)
+- ran neper `udp_stream -c -H <host> -F 100 -l 300 -T 20`:
+ netstat --statistics
+ Ip:
+ 282078937 total packets received
+ 0 forwarded
+ 0 incoming packets discarded
+ 946760 incoming packets delivered
+ 18743456 requests sent out
+ 101 fragments dropped after timeout
+ 282077129 reassemblies required
+ 944952 packets reassembled ok
+ 262734239 packet reassembles failed
+ (The reassembly numbers above are somewhat better than on a
+ kernel without this patchset; more comprehensive performance
+ testing is TBD.)
+
+Reported-by: Jann Horn <jannh@google.com>
+Reported-by: Juha-Matti Tilli <juha-matti.tilli@iki.fi>
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 4
+ include/net/inet_frag.h | 3
+ net/ipv4/inet_fragment.c | 14 +-
+ net/ipv4/ip_fragment.c | 182 +++++++++++++++++---------------
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 1
+ net/ipv6/reassembly.c | 1
+ 6 files changed, 116 insertions(+), 89 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -643,14 +643,14 @@ struct sk_buff {
+ struct skb_mstamp skb_mstamp;
+ };
+ };
+- struct rb_node rbnode; /* used in netem & tcp stack */
++ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
+ };
+
+ union {
++ struct sock *sk;
+ int ip_defrag_offset;
+ };
+
+- struct sock *sk;
+ struct net_device *dev;
+
+ /*
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -74,7 +74,8 @@ struct inet_frag_queue {
+ struct timer_list timer;
+ spinlock_t lock;
+ atomic_t refcnt;
+- struct sk_buff *fragments;
++ struct sk_buff *fragments; /* Used in IPv6. */
++ struct rb_root rb_fragments; /* Used in IPv4. */
+ struct sk_buff *fragments_tail;
+ ktime_t stamp;
+ int len;
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -136,12 +136,16 @@ void inet_frag_destroy(struct inet_frag_
+ fp = q->fragments;
+ nf = q->net;
+ f = nf->f;
+- while (fp) {
+- struct sk_buff *xp = fp->next;
++ if (fp) {
++ do {
++ struct sk_buff *xp = fp->next;
+
+- sum_truesize += fp->truesize;
+- kfree_skb(fp);
+- fp = xp;
++ sum_truesize += fp->truesize;
++ kfree_skb(fp);
++ fp = xp;
++ } while (fp);
++ } else {
++ sum_truesize = skb_rbtree_purge(&q->rb_fragments);
+ }
+ sum = sum_truesize + f->qsize;
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -134,7 +134,7 @@ static bool frag_expire_skip_icmp(u32 us
+ static void ip_expire(unsigned long arg)
+ {
+ const struct iphdr *iph;
+- struct sk_buff *head;
++ struct sk_buff *head = NULL;
+ struct net *net;
+ struct ipq *qp;
+ int err;
+@@ -150,14 +150,31 @@ static void ip_expire(unsigned long arg)
+
+ ipq_kill(qp);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+-
+- head = qp->q.fragments;
+-
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
+
+- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
++ if (!qp->q.flags & INET_FRAG_FIRST_IN)
+ goto out;
+
++ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
++ * pull the head out of the tree in order to be able to
++ * deal with head->dev.
++ */
++ if (qp->q.fragments) {
++ head = qp->q.fragments;
++ qp->q.fragments = head->next;
++ } else {
++ head = skb_rb_first(&qp->q.rb_fragments);
++ if (!head)
++ goto out;
++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
++ memset(&head->rbnode, 0, sizeof(head->rbnode));
++ barrier();
++ }
++ if (head == qp->q.fragments_tail)
++ qp->q.fragments_tail = NULL;
++
++ sub_frag_mem_limit(qp->q.net, head->truesize);
++
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+ goto out;
+@@ -177,16 +194,16 @@ static void ip_expire(unsigned long arg)
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;
+
+- skb_get(head);
+ spin_unlock(&qp->q.lock);
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+- kfree_skb(head);
+ goto out_rcu_unlock;
+
+ out:
+ spin_unlock(&qp->q.lock);
+ out_rcu_unlock:
+ rcu_read_unlock();
++ if (head)
++ kfree_skb(head);
+ ipq_put(qp);
+ }
+
+@@ -229,7 +246,7 @@ static int ip_frag_too_far(struct ipq *q
+ end = atomic_inc_return(&peer->rid);
+ qp->rid = end;
+
+- rc = qp->q.fragments && (end - start) > max;
++ rc = qp->q.fragments_tail && (end - start) > max;
+
+ if (rc) {
+ struct net *net;
+@@ -243,7 +260,6 @@ static int ip_frag_too_far(struct ipq *q
+
+ static int ip_frag_reinit(struct ipq *qp)
+ {
+- struct sk_buff *fp;
+ unsigned int sum_truesize = 0;
+
+ if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
+@@ -251,20 +267,14 @@ static int ip_frag_reinit(struct ipq *qp
+ return -ETIMEDOUT;
+ }
+
+- fp = qp->q.fragments;
+- do {
+- struct sk_buff *xp = fp->next;
+-
+- sum_truesize += fp->truesize;
+- kfree_skb(fp);
+- fp = xp;
+- } while (fp);
++ sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp->q.flags = 0;
+ qp->q.len = 0;
+ qp->q.meat = 0;
+ qp->q.fragments = NULL;
++ qp->q.rb_fragments = RB_ROOT;
+ qp->q.fragments_tail = NULL;
+ qp->iif = 0;
+ qp->ecn = 0;
+@@ -276,7 +286,8 @@ static int ip_frag_reinit(struct ipq *qp
+ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ {
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+- struct sk_buff *prev, *next;
++ struct rb_node **rbn, *parent;
++ struct sk_buff *skb1;
+ struct net_device *dev;
+ unsigned int fragsize;
+ int flags, offset;
+@@ -339,58 +350,58 @@ static int ip_frag_queue(struct ipq *qp,
+ if (err)
+ goto err;
+
+- /* Find out which fragments are in front and at the back of us
+- * in the chain of fragments so far. We must know where to put
+- * this fragment, right?
+- */
+- prev = qp->q.fragments_tail;
+- if (!prev || prev->ip_defrag_offset < offset) {
+- next = NULL;
+- goto found;
+- }
+- prev = NULL;
+- for (next = qp->q.fragments; next != NULL; next = next->next) {
+- if (next->ip_defrag_offset >= offset)
+- break; /* bingo! */
+- prev = next;
+- }
++ /* Note : skb->rbnode and skb->dev share the same location. */
++ dev = skb->dev;
++ /* Makes sure compiler wont do silly aliasing games */
++ barrier();
+
+-found:
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+- * We do the same here for IPv4.
++ * We do the same here for IPv4 (and increment an snmp counter).
+ */
+
+- /* Is there an overlap with the previous fragment? */
+- if (prev &&
+- (prev->ip_defrag_offset + prev->len) > offset)
+- goto discard_qp;
+-
+- /* Is there an overlap with the next fragment? */
+- if (next && next->ip_defrag_offset < end)
+- goto discard_qp;
++ /* Find out where to put this fragment. */
++ skb1 = qp->q.fragments_tail;
++ if (!skb1) {
++ /* This is the first fragment we've received. */
++ rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node);
++ qp->q.fragments_tail = skb;
++ } else if ((skb1->ip_defrag_offset + skb1->len) < end) {
++ /* This is the common/special case: skb goes to the end. */
++ /* Detect and discard overlaps. */
++ if (offset < (skb1->ip_defrag_offset + skb1->len))
++ goto discard_qp;
++ /* Insert after skb1. */
++ rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right);
++ qp->q.fragments_tail = skb;
++ } else {
++ /* Binary search. Note that skb can become the first fragment, but
++ * not the last (covered above). */
++ rbn = &qp->q.rb_fragments.rb_node;
++ do {
++ parent = *rbn;
++ skb1 = rb_to_skb(parent);
++ if (end <= skb1->ip_defrag_offset)
++ rbn = &parent->rb_left;
++ else if (offset >= skb1->ip_defrag_offset + skb1->len)
++ rbn = &parent->rb_right;
++ else /* Found an overlap with skb1. */
++ goto discard_qp;
++ } while (*rbn);
++ /* Here we have parent properly set, and rbn pointing to
++ * one of its NULL left/right children. Insert skb. */
++ rb_link_node(&skb->rbnode, parent, rbn);
++ }
++ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
+
+- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
+- dev = skb->dev;
+ if (dev)
+ qp->iif = dev->ifindex;
+- /* Makes sure compiler wont do silly aliasing games */
+- barrier();
+ skb->ip_defrag_offset = offset;
+
+- /* Insert this fragment in the chain of fragments. */
+- skb->next = next;
+- if (!next)
+- qp->q.fragments_tail = skb;
+- if (prev)
+- prev->next = skb;
+- else
+- qp->q.fragments = skb;
+-
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+@@ -412,7 +423,7 @@ found:
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+- err = ip_frag_reasm(qp, prev, dev);
++ err = ip_frag_reasm(qp, skb, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+@@ -429,15 +440,15 @@ err:
+ return err;
+ }
+
+-
+ /* Build a new IP datagram from all its fragments. */
+-
+-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct net_device *dev)
+ {
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct iphdr *iph;
+- struct sk_buff *fp, *head = qp->q.fragments;
++ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
++ struct sk_buff **nextp; /* To build frag_list. */
++ struct rb_node *rbn;
+ int len;
+ int ihlen;
+ int err;
+@@ -451,25 +462,20 @@ static int ip_frag_reasm(struct ipq *qp,
+ goto out_fail;
+ }
+ /* Make the one we just received the head. */
+- if (prev) {
+- head = prev->next;
+- fp = skb_clone(head, GFP_ATOMIC);
++ if (head != skb) {
++ fp = skb_clone(skb, GFP_ATOMIC);
+ if (!fp)
+ goto out_nomem;
+-
+- fp->next = head->next;
+- if (!fp->next)
++ rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments);
++ if (qp->q.fragments_tail == skb)
+ qp->q.fragments_tail = fp;
+- prev->next = fp;
+-
+- skb_morph(head, qp->q.fragments);
+- head->next = qp->q.fragments->next;
+-
+- consume_skb(qp->q.fragments);
+- qp->q.fragments = head;
++ skb_morph(skb, head);
++ rb_replace_node(&head->rbnode, &skb->rbnode,
++ &qp->q.rb_fragments);
++ consume_skb(head);
++ head = skb;
+ }
+
+- WARN_ON(!head);
+ WARN_ON(head->ip_defrag_offset != 0);
+
+ /* Allocate a new buffer for the datagram. */
+@@ -494,24 +500,35 @@ static int ip_frag_reasm(struct ipq *qp,
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ goto out_nomem;
+- clone->next = head->next;
+- head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+- head->data_len -= clone->len;
+- head->len -= clone->len;
++ skb->truesize += clone->truesize;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(qp->q.net, clone->truesize);
++ skb_shinfo(head)->frag_list = clone;
++ nextp = &clone->next;
++ } else {
++ nextp = &skb_shinfo(head)->frag_list;
+ }
+
+- skb_shinfo(head)->frag_list = head->next;
+ skb_push(head, head->data - skb_network_header(head));
+
+- for (fp=head->next; fp; fp = fp->next) {
++ /* Traverse the tree in order, to build frag_list. */
++ rbn = rb_next(&head->rbnode);
++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
++ while (rbn) {
++ struct rb_node *rbnext = rb_next(rbn);
++ fp = rb_to_skb(rbn);
++ rb_erase(rbn, &qp->q.rb_fragments);
++ rbn = rbnext;
++ *nextp = fp;
++ nextp = &fp->next;
++ fp->prev = NULL;
++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+@@ -522,7 +539,9 @@ static int ip_frag_reasm(struct ipq *qp,
+ }
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
++ *nextp = NULL;
+ head->next = NULL;
++ head->prev = NULL;
+ head->dev = dev;
+ head->tstamp = qp->q.stamp;
+ IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
+@@ -550,6 +569,7 @@ static int ip_frag_reasm(struct ipq *qp,
+
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
+ qp->q.fragments = NULL;
++ qp->q.rb_fragments = RB_ROOT;
+ qp->q.fragments_tail = NULL;
+ return 0;
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -470,6 +470,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq,
+ head->csum);
+
+ fq->q.fragments = NULL;
++ fq->q.rb_fragments = RB_ROOT;
+ fq->q.fragments_tail = NULL;
+
+ return true;
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -466,6 +466,7 @@ static int ip6_frag_reasm(struct frag_qu
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+ rcu_read_unlock();
+ fq->q.fragments = NULL;
++ fq->q.rb_fragments = RB_ROOT;
+ fq->q.fragments_tail = NULL;
+ return 1;
+
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:17 -0700
+Subject: ipv4: frags: precedence bug in ip_expire()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Dan Carpenter <dan.carpenter@oracle.com>
+Message-ID: <20181010193017.25221-30-f.fainelli@gmail.com>
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+(commit 70837ffe3085c9a91488b52ca13ac84424da1042 upstream)
+
+We accidentally removed the parentheses here, but they are required
+because '!' has higher precedence than '&'.
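+
+A two-line illustration (not the kernel code itself):
+
+	if (!qp->q.flags & INET_FRAG_FIRST_IN)		/* parses as (!flags) & bit */
+	if (!(qp->q.flags & INET_FRAG_FIRST_IN))	/* intended: bit is clear */
+
+Since '!' yields 0 or 1, the first form tests the wrong condition.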
+
+Fixes: fa0f527358bd ("ip: use rb trees for IP frag queue.")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -203,7 +203,7 @@ static void ip_expire(unsigned long arg)
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
+
+- if (!qp->q.flags & INET_FRAG_FIRST_IN)
++ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:10 -0700
+Subject: ipv6: defrag: drop non-last frags smaller than min mtu
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Florian Westphal <fw@strlen.de>, Peter Oskolkov <posk@google.com>
+Message-ID: <20181010193017.25221-23-f.fainelli@gmail.com>
+
+From: Florian Westphal <fw@strlen.de>
+
+Don't bother with pathological cases; they only waste cycles.
+IPv6 requires a minimum MTU of 1280 so we should never see fragments
+smaller than this (except last frag).
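+
+The check added below is a single predicate; IPV6_MIN_MTU is 1280 in
+the kernel headers, and IP6_MF marks a non-last fragment:
+
+	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+	    fhdr->frag_off & htons(IP6_MF))
+		return -EINVAL;	/* or goto fail_hdr in ipv6_frag_rcv() */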
+
+v3: don't use awkward "-offset + len"
+v2: drop the IPv4 part, which added the same check with IPV4_MIN_MTU (68).
+ There were concerns that there could be even smaller frags
+ generated by intermediate nodes, e.g. on radio networks.
+
+Cc: Peter Oskolkov <posk@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 0ed4229b08c13c84a3c301a08defdc9e7f4467e6)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++++
+ net/ipv6/reassembly.c | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -564,6 +564,10 @@ int nf_ct_frag6_gather(struct net *net,
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF))
++ return -EINVAL;
++
+ skb_orphan(skb);
+ fq = fq_find(net, fhdr->identification, user, hdr,
+ skb->dev ? skb->dev->ifindex : 0);
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -516,6 +516,10 @@ static int ipv6_frag_rcv(struct sk_buff
+ return 1;
+ }
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF))
++ goto fail_hdr;
++
+ iif = skb->dev ? skb->dev->ifindex : 0;
+ fq = fq_find(net, fhdr->identification, hdr, iif);
+ if (fq) {
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:54 -0700
+Subject: ipv6: export ip6 fragments sysctl to unprivileged users
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Nikolay Borisov <kernel@kyup.com>
+Message-ID: <20181010193017.25221-7-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+IPv4 was changed in commit 52a773d645e9 ("net: Export ip fragment
+sysctl to unprivileged users").
+
+The only sysctl that is not per-netns is not used:
+ip6frag_secret_interval
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Nikolay Borisov <kernel@kyup.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 18dcbe12fe9fca0ab825f7eff993060525ac2503)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/reassembly.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -643,10 +643,6 @@ static int __net_init ip6_frags_ns_sysct
+ table[1].data = &net->ipv6.frags.low_thresh;
+ table[1].extra2 = &net->ipv6.frags.high_thresh;
+ table[2].data = &net->ipv6.frags.timeout;
+-
+- /* Don't export sysctls to unprivileged users */
+- if (net->user_ns != &init_user_ns)
+- table[0].procname = NULL;
+ }
+
+ hdr = register_net_sysctl(net, "net/ipv6", table);
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:02 -0700
+Subject: ipv6: frags: rewrite ip6_expire_frag_queue()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-15-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Make it similar to IPv4 ip_expire(), and release the lock
+before calling icmp functions.
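+
+The resulting shape (sketch of the hunk below): grab a reference to
+the head skb, drop the queue lock, then call into ICMP:
+
+	head->dev = dev;
+	skb_get(head);			/* keep head alive after unlock */
+	spin_unlock(&fq->q.lock);
+
+	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+	kfree_skb(head);		/* drop our extra reference */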
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 05c0b86b9696802fd0ce5676a92a63f1b455bdf3)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/reassembly.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -92,7 +92,9 @@ EXPORT_SYMBOL(ip6_frag_init);
+ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
+ {
+ struct net_device *dev = NULL;
++ struct sk_buff *head;
+
++ rcu_read_lock();
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+@@ -100,28 +102,34 @@ void ip6_expire_frag_queue(struct net *n
+
+ inet_frag_kill(&fq->q);
+
+- rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+- goto out_rcu_unlock;
++ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
+- goto out_rcu_unlock;
++ head = fq->q.fragments;
++ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
++ goto out;
+
+ /* But use as source device on which LAST ARRIVED
+ * segment was received. And do not use fq->dev
+ * pointer directly, device might already disappeared.
+ */
+- fq->q.fragments->dev = dev;
+- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+-out_rcu_unlock:
+- rcu_read_unlock();
++ head->dev = dev;
++ skb_get(head);
++ spin_unlock(&fq->q.lock);
++
++ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
++ kfree_skb(head);
++ goto out_rcu_unlock;
++
+ out:
+ spin_unlock(&fq->q.lock);
++out_rcu_unlock:
++ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+ }
+ EXPORT_SYMBOL(ip6_expire_frag_queue);
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:12 -0700
+Subject: net: add rb_to_skb() and other rb tree helpers
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-25-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Generalize the private netem_rb_to_skb() helper.
+
+TCP rtx queue will soon be converted to rb-tree,
+so we will need skb_rbtree_walk() helpers.
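+
+A hedged usage sketch of the new walker (hypothetical caller, not
+part of this patch):
+
+	struct sk_buff *skb;
+
+	skb_rbtree_walk(skb, &tp->out_of_order_queue) {
+		/* skbs are visited in key order */
+		pr_debug("ooo seq %u\n", TCP_SKB_CB(skb)->seq);
+	}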
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 18a4c0eab2623cc95be98a1e6af1ad18e7695977)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 18 ++++++++++++++++++
+ net/ipv4/tcp_input.c | 33 ++++++++++++---------------------
+ 2 files changed, 30 insertions(+), 21 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2988,6 +2988,12 @@ static inline int __skb_grow_rcsum(struc
+
+ #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+
++#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
++#define skb_rb_first(root) rb_to_skb(rb_first(root))
++#define skb_rb_last(root) rb_to_skb(rb_last(root))
++#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
++#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
++
+ #define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ skb != (struct sk_buff *)(queue); \
+@@ -3002,6 +3008,18 @@ static inline int __skb_grow_rcsum(struc
+ for (; skb != (struct sk_buff *)(queue); \
+ skb = skb->next)
+
++#define skb_rbtree_walk(skb, root) \
++ for (skb = skb_rb_first(root); skb != NULL; \
++ skb = skb_rb_next(skb))
++
++#define skb_rbtree_walk_from(skb) \
++ for (; skb != NULL; \
++ skb = skb_rb_next(skb))
++
++#define skb_rbtree_walk_from_safe(skb, tmp) \
++ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
++ skb = tmp)
++
+ #define skb_queue_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4406,7 +4406,7 @@ static void tcp_ofo_queue(struct sock *s
+
+ p = rb_first(&tp->out_of_order_queue);
+ while (p) {
+- skb = rb_entry(p, struct sk_buff, rbnode);
++ skb = rb_to_skb(p);
+ if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
+ break;
+
+@@ -4470,7 +4470,7 @@ static int tcp_try_rmem_schedule(struct
+ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct rb_node **p, *q, *parent;
++ struct rb_node **p, *parent;
+ struct sk_buff *skb1;
+ u32 seq, end_seq;
+ bool fragstolen;
+@@ -4529,7 +4529,7 @@ coalesce_done:
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+- skb1 = rb_entry(parent, struct sk_buff, rbnode);
++ skb1 = rb_to_skb(parent);
+ if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ p = &parent->rb_left;
+ continue;
+@@ -4574,9 +4574,7 @@ insert:
+
+ merge_right:
+ /* Remove other segments covered by skb. */
+- while ((q = rb_next(&skb->rbnode)) != NULL) {
+- skb1 = rb_entry(q, struct sk_buff, rbnode);
+-
++ while ((skb1 = skb_rb_next(skb)) != NULL) {
+ if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+ break;
+ if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+@@ -4591,7 +4589,7 @@ merge_right:
+ tcp_drop(sk, skb1);
+ }
+ /* If there is no skb after us, we are the last_skb ! */
+- if (!q)
++ if (!skb1)
+ tp->ooo_last_skb = skb;
+
+ add_sack:
+@@ -4792,7 +4790,7 @@ static struct sk_buff *tcp_skb_next(stru
+ if (list)
+ return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+- return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
++ return skb_rb_next(skb);
+ }
+
+ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -4821,7 +4819,7 @@ static void tcp_rbtree_insert(struct rb_
+
+ while (*p) {
+ parent = *p;
+- skb1 = rb_entry(parent, struct sk_buff, rbnode);
++ skb1 = rb_to_skb(parent);
+ if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ p = &parent->rb_left;
+ else
+@@ -4941,19 +4939,12 @@ static void tcp_collapse_ofo_queue(struc
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 range_truesize, sum_tiny = 0;
+ struct sk_buff *skb, *head;
+- struct rb_node *p;
+ u32 start, end;
+
+- p = rb_first(&tp->out_of_order_queue);
+- skb = rb_entry_safe(p, struct sk_buff, rbnode);
++ skb = skb_rb_first(&tp->out_of_order_queue);
+ new_range:
+ if (!skb) {
+- p = rb_last(&tp->out_of_order_queue);
+- /* Note: This is possible p is NULL here. We do not
+- * use rb_entry_safe(), as ooo_last_skb is valid only
+- * if rbtree is not empty.
+- */
+- tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
++ tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
+ return;
+ }
+ start = TCP_SKB_CB(skb)->seq;
+@@ -4961,7 +4952,7 @@ new_range:
+ range_truesize = skb->truesize;
+
+ for (head = skb;;) {
+- skb = tcp_skb_next(skb, NULL);
++ skb = skb_rb_next(skb);
+
+ /* Range is terminated when we see a gap or when
+ * we are at the queue end.
+@@ -5017,7 +5008,7 @@ static bool tcp_prune_ofo_queue(struct s
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
+ goal -= rb_to_skb(node)->truesize;
+- tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
++ tcp_drop(sk, rb_to_skb(node));
+ if (!prev || goal <= 0) {
+ sk_mem_reclaim(sk);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+@@ -5027,7 +5018,7 @@ static bool tcp_prune_ofo_queue(struct s
+ }
+ node = prev;
+ } while (node);
+- tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
++ tp->ooo_last_skb = rb_to_skb(prev);
+
+ /* Reset SACK state. A conforming SACK implementation will
+ * do the same at a timeout based retransmit. When a connection
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:09 -0700
+Subject: net: modify skb_rbtree_purge to return the truesize of all purged skbs.
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com, Peter Oskolkov <posk@google.com>, Florian Westphal <fw@strlen.de>
+Message-ID: <20181010193017.25221-22-f.fainelli@gmail.com>
+
+From: Peter Oskolkov <posk@google.com>
+
+Tested: see the next patch in the series.
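+
+The point of the new return value is caller-side memory accounting; a
+sketch of the intended use (this exact pairing appears in the
+follow-up patch):
+
+	unsigned int sum_truesize;
+
+	sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments);
+	sub_frag_mem_limit(qp->q.net, sum_truesize);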
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 385114dec8a49b5e5945e77ba7de6356106713f4)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 2 +-
+ net/core/skbuff.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2418,7 +2418,7 @@ static inline void __skb_queue_purge(str
+ kfree_skb(skb);
+ }
+
+-void skb_rbtree_purge(struct rb_root *root);
++unsigned int skb_rbtree_purge(struct rb_root *root);
+
+ void *netdev_alloc_frag(unsigned int fragsz);
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2425,23 +2425,27 @@ EXPORT_SYMBOL(skb_queue_purge);
+ /**
+ * skb_rbtree_purge - empty a skb rbtree
+ * @root: root of the rbtree to empty
++ * Return value: the sum of truesizes of all purged skbs.
+ *
+ * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ * the list and one reference dropped. This function does not take
+ * any lock. Synchronization should be handled by the caller (e.g., TCP
+ * out-of-order queue is protected by the socket lock).
+ */
+-void skb_rbtree_purge(struct rb_root *root)
++unsigned int skb_rbtree_purge(struct rb_root *root)
+ {
+ struct rb_node *p = rb_first(root);
++ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
++ sum += skb->truesize;
+ kfree_skb(skb);
+ }
++ return sum;
+ }
+
+ /**
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:11 -0700
+Subject: net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-24-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+After working on IP defragmentation lately, I found that some large
+packets defeat the CHECKSUM_COMPLETE optimization because of NICs
+adding zero padding on the last (small) fragment.
+
+While removing the padding with pskb_trim_rcsum(), we set skb->ip_summed
+to CHECKSUM_NONE, forcing a full csum validation, even if all prior
+fragments had CHECKSUM_COMPLETE set.
+
+We can instead compute the checksum of the part we are trimming,
+usually smaller than the part we keep.
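+
+In checksum terms (sketch of the pskb_trim_rcsum_slow() helper added
+below): fold the checksum of the trimmed tail out of skb->csum instead
+of discarding CHECKSUM_COMPLETE:
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		int delta = skb->len - len;	/* bytes being trimmed off */
+
+		skb->csum = csum_sub(skb->csum,
+				     skb_checksum(skb, len, delta, 0));
+	}
+	return __pskb_trim(skb, len);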
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 88078d98d1bb085d72af8437707279e203524fa5)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 5 ++---
+ net/core/skbuff.c | 14 ++++++++++++++
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2954,6 +2954,7 @@ static inline unsigned char *skb_push_rc
+ return skb->data;
+ }
+
++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
+ /**
+ * pskb_trim_rcsum - trim received skb and update checksum
+ * @skb: buffer to trim
+@@ -2967,9 +2968,7 @@ static inline int pskb_trim_rcsum(struct
+ {
+ if (likely(len >= skb->len))
+ return 0;
+- if (skb->ip_summed == CHECKSUM_COMPLETE)
+- skb->ip_summed = CHECKSUM_NONE;
+- return __pskb_trim(skb, len);
++ return pskb_trim_rcsum_slow(skb, len);
+ }
+
+ static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1578,6 +1578,20 @@ done:
+ }
+ EXPORT_SYMBOL(___pskb_trim);
+
++/* Note : use pskb_trim_rcsum() instead of calling this directly
++ */
++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
++{
++ if (skb->ip_summed == CHECKSUM_COMPLETE) {
++ int delta = skb->len - len;
++
++ skb->csum = csum_sub(skb->csum,
++ skb_checksum(skb, len, delta, 0));
++ }
++ return __pskb_trim(skb, len);
++}
++EXPORT_SYMBOL(pskb_trim_rcsum_slow);
++
+ /**
+ * __pskb_pull_tail - advance tail of skb header
+ * @skb: buffer to reallocate
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:08 -0700
+Subject: net: speed up skb_rbtree_purge()
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-21-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+As measured in my prior patch ("sch_netem: faster rb tree removal"),
+rbtree_postorder_for_each_entry_safe() is nice looking but much slower
+than using rb_next() directly, except when the tree is small enough
+to fit in CPU caches (then the cost is the same).
+
+Also note that there is not even an increase in text size:
+$ size net/core/skbuff.o.before net/core/skbuff.o
+ text data bss dec hex filename
+ 40711 1298 0 42009 a419 net/core/skbuff.o.before
+ 40711 1298 0 42009 a419 net/core/skbuff.o
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 7c90584c66cc4b033a3b684b0e0950f79e7b7166)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2433,12 +2433,15 @@ EXPORT_SYMBOL(skb_queue_purge);
+ */
+ void skb_rbtree_purge(struct rb_root *root)
+ {
+- struct sk_buff *skb, *next;
++ struct rb_node *p = rb_first(root);
+
+- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+- kfree_skb(skb);
++ while (p) {
++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+- *root = RB_ROOT;
++ p = rb_next(p);
++ rb_erase(&skb->rbnode, root);
++ kfree_skb(skb);
++ }
+ }
+
+ /**
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:29:55 -0700
+Subject: rhashtable: add schedule points
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-8-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+Rehashing and destroying a large hash table takes a lot of time,
+and happens in process context. It is safe to add cond_resched()
+in rhashtable_rehash_table() and rhashtable_free_and_destroy().
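+
+The change is just a voluntary preemption point per chain/bucket
+(sketch of the rehash hunk below):
+
+	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+		rhashtable_rehash_chain(ht, old_hash);
+		cond_resched();		/* process context: may sleep here */
+	}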
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit ae6da1f503abb5a5081f9f6c4a6881de97830f3e)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/rhashtable.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -251,8 +251,10 @@ static int rhashtable_rehash_table(struc
+ if (!new_tbl)
+ return 0;
+
+- for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
++ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+ rhashtable_rehash_chain(ht, old_hash);
++ cond_resched();
++ }
+
+ /* Publish the new table pointer. */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+@@ -993,6 +995,7 @@ void rhashtable_free_and_destroy(struct
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+
++ cond_resched();
+ for (pos = rht_dereference(tbl->buckets[i], ht),
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
--- /dev/null
+From foo@baz Tue Oct 16 18:12:46 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 10 Oct 2018 12:30:03 -0700
+Subject: rhashtable: reorganize struct rhashtable layout
+To: netdev@vger.kernel.org
+Cc: davem@davemloft.net, gregkh@linuxfoundation.org, stable@vger.kernel.org, edumazet@google.com, sthemmin@microsoft.com
+Message-ID: <20181010193017.25221-16-f.fainelli@gmail.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+While under a frags DDOS I noticed unfortunate false sharing between
+@nelems and @params.automatic_shrinking.
+
+Move @nelems to the end of struct rhashtable so that the first cache
+line is shared between all cpus, because it is almost never dirtied.
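+
+A generic illustration of the layout rule (not the struct itself):
+read-mostly fields first so their cache line can stay Shared across
+cpus, frequently dirtied counters last:
+
+	struct ht_like {
+		unsigned int		key_len;	/* read-mostly */
+		struct rhashtable_params p;		/* read-mostly */
+		/* ... mutable bookkeeping below ... */
+		atomic_t		nelems;		/* dirtied on insert/delete */
+	};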
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit e5d672a0780d9e7118caad4c171ec88b8299398d)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rhashtable.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/rhashtable.h
++++ b/include/linux/rhashtable.h
+@@ -138,7 +138,6 @@ struct rhashtable_params {
+ /**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+- * @nelems: Number of elements in table
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
+ * @p: Configuration parameters
+@@ -146,10 +145,10 @@ struct rhashtable_params {
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
++ * @nelems: Number of elements in table
+ */
+ struct rhashtable {
+ struct bucket_table __rcu *tbl;
+- atomic_t nelems;
+ unsigned int key_len;
+ unsigned int elasticity;
+ struct rhashtable_params p;
+@@ -157,6 +156,7 @@ struct rhashtable {
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
++ atomic_t nelems;
+ };
+
+ /**
rtnl-limit-ifla_num_tx_queues-and-ifla_num_rx_queues-to-4096.patch
tcp-dccp-fix-lockdep-issue-when-syn-is-backlogged.patch
inet-make-sure-to-grab-rcu_read_lock-before-using-ireq-ireq_opt.patch
+inet-frags-change-inet_frags_init_net-return-value.patch
+inet-frags-add-a-pointer-to-struct-netns_frags.patch
+inet-frags-refactor-ipfrag_init.patch
+inet-frags-refactor-ipv6_frag_init.patch
+inet-frags-refactor-lowpan_net_frag_init.patch
+ipv6-export-ip6-fragments-sysctl-to-unprivileged-users.patch
+rhashtable-add-schedule-points.patch
+inet-frags-use-rhashtables-for-reassembly-units.patch
+inet-frags-remove-some-helpers.patch
+inet-frags-get-rif-of-inet_frag_evicting.patch
+inet-frags-remove-inet_frag_maybe_warn_overflow.patch
+inet-frags-break-the-2gb-limit-for-frags-storage.patch
+inet-frags-do-not-clone-skb-in-ip_expire.patch
+ipv6-frags-rewrite-ip6_expire_frag_queue.patch
+rhashtable-reorganize-struct-rhashtable-layout.patch
+inet-frags-reorganize-struct-netns_frags.patch
+inet-frags-get-rid-of-ipfrag_skb_cb-frag_cb.patch
+inet-frags-fix-ip6frag_low_thresh-boundary.patch
+ip-discard-ipv4-datagrams-with-overlapping-segments.patch
+net-speed-up-skb_rbtree_purge.patch
+net-modify-skb_rbtree_purge-to-return-the-truesize-of-all-purged-skbs.patch
+ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
+net-pskb_trim_rcsum-and-checksum_complete-are-friends.patch
+net-add-rb_to_skb-and-other-rb-tree-helpers.patch
+ip-use-rb-trees-for-ip-frag-queue.patch
+ip-add-helpers-to-process-in-order-fragments-faster.patch
+ip-process-in-order-fragments-efficiently.patch
+ip-frags-fix-crash-in-ip_do_fragment.patch
+ipv4-frags-precedence-bug-in-ip_expire.patch