4.13-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 Nov 2017 11:49:30 +0000 (12:49 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 Nov 2017 11:49:30 +0000 (12:49 +0100)
added patches:
netfilter-nat-revert-netfilter-nat-convert-nat-bysrc-hash-to-rhashtable.patch
netfilter-nft_set_hash-disable-fast_ops-for-2-len-keys.patch

queue-4.13/netfilter-nat-revert-netfilter-nat-convert-nat-bysrc-hash-to-rhashtable.patch [new file with mode: 0644]
queue-4.13/netfilter-nft_set_hash-disable-fast_ops-for-2-len-keys.patch [new file with mode: 0644]

diff --git a/queue-4.13/netfilter-nat-revert-netfilter-nat-convert-nat-bysrc-hash-to-rhashtable.patch b/queue-4.13/netfilter-nat-revert-netfilter-nat-convert-nat-bysrc-hash-to-rhashtable.patch
new file mode 100644 (file)
index 0000000..f8fb9a2
--- /dev/null
@@ -0,0 +1,298 @@
+From e1bf1687740ce1a3598a1c5e452b852ff2190682 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 6 Sep 2017 14:39:51 +0200
+Subject: netfilter: nat: Revert "netfilter: nat: convert nat bysrc hash to rhashtable"
+
+From: Florian Westphal <fw@strlen.de>
+
+commit e1bf1687740ce1a3598a1c5e452b852ff2190682 upstream.
+
+This reverts commit 870190a9ec9075205c0fa795a09fa931694a3ff1.
+
+It was not a good idea. The custom hash table was a much better
+fit for this purpose.
+
+A fast lookup is not essential; in fact, in most cases there is no lookup
+at all because the original tuple is not taken and can be used as-is.
+What needs to be fast is insertion and deletion.
+
+rhlist removal, however, requires an rhlist walk.
+We can have thousands of entries in such a list if source ports/addresses
+are reused for multiple flows; if this happens, removal requests are so
+expensive that deleting a few thousand flows can take several
+seconds(!).
+
+The advantages that we got from rhashtable are:
+1) table auto-sizing
+2) multiple locks
+
+1) would be nice to have, but it is not essential, as we have at
+most one lookup per new flow, so even a million flows in the bysource
+table are not a problem compared to the current deletion cost.
+2) is easy to add to the custom hash table.
+
+I tried adding an hlist_node to rhlist to speed up rhltable_remove, but
+this isn't doable without changing semantics: rhltable_remove_fast will
+check that the to-be-deleted object is part of the table, and that
+requires a list walk that we want to avoid.
+
+Furthermore, using hlist_node increases the size of struct rhlist_head,
+which in turn increases nf_conn size.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=196821
+Reported-by: Ivan Babrou <ibobrik@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/netfilter/nf_conntrack.h |    3 
+ include/net/netfilter/nf_nat.h       |    1 
+ net/netfilter/nf_nat_core.c          |  128 ++++++++++++++---------------------
+ 3 files changed, 53 insertions(+), 79 deletions(-)
+
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -17,7 +17,6 @@
+ #include <linux/bitops.h>
+ #include <linux/compiler.h>
+ #include <linux/atomic.h>
+-#include <linux/rhashtable.h>
+ #include <linux/netfilter/nf_conntrack_tcp.h>
+ #include <linux/netfilter/nf_conntrack_dccp.h>
+@@ -83,7 +82,7 @@ struct nf_conn {
+       possible_net_t ct_net;
+ #if IS_ENABLED(CONFIG_NF_NAT)
+-      struct rhlist_head nat_bysource;
++      struct hlist_node       nat_bysource;
+ #endif
+       /* all members below initialized via memset */
+       u8 __nfct_init_offset[0];
+--- a/include/net/netfilter/nf_nat.h
++++ b/include/net/netfilter/nf_nat.h
+@@ -1,6 +1,5 @@
+ #ifndef _NF_NAT_H
+ #define _NF_NAT_H
+-#include <linux/rhashtable.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/netfilter/nf_nat.h>
+ #include <net/netfilter/nf_conntrack_tuple.h>
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -30,19 +30,17 @@
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <linux/netfilter/nf_nat.h>
++static DEFINE_SPINLOCK(nf_nat_lock);
++
+ static DEFINE_MUTEX(nf_nat_proto_mutex);
+ static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
+                                               __read_mostly;
+ static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
+                                               __read_mostly;
+-struct nf_nat_conn_key {
+-      const struct net *net;
+-      const struct nf_conntrack_tuple *tuple;
+-      const struct nf_conntrack_zone *zone;
+-};
+-
+-static struct rhltable nf_nat_bysource_table;
++static struct hlist_head *nf_nat_bysource __read_mostly;
++static unsigned int nf_nat_htable_size __read_mostly;
++static unsigned int nf_nat_hash_rnd __read_mostly;
+ inline const struct nf_nat_l3proto *
+ __nf_nat_l3proto_find(u8 family)
+@@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, s
+ EXPORT_SYMBOL(nf_xfrm_me_harder);
+ #endif /* CONFIG_XFRM */
+-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
++/* We keep an extra hash for each conntrack, for fast searching. */
++static unsigned int
++hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+ {
+-      const struct nf_conntrack_tuple *t;
+-      const struct nf_conn *ct = data;
++      unsigned int hash;
++
++      get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
+-      t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       /* Original src, to ensure we map it consistently if poss. */
++      hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
++                    tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
+-      seed ^= net_hash_mix(nf_ct_net(ct));
+-      return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
+-                    t->dst.protonum ^ seed);
++      return reciprocal_scale(hash, nf_nat_htable_size);
+ }
+ /* Is this tuple already taken? (not by us) */
+@@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct,
+               t->src.u.all == tuple->src.u.all);
+ }
+-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
+-                             const void *obj)
+-{
+-      const struct nf_nat_conn_key *key = arg->key;
+-      const struct nf_conn *ct = obj;
+-
+-      if (!same_src(ct, key->tuple) ||
+-          !net_eq(nf_ct_net(ct), key->net) ||
+-          !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
+-              return 1;
+-
+-      return 0;
+-}
+-
+-static struct rhashtable_params nf_nat_bysource_params = {
+-      .head_offset = offsetof(struct nf_conn, nat_bysource),
+-      .obj_hashfn = nf_nat_bysource_hash,
+-      .obj_cmpfn = nf_nat_bysource_cmp,
+-      .nelem_hint = 256,
+-      .min_size = 1024,
+-};
+-
+ /* Only called for SRC manip */
+ static int
+ find_appropriate_src(struct net *net,
+@@ -216,26 +194,22 @@ find_appropriate_src(struct net *net,
+                    struct nf_conntrack_tuple *result,
+                    const struct nf_nat_range *range)
+ {
++      unsigned int h = hash_by_src(net, tuple);
+       const struct nf_conn *ct;
+-      struct nf_nat_conn_key key = {
+-              .net = net,
+-              .tuple = tuple,
+-              .zone = zone
+-      };
+-      struct rhlist_head *hl, *h;
+-
+-      hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+-                           nf_nat_bysource_params);
+-      rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+-              nf_ct_invert_tuplepr(result,
+-                                   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+-              result->dst = tuple->dst;
++      hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
++              if (same_src(ct, tuple) &&
++                  net_eq(net, nf_ct_net(ct)) &&
++                  nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
++                      /* Copy source part from reply tuple. */
++                      nf_ct_invert_tuplepr(result,
++                                     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++                      result->dst = tuple->dst;
+-              if (in_range(l3proto, l4proto, result, range))
+-                      return 1;
++                      if (in_range(l3proto, l4proto, result, range))
++                              return 1;
++              }
+       }
+-
+       return 0;
+ }
+@@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct,
+                 const struct nf_nat_range *range,
+                 enum nf_nat_manip_type maniptype)
+ {
++      struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_tuple curr_tuple, new_tuple;
+       /* Can't setup nat info for confirmed ct. */
+@@ -447,19 +422,14 @@ nf_nat_setup_info(struct nf_conn *ct,
+       }
+       if (maniptype == NF_NAT_MANIP_SRC) {
+-              struct nf_nat_conn_key key = {
+-                      .net = nf_ct_net(ct),
+-                      .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+-                      .zone = nf_ct_zone(ct),
+-              };
+-              int err;
+-
+-              err = rhltable_insert_key(&nf_nat_bysource_table,
+-                                        &key,
+-                                        &ct->nat_bysource,
+-                                        nf_nat_bysource_params);
+-              if (err)
+-                      return NF_DROP;
++              unsigned int srchash;
++
++              srchash = hash_by_src(net,
++                                    &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++              spin_lock_bh(&nf_nat_lock);
++              hlist_add_head_rcu(&ct->nat_bysource,
++                                 &nf_nat_bysource[srchash]);
++              spin_unlock_bh(&nf_nat_lock);
+       }
+       /* It's done. */
+@@ -568,8 +538,9 @@ static int nf_nat_proto_clean(struct nf_
+        * will delete entry from already-freed table.
+        */
+       clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+-      rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+-                      nf_nat_bysource_params);
++      spin_lock_bh(&nf_nat_lock);
++      hlist_del_rcu(&ct->nat_bysource);
++      spin_unlock_bh(&nf_nat_lock);
+       /* don't delete conntrack.  Although that would make things a lot
+        * simpler, we'd end up flushing all conntracks on nat rmmod.
+@@ -697,9 +668,11 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregis
+ /* No one using conntrack by the time this called. */
+ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
+ {
+-      if (ct->status & IPS_SRC_NAT_DONE)
+-              rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+-                              nf_nat_bysource_params);
++      if (ct->status & IPS_SRC_NAT_DONE) {
++              spin_lock_bh(&nf_nat_lock);
++              hlist_del_rcu(&ct->nat_bysource);
++              spin_unlock_bh(&nf_nat_lock);
++      }
+ }
+ static struct nf_ct_ext_type nat_extend __read_mostly = {
+@@ -823,13 +796,16 @@ static int __init nf_nat_init(void)
+ {
+       int ret;
+-      ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
+-      if (ret)
+-              return ret;
++      /* Leave them the same for the moment. */
++      nf_nat_htable_size = nf_conntrack_htable_size;
++
++      nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
++      if (!nf_nat_bysource)
++              return -ENOMEM;
+       ret = nf_ct_extend_register(&nat_extend);
+       if (ret < 0) {
+-              rhltable_destroy(&nf_nat_bysource_table);
++              nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+               printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
+               return ret;
+       }
+@@ -863,8 +839,8 @@ static void __exit nf_nat_cleanup(void)
+       for (i = 0; i < NFPROTO_NUMPROTO; i++)
+               kfree(nf_nat_l4protos[i]);
+-
+-      rhltable_destroy(&nf_nat_bysource_table);
++      synchronize_net();
++      nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+ }
+ MODULE_LICENSE("GPL");
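
The deletion-cost argument in the commit message above comes down to list
topology: an hlist_node carries a back-pointer to whatever links to it, so an
entry can unlink itself from its bysource chain in constant time, whereas an
rhlist chain is singly linked per key and must be walked to find the
predecessor before unlinking. The standalone C program below is a minimal
userspace sketch of that difference; the structures and names (hnode, rnode,
rlist_del) are simplified stand-ins invented here for illustration, not the
kernel's struct hlist_node or struct rhlist_head.

#include <stdio.h>

/* hlist-style node: next pointer plus the address of whatever points at us */
struct hnode {
        struct hnode *next;
        struct hnode **pprev;
};

static void hnode_add_head(struct hnode *n, struct hnode **head)
{
        n->next = *head;
        if (*head)
                (*head)->pprev = &n->next;
        *head = n;
        n->pprev = head;
}

/* O(1): the node knows where it is linked, no walk required */
static void hnode_del(struct hnode *n)
{
        *n->pprev = n->next;
        if (n->next)
                n->next->pprev = n->pprev;
}

/* rhlist-style node: singly linked, no back-pointer */
struct rnode {
        struct rnode *next;
};

/* O(chain length): the predecessor has to be found before unlinking */
static void rlist_del(struct rnode *n, struct rnode **head)
{
        struct rnode **pos = head;

        while (*pos && *pos != n)
                pos = &(*pos)->next;
        if (*pos)
                *pos = n->next;
}

int main(void)
{
        struct hnode h[3], *hhead = NULL;
        struct rnode r[3], *rhead = NULL;
        struct rnode *p;
        int i, count = 0;

        for (i = 0; i < 3; i++) {
                hnode_add_head(&h[i], &hhead);
                r[i].next = rhead;
                rhead = &r[i];
        }

        hnode_del(&h[1]);         /* constant time, however long the chain is */
        rlist_del(&r[1], &rhead); /* walks the chain to find r[1] first */

        for (p = rhead; p; p = p->next)
                count++;
        printf("rlist now holds %d entries\n", count);
        return 0;
}

This is the shape of the change in the diff above: rhltable_remove() is
replaced by hlist_del_rcu() under the new global nf_nat_lock spinlock,
trading rhashtable's bucket locking and auto-sizing for constant-time
removal.
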
diff --git a/queue-4.13/netfilter-nft_set_hash-disable-fast_ops-for-2-len-keys.patch b/queue-4.13/netfilter-nft_set_hash-disable-fast_ops-for-2-len-keys.patch
new file mode 100644 (file)
index 0000000..377941d
--- /dev/null
@@ -0,0 +1,57 @@
+From 0414c78f14861cb704d6e6888efd53dd36e3bdde Mon Sep 17 00:00:00 2001
+From: Anatole Denis <anatole@rezel.net>
+Date: Wed, 4 Oct 2017 01:17:14 +0100
+Subject: netfilter: nft_set_hash: disable fast_ops for 2-len keys
+
+From: Anatole Denis <anatole@rezel.net>
+
+commit 0414c78f14861cb704d6e6888efd53dd36e3bdde upstream.
+
+jhash_1word of a u16 is a different value from jhash of the same u16 with
+length 2.
+Since elements are always inserted in sets using jhash over the actual
+klen, this would lead to incorrect lookups on fixed-size sets with a key
+length of 2, as they would be inserted with hash value jhash(key, 2) and
+looked up with hash value jhash_1word(key), which is different.
+
+Example reproducer (v4.13+), using anonymous sets, which always have a
+fixed size:
+
+  table inet t {
+      chain c {
+                  type filter hook output priority 0; policy accept;
+                  tcp dport { 10001, 10003, 10005, 10007, 10009 } counter packets 4 bytes 240 reject
+                  tcp dport 10001 counter packets 4 bytes 240 reject
+                  tcp dport 10003 counter packets 4 bytes 240 reject
+                  tcp dport 10005 counter packets 4 bytes 240 reject
+                  tcp dport 10007 counter packets 0 bytes 0 reject
+                  tcp dport 10009 counter packets 4 bytes 240 reject
+          }
+  }
+
+then use nc -z localhost <port> to probe; incorrectly hashed ports will
+pass through the set lookup and increment the counter of an individual
+rule.
+
+Since jhash is seeded with a random value, it is not deterministic which
+ports will hash incorrectly, but in testing with 5 ports in the set I
+always had 4 or 5 with an incorrect hash value.
+
+Signed-off-by: Anatole Denis <anatole@rezel.net>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nft_set_hash.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -643,7 +643,6 @@ nft_hash_select_ops(const struct nft_ctx
+ {
+       if (desc->size) {
+               switch (desc->klen) {
+-              case 2:
+               case 4:
+                       return &nft_hash_fast_ops;
+               default:
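
The first paragraph of the commit message above is the whole bug: elements
were inserted with jhash over the real key length (2 bytes), but the fast ops
looked them up with jhash_1word, which consumes the key as one 32-bit word,
so the two paths disagreed on the hash value. The standalone sketch below
reproduces that kind of mismatch in userspace; FNV-1a stands in for jhash (an
assumption made only for illustration, since jhash lives in the kernel), and
the port and seed values are arbitrary.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a over an arbitrary byte range, seeded by XOR (stand-in for jhash) */
static uint32_t fnv1a(const void *data, size_t len, uint32_t seed)
{
        const uint8_t *p = data;
        uint32_t h = 2166136261u ^ seed;

        while (len--) {
                h ^= *p++;
                h *= 16777619u;
        }
        return h;
}

int main(void)
{
        uint16_t port = 10007;       /* a 2-byte set element, e.g. a tcp dport */
        uint32_t word = port;        /* the same key widened to a 32-bit word */
        uint32_t seed = 0xdeadbeef;  /* sets use a random seed in practice */

        /* insert path: hash exactly klen == 2 bytes of the key */
        uint32_t h_insert = fnv1a(&port, sizeof(port), seed);

        /* fast-ops lookup path: hash the key as a single 32-bit word */
        uint32_t h_lookup = fnv1a(&word, sizeof(word), seed);

        printf("insert hash 0x%08x, lookup hash 0x%08x -> %s\n",
               (unsigned)h_insert, (unsigned)h_lookup,
               h_insert == h_lookup ? "match" : "mismatch, lookup misses");
        return 0;
}

With case 2 removed from the switch in nft_hash_select_ops(), sets with
2-byte keys no longer select the fast ops, so insertion and lookup hash the
key the same way again.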