From: Greg Kroah-Hartman
Date: Wed, 6 Nov 2019 17:34:42 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v4.4.200~18
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b6cfbed0ac3d3524b76d0da360b12c5ec4c94eda;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
net-flow_dissector-switch-to-siphash.patch
---

diff --git a/queue-4.14/net-flow_dissector-switch-to-siphash.patch b/queue-4.14/net-flow_dissector-switch-to-siphash.patch
new file mode 100644
index 00000000000..43f07460df8
--- /dev/null
+++ b/queue-4.14/net-flow_dissector-switch-to-siphash.patch
@@ -0,0 +1,389 @@
+From 55667441c84fa5e0911a0aac44fb059c15ba6da2 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 22 Oct 2019 07:57:46 -0700
+Subject: net/flow_dissector: switch to siphash
+
+From: Eric Dumazet
+
+commit 55667441c84fa5e0911a0aac44fb059c15ba6da2 upstream.
+
+UDP IPv6 packets auto flowlabels are using a 32bit secret
+(static u32 hashrnd in net/core/flow_dissector.c) and
+apply jhash() over fields known by the receivers.
+
+Attackers can easily infer the 32bit secret and use this information
+to identify a device and/or user, since this 32bit secret is only
+set at boot time.
+
+Really, using jhash() to generate cookies sent on the wire
+is a serious security concern.
+
+Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
+a dead end. Trying to periodically change the secret (like in sch_sfq.c)
+could change paths taken in the network for long lived flows.
+
+Let's switch to siphash, as we did in commit df453700e8d8
+("inet: switch IP ID generator to siphash")
+
+Using a cryptographically strong pseudo random function will solve this
+privacy issue and more generally remove other weak points in the stack.
+
+Packet schedulers using skb_get_hash_perturb() benefit from this change.
+
+Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
+Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
+Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
+Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
+Signed-off-by: Eric Dumazet
+Reported-by: Jonathan Berger
+Reported-by: Amit Klein
+Reported-by: Benny Pinkas
+Cc: Tom Herbert
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ include/linux/skbuff.h | 3 +-
+ include/net/flow_dissector.h | 3 +-
+ include/net/fq.h | 2 -
+ include/net/fq_impl.h | 4 +--
+ net/core/flow_dissector.c | 48 +++++++++++++++++--------------------
+ net/sched/sch_hhf.c | 8 +++----
+ net/sched/sch_sfb.c | 13 ++++++-----
+ net/sched/sch_sfq.c | 14 +++++++-----
+ 8 files changed, 46 insertions(+), 49 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1228,7 +1228,8 @@ static inline __u32 skb_get_hash_flowi6(
+ return skb->hash;
+ }
+ 
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++ const siphash_key_t *perturb);
+ 
+ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+ {
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -4,6 +4,7 @@
+ 
+ #include
+ #include
++#include
+ #include
+ 
+ /**
+@@ -229,7 +230,7 @@ struct flow_dissector {
+ struct flow_keys {
+ struct flow_dissector_key_control control;
+ #define FLOW_KEYS_HASH_START_FIELD basic
+- struct flow_dissector_key_basic basic;
++ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_keyid keyid;
+--- a/include/net/fq.h
++++ b/include/net/fq.h
+@@ -70,7 +70,7 @@ struct fq {
+ struct list_head backlogs;
+ spinlock_t lock;
+ u32 flows_cnt;
+- u32 perturbation;
++ siphash_key_t perturbation;
+ u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
+--- a/include/net/fq_impl.h
++++ b/include/net/fq_impl.h
+@@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(
+ 
+ lockdep_assert_held(&fq->lock);
+ 
+- hash = skb_get_hash_perturb(skb, fq->perturbation);
++ hash = skb_get_hash_perturb(skb, &fq->perturbation);
+ idx = reciprocal_scale(hash, fq->flows_cnt);
+ flow = &fq->flows[idx];
+ 
+@@ -255,7 +255,7 @@ static int fq_init(struct fq *fq, int fl
+ INIT_LIST_HEAD(&fq->backlogs);
+ spin_lock_init(&fq->lock);
+ fq->flows_cnt = max_t(u32, flows_cnt, 1);
+- fq->perturbation = prandom_u32();
++ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
+ fq->quantum = 300;
+ fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -889,45 +889,34 @@ out_bad:
+ }
+ EXPORT_SYMBOL(__skb_flow_dissect);
+ 
+-static u32 hashrnd __read_mostly;
++static siphash_key_t hashrnd __read_mostly;
+ static __always_inline void __flow_hash_secret_init(void)
+ {
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+ }
+ 
+-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+- u32 keyval)
++static const void *flow_keys_hash_start(const struct flow_keys *flow)
+ {
+- return jhash2(words, length, keyval);
+-}
+-
+-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
+-{
+- const void *p = flow;
+-
+- BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+- return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
++ BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
++ return &flow->FLOW_KEYS_HASH_START_FIELD;
+ }
+ 
+ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
+ {
+- size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+- BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+- BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+- sizeof(*flow) - sizeof(flow->addrs));
++ size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET;
+ 
+ switch (flow->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+- diff -= sizeof(flow->addrs.v4addrs);
++ len += sizeof(flow->addrs.v4addrs);
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+- diff -= sizeof(flow->addrs.v6addrs);
++ len += sizeof(flow->addrs.v6addrs);
+ break;
+ case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+- diff -= sizeof(flow->addrs.tipcaddrs);
++ len += sizeof(flow->addrs.tipcaddrs);
+ break;
+ }
+- return (sizeof(*flow) - diff) / sizeof(u32);
++ return len;
+ }
+ 
+ __be32 flow_get_u32_src(const struct flow_keys *flow)
+@@ -993,14 +982,15 @@ static inline void __flow_hash_consisten
+ }
+ }
+ 
+-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
++static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
++ const siphash_key_t *keyval)
+ {
+ u32 hash;
+ 
+ __flow_hash_consistentify(keys);
+ 
+- hash = __flow_hash_words(flow_keys_hash_start(keys),
+- flow_keys_hash_length(keys), keyval);
++ hash = siphash(flow_keys_hash_start(keys),
++ flow_keys_hash_length(keys), keyval);
+ if (!hash)
+ hash = 1;
+ 
+@@ -1010,12 +1000,13 @@ static inline u32 __flow_hash_from_keys(
+ u32 flow_hash_from_keys(struct flow_keys *keys)
+ {
+ __flow_hash_secret_init();
+- return __flow_hash_from_keys(keys, hashrnd);
++ return __flow_hash_from_keys(keys, &hashrnd);
+ }
+ EXPORT_SYMBOL(flow_hash_from_keys);
+ 
+ static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+- struct flow_keys *keys, u32 keyval)
++ struct flow_keys *keys,
++ const siphash_key_t *keyval)
+ {
+ skb_flow_dissect_flow_keys(skb, keys,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+@@ -1063,7 +1054,7 @@ u32 __skb_get_hash_symmetric(const struc
+ NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+ 
+- return __flow_hash_from_keys(&keys, hashrnd);
++ return __flow_hash_from_keys(&keys, &hashrnd);
+ }
+ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+ 
+@@ -1083,13 +1074,14 @@ void __skb_get_hash(struct sk_buff *skb)
+ 
+ __flow_hash_secret_init();
+ 
+- hash = ___skb_get_hash(skb, &keys, hashrnd);
++ hash = ___skb_get_hash(skb, &keys, &hashrnd);
+ 
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+ EXPORT_SYMBOL(__skb_get_hash);
+ 
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++ const siphash_key_t *perturb)
+ {
+ struct flow_keys keys;
+ 
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -4,11 +4,11 @@
+ * Copyright (C) 2013 Nandita Dukkipati
+ */
+ 
+-#include
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ 
+@@ -125,7 +125,7 @@ struct wdrr_bucket {
+ 
+ struct hhf_sched_data {
+ struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
+- u32 perturbation; /* hash perturbation */
++ siphash_key_t perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ u32 drop_overlimit; /* number of times max qdisc packet
+ * limit was hit
+@@ -263,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify
+ }
+ 
+ /* Get hashed flow-id of the skb. */
+- hash = skb_get_hash_perturb(skb, q->perturbation);
++ hash = skb_get_hash_perturb(skb, &q->perturbation);
+ 
+ /* Check if this packet belongs to an already established HH flow. */
+ flow_pos = hash & HHF_BIT_MASK;
+@@ -578,7 +578,7 @@ static int hhf_init(struct Qdisc *sch, s
+ 
+ sch->limit = 1000;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ INIT_LIST_HEAD(&q->new_buckets);
+ INIT_LIST_HEAD(&q->old_buckets);
+ 
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -22,7 +22,7 @@
+ #include
+ #include
+ #include
+-#include
++#include
+ #include
+ #include
+ #include
+@@ -49,7 +49,7 @@ struct sfb_bucket {
+ * (Section 4.4 of SFB reference : moving hash functions)
+ */
+ struct sfb_bins {
+- u32 perturbation; /* jhash perturbation */
++ siphash_key_t perturbation; /* siphash key */
+ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+ };
+ 
+@@ -221,7 +221,8 @@ static u32 sfb_compute_qlen(u32 *prob_r,
+ 
+ static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+ {
+- q->bins[slot].perturbation = prandom_u32();
++ get_random_bytes(&q->bins[slot].perturbation,
++ sizeof(q->bins[slot].perturbation));
+ }
+ 
+ static void sfb_swap_slot(struct sfb_sched_data *q)
+@@ -317,9 +318,9 @@ static int sfb_enqueue(struct sk_buff *s
+ /* If using external classifiers, get result and record it. */
+ if (!sfb_classify(skb, fl, &ret, &salt))
+ goto other_drop;
+- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
++ sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
+ } else {
+- sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
++ sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
+ }
+ 
+ 
+@@ -355,7 +356,7 @@ static int sfb_enqueue(struct sk_buff *s
+ /* Inelastic flow */
+ if (q->double_buffering) {
+ sfbhash = skb_get_hash_perturb(skb,
+- q->bins[slot].perturbation);
++ &q->bins[slot].perturbation);
+ if (!sfbhash)
+ sfbhash = 1;
+ sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -18,7 +18,7 @@
+ #include
+ #include
+ #include
+-#include
++#include
+ #include
+ #include
+ #include
+@@ -121,7 +121,7 @@ struct sfq_sched_data {
+ u8 headdrop;
+ u8 maxdepth; /* limit of packets per flow */
+ 
+- u32 perturbation;
++ siphash_key_t perturbation;
+ u8 cur_depth; /* depth of longest slot */
+ u8 flags;
+ unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+@@ -160,7 +160,7 @@ static inline struct sfq_head *sfq_dep_h
+ static unsigned int sfq_hash(const struct sfq_sched_data *q,
+ const struct sk_buff *skb)
+ {
+- return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
++ return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
+ }
+ 
+ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
+@@ -609,9 +609,11 @@ static void sfq_perturbation(unsigned lo
+ struct Qdisc *sch = (struct Qdisc *)arg;
+ struct sfq_sched_data *q = qdisc_priv(sch);
+ spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++ siphash_key_t nkey;
+ 
++ get_random_bytes(&nkey, sizeof(nkey));
+ spin_lock(root_lock);
+- q->perturbation = prandom_u32();
++ q->perturbation = nkey;
+ if (!q->filter_list && q->tail)
+ sfq_rehash(sch);
+ spin_unlock(root_lock);
+@@ -690,7 +692,7 @@ static int sfq_change(struct Qdisc *sch,
+ del_timer(&q->perturb_timer);
+ if (q->perturb_period) {
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ }
+ sch_tree_unlock(sch);
+ kfree(p);
+@@ -746,7 +748,7 @@ static int sfq_init(struct Qdisc *sch, s
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ q->perturb_period = 0;
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ 
+ if (opt) {
+ int err = sfq_change(sch, opt);
diff --git a/queue-4.14/series b/queue-4.14/series
index 86d541f2397..fac684cd87c 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -46,5 +46,6 @@ net-use-skb_queue_empty_lockless-in-busy-poll-contexts.patch
 vxlan-check-tun_info-options_len-properly.patch
 erspan-fix-the-tun_info-options_len-check-for-erspan.patch
 inet-stop-leaking-jiffies-on-the-wire.patch
+net-flow_dissector-switch-to-siphash.patch
 kbuild-use-fmacro-prefix-map-to-make-__file__-a-rela.patch
 kbuild-add-fcf-protection-none-when-using-retpoline-.patch
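
As a quick illustration of the keying pattern this backport converts every skb_get_hash_perturb() caller to, here is a minimal sketch. It is not part of the queued patch; the example_* names are hypothetical, and the pattern simply mirrors the sfq_init()/sfq_hash() hunks above: a 128-bit siphash_key_t seeded once with get_random_bytes() and passed by pointer, replacing a guessable 32-bit prandom_u32() seed fed to jhash.

/* Minimal sketch, not part of the queued patch. The example_* names are
 * hypothetical; the pattern mirrors the sch_sfq conversion above.
 */
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/skbuff.h>

struct example_sched_data {
        siphash_key_t perturbation;     /* was: u32 perturbation */
        u32 divisor;                    /* number of buckets, power of two */
};

static void example_seed(struct example_sched_data *q)
{
        /* 128 bits of secret instead of a 32-bit prandom_u32() value */
        get_random_bytes(&q->perturbation, sizeof(q->perturbation));
}

static unsigned int example_hash(const struct example_sched_data *q,
                                 const struct sk_buff *skb)
{
        /* the key is now passed by reference; the result is still a 32-bit hash */
        return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}

The "& (q->divisor - 1)" masking is how sch_sfq maps the hash to a bucket; other callers touched by the patch, such as fq_flow_classify() in include/net/fq_impl.h, use reciprocal_scale() on the same hash instead.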