git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Dec 2022 18:31:14 +0000 (19:31 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Dec 2022 18:31:14 +0000 (19:31 +0100)
added patches:
once-add-do_once_slow-for-sleepable-contexts.patch

queue-4.14/once-add-do_once_slow-for-sleepable-contexts.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/once-add-do_once_slow-for-sleepable-contexts.patch b/queue-4.14/once-add-do_once_slow-for-sleepable-contexts.patch
new file mode 100644
index 0000000..5b42fe5
--- /dev/null
+++ b/queue-4.14/once-add-do_once_slow-for-sleepable-contexts.patch
@@ -0,0 +1,142 @@
+From 62c07983bef9d3e78e71189441e1a470f0d1e653 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 1 Oct 2022 13:51:02 -0700
+Subject: once: add DO_ONCE_SLOW() for sleepable contexts
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 62c07983bef9d3e78e71189441e1a470f0d1e653 upstream.
+
+Christophe Leroy reported a ~80ms latency spike
+happening at first TCP connect() time.
+
+This is because __inet_hash_connect() uses get_random_once()
+to populate a perturbation table, which became quite big
+after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16").
+
+get_random_once() uses DO_ONCE(), which blocks hard irqs for the duration
+of the operation.
+
+This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
+for operations where we prefer to stay in process context.
+
+Then __inet_hash_connect() can use get_random_slow_once()
+to populate its perturbation table.
+
+Fixes: 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
+Fixes: 190cc82489f4 ("tcp: change source port randomizarion at connect() time")
+Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/netdev/CANn89iLAEYBaoYajy0Y9UmGFff5GPxDUoG-ErVB2jDdRNQ5Tug@mail.gmail.com/T/#t
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willy Tarreau <w@1wt.eu>
+Tested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/once.h       |   28 ++++++++++++++++++++++++++++
+ lib/once.c                 |   30 ++++++++++++++++++++++++++++++
+ net/ipv4/inet_hashtables.c |    4 ++--
+ 3 files changed, 60 insertions(+), 2 deletions(-)
+
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -5,10 +5,18 @@
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
++/* Helpers used from arbitrary contexts.
++ * Hard irqs are blocked, be cautious.
++ */
+ bool __do_once_start(bool *done, unsigned long *flags);
+ void __do_once_done(bool *done, struct static_key *once_key,
+                   unsigned long *flags);
++/* Variant for process contexts only. */
++bool __do_once_slow_start(bool *done);
++void __do_once_slow_done(bool *done, struct static_key *once_key,
++                       struct module *mod);
++
+ /* Call a function exactly once. The idea of DO_ONCE() is to perform
+  * a function call such as initialization of random seeds, etc, only
+  * once, where DO_ONCE() can live in the fast-path. After @func has
+@@ -52,9 +60,29 @@ void __do_once_done(bool *done, struct s
+               ___ret;                                                      \
+       })
++/* Variant of DO_ONCE() for process/sleepable contexts. */
++#define DO_ONCE_SLOW(func, ...)                                                    \
++      ({                                                                   \
++              bool ___ret = false;                                         \
++              static bool ___done = false;                                 \
++              static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
++              if (static_key_true(&___once_key)) {                 \
++                      ___ret = __do_once_slow_start(&___done);             \
++                      if (unlikely(___ret)) {                              \
++                              func(__VA_ARGS__);                           \
++                              __do_once_slow_done(&___done, &___once_key,  \
++                                                  THIS_MODULE);            \
++                      }                                                    \
++              }                                                            \
++              ___ret;                                                      \
++      })
++
+ #define get_random_once(buf, nbytes)                                       \
+       DO_ONCE(get_random_bytes, (buf), (nbytes))
+ #define get_random_once_wait(buf, nbytes)                                    \
+       DO_ONCE(get_random_bytes_wait, (buf), (nbytes))                      \
++#define get_random_slow_once(buf, nbytes)                                  \
++      DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
++
+ #endif /* _LINUX_ONCE_H */
+--- a/lib/once.c
++++ b/lib/once.c
+@@ -61,3 +61,33 @@ void __do_once_done(bool *done, struct s
+       once_disable_jump(once_key);
+ }
+ EXPORT_SYMBOL(__do_once_done);
++
++static DEFINE_MUTEX(once_mutex);
++
++bool __do_once_slow_start(bool *done)
++      __acquires(once_mutex)
++{
++      mutex_lock(&once_mutex);
++      if (*done) {
++              mutex_unlock(&once_mutex);
++              /* Keep sparse happy by restoring an even lock count on
++               * this mutex. In case we return here, we don't call into
++               * __do_once_done but return early in the DO_ONCE_SLOW() macro.
++               */
++              __acquire(once_mutex);
++              return false;
++      }
++
++      return true;
++}
++EXPORT_SYMBOL(__do_once_slow_start);
++
++void __do_once_slow_done(bool *done, struct static_key *once_key,
++                       struct module *mod)
++      __releases(once_mutex)
++{
++      *done = true;
++      mutex_unlock(&once_mutex);
++      once_disable_jump(once_key);
++}
++EXPORT_SYMBOL(__do_once_slow_done);
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -638,8 +638,8 @@ int __inet_hash_connect(struct inet_time
+       if (likely(remaining > 1))
+               remaining &= ~1U;
+-      net_get_random_once(table_perturb,
+-                          INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
++      get_random_slow_once(table_perturb,
++                           INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+       index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
+       offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
diff --git a/queue-4.14/series b/queue-4.14/series
index e1b9f4ba7c21942c297d5675903922d0e8646c25..d067f6253d70de6831c19f5bc2eae8e596a80f27 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -1 +1,2 @@
 libtraceevent-fix-build-with-binutils-2.35.patch
+once-add-do_once_slow-for-sleepable-contexts.patch
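
Usage note (not part of the queued patch): the new helper is only safe where sleeping is allowed, since DO_ONCE_SLOW() serializes callers on a mutex instead of the irq-disabling lock used by DO_ONCE(). A minimal sketch of a caller follows; the buffer name and init function are hypothetical, only get_random_slow_once() comes from the patch.

#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical table, analogous to table_perturb in inet_hashtables.c. */
static u32 example_perturb[256];

/*
 * Must run in process context: get_random_slow_once() expands to
 * DO_ONCE_SLOW(get_random_bytes, ...), which takes once_mutex and may sleep.
 * After the first caller finishes, the static key is disabled and later
 * calls skip the slow path entirely.
 */
static void example_init_perturb(void)
{
	get_random_slow_once(example_perturb, sizeof(example_perturb));
}

By contrast, get_random_once() would fill the buffer with hard irqs blocked, which is what produced the ~80ms connect() spike on the 2^16-entry perturbation table described in the commit message above.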