--- /dev/null
+From cb538267ea1e9e025ec692577c9ae75797261889 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 31 Jul 2018 14:35:32 +0200
+Subject: jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit cb538267ea1e9e025ec692577c9ae75797261889 upstream.
+
+Weirdly we seem to have forgotten this: the _cpuslocked() variants all
+assume the caller holds the CPU hotplug read lock, yet none of them
+asserted it. Add lockdep_assert_cpus_held() to each of them.
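+
+For reference, the convention these assertions enforce is that the
+caller of a _cpuslocked() operation already holds the CPU hotplug read
+lock. A minimal sketch of a correct caller (the key and helper are
+hypothetical, for illustration only):
+
+	#include <linux/cpu.h>		/* cpus_read_lock() */
+	#include <linux/jump_label.h>	/* static branch API */
+
+	static DEFINE_STATIC_KEY_FALSE(my_key);	/* hypothetical key */
+
+	static void my_feature_enable(void)	/* hypothetical helper */
+	{
+		cpus_read_lock();	/* what lockdep now asserts */
+		static_branch_enable_cpuslocked(&my_key);
+		cpus_read_unlock();
+	}
+
+With lockdep enabled, lockdep_assert_cpus_held() now warns when a
+caller forgets the surrounding cpus_read_lock()/cpus_read_unlock()
+section instead of silently racing with CPU hotplug.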
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Will McVicker <willmcvicker@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/jump_label.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -83,6 +83,7 @@ void static_key_slow_inc_cpuslocked(stru
+ int v, v1;
+
+ STATIC_KEY_CHECK_USE(key);
++ lockdep_assert_cpus_held();
+
+ /*
+ * Careful if we get concurrent static_key_slow_inc() calls;
+@@ -128,6 +129,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+ void static_key_enable_cpuslocked(struct static_key *key)
+ {
+ STATIC_KEY_CHECK_USE(key);
++ lockdep_assert_cpus_held();
+
+ if (atomic_read(&key->enabled) > 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+@@ -158,6 +160,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
+ void static_key_disable_cpuslocked(struct static_key *key)
+ {
+ STATIC_KEY_CHECK_USE(key);
++ lockdep_assert_cpus_held();
+
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+@@ -183,6 +186,8 @@ static void __static_key_slow_dec_cpuslo
+ unsigned long rate_limit,
+ struct delayed_work *work)
+ {
++ lockdep_assert_cpus_held();
++
+ /*
+ * The negative count check is valid even when a negative
+ * key->enabled is in use by static_key_slow_inc(); a
--- /dev/null
+From a1247d06d01045d7ab2882a9c074fbf21137c690 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 19 Mar 2019 13:18:56 +0100
+Subject: locking/static_key: Fix false positive warnings on concurrent dec/inc
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit a1247d06d01045d7ab2882a9c074fbf21137c690 upstream.
+
+Even though the atomic_dec_and_mutex_lock() in
+__static_key_slow_dec_cpuslocked() can never see a negative value in
+key->enabled, the subsequent sanity check re-reads key->enabled, which
+may have been set to -1 in the meantime by
+static_key_slow_inc_cpuslocked():
+
+ CPU A CPU B
+
+ __static_key_slow_dec_cpuslocked(): static_key_slow_inc_cpuslocked():
+ # enabled = 1
+ atomic_dec_and_mutex_lock()
+ # enabled = 0
+ atomic_read() == 0
+ atomic_set(-1)
+ # enabled = -1
+ val = atomic_read()
+ # Oops - val == -1!
+
+The test case is TCP's clean_acked_data_enable() / clean_acked_data_disable()
+as tickled by KTLS (net/tls).
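+
+The fix below therefore bases the WARN() on the value that was actually
+observed at decrement time: atomic_fetch_add_unless(&key->enabled, -1, 1)
+reads, tests and decrements key->enabled in a single atomic step, so a
+concurrent increment can no longer slip in between the decrement and
+the sanity check. Roughly, its generic fallback implementation looks
+like this (a sketch, not the exact kernel code):
+
+	static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+	{
+		int c = atomic_read(v);
+
+		do {
+			if (c == u)	/* don't add when *v == u ... */
+				break;
+		} while (!atomic_try_cmpxchg(v, &c, c + a));
+
+		return c;		/* ... and return the prior value */
+	}
+
+If the returned value is 1, this was the last reference and the slow
+path runs under jump_label_lock(), where atomic_dec_and_test() decides
+whether the update really has to happen.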
+
+Suggested-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reported-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Tested-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: ard.biesheuvel@linaro.org
+Cc: oss-drivers@netronome.com
+Cc: pbonzini@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Will McVicker <willmcvicker@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/jump_label.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -186,6 +186,8 @@ static void __static_key_slow_dec_cpuslo
+ unsigned long rate_limit,
+ struct delayed_work *work)
+ {
++ int val;
++
+ lockdep_assert_cpus_held();
+
+ /*
+@@ -195,17 +197,20 @@ static void __static_key_slow_dec_cpuslo
+ * returns is unbalanced, because all other static_key_slow_inc()
+ * instances block while the update is in progress.
+ */
+- if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+- WARN(atomic_read(&key->enabled) < 0,
+- "jump label: negative count!\n");
++ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
++ if (val != 1) {
++ WARN(val < 0, "jump label: negative count!\n");
+ return;
+ }
+
+- if (rate_limit) {
+- atomic_inc(&key->enabled);
+- schedule_delayed_work(work, rate_limit);
+- } else {
+- jump_label_update(key);
++ jump_label_lock();
++ if (atomic_dec_and_test(&key->enabled)) {
++ if (rate_limit) {
++ atomic_inc(&key->enabled);
++ schedule_delayed_work(work, rate_limit);
++ } else {
++ jump_label_update(key);
++ }
+ }
+ jump_label_unlock();
+ }