jump_label: Split out code under the hotplug lock
author    Marc Zyngier <marc.zyngier@arm.com>
          Tue, 1 Aug 2017 08:02:55 +0000 (09:02 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 10 Aug 2017 10:28:58 +0000 (12:28 +0200)
In order to later introduce an "already locked" version of some
of the static key functions, let's split the code into the core stuff
(the *_cpuslocked functions) and the usual helpers, which now
take/release the hotplug lock and call into the _cpuslocked
versions.
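
The shape of the split, as a self-contained userspace sketch (a pthread
mutex stands in for the CPU hotplug lock; only the function names are
taken from this patch, everything else is illustrative):

/*
 * Userspace sketch of the pattern this patch applies. The names
 * mirror the patch; the "update logic" is a stand-in, not the
 * real static key machinery.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

struct static_key { int enabled; };

/* Core logic: the caller must already hold the hotplug lock. */
static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        key->enabled++;         /* stand-in for the real update logic */
}

/* Helper: takes the lock, then calls into the _cpuslocked core. */
static void static_key_slow_inc(struct static_key *key)
{
        pthread_mutex_lock(&hotplug_lock);
        static_key_slow_inc_cpuslocked(key);
        pthread_mutex_unlock(&hotplug_lock);
}

int main(void)
{
        struct static_key key = { 0 };

        static_key_slow_inc(&key);
        printf("enabled = %d\n", key.enabled);
        return 0;
}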

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170801080257.5056-3-marc.zyngier@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 161301fff97d05f00d53c8e95d211bcf26d1c1a0..cc6d815c75ed5fbfc1fecf1592d3dc4b3133f94d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,11 +79,10 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_slow_inc(struct static_key *key)
+static void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
        int v, v1;
 
-       cpus_read_lock();
        STATIC_KEY_CHECK_USE();
 
        /*
@@ -100,10 +99,8 @@ void static_key_slow_inc(struct static_key *key)
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-               if (likely(v1 == v)) {
-                       cpus_read_unlock();
+               if (likely(v1 == v))
                        return;
-               }
        }
 
        jump_label_lock();
@@ -119,6 +116,12 @@ void static_key_slow_inc(struct static_key *key)
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+       cpus_read_lock();
+       static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -163,10 +166,10 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec(struct static_key *key,
-               unsigned long rate_limit, struct delayed_work *work)
+static void static_key_slow_dec_cpuslocked(struct static_key *key,
+                                          unsigned long rate_limit,
+                                          struct delayed_work *work)
 {
-       cpus_read_lock();
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
@@ -177,7 +180,6 @@ static void __static_key_slow_dec(struct static_key *key,
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
-               cpus_read_unlock();
                return;
        }
 
@@ -188,6 +190,14 @@ static void __static_key_slow_dec(struct static_key *key,
                jump_label_update(key);
        }
        jump_label_unlock();
+}
+
+static void __static_key_slow_dec(struct static_key *key,
+                                 unsigned long rate_limit,
+                                 struct delayed_work *work)
+{
+       cpus_read_lock();
+       static_key_slow_dec_cpuslocked(key, rate_limit, work);
        cpus_read_unlock();
 }
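
For context, a hypothetical sketch of the kind of caller this split is
preparing for, in the same userspace stand-in style as above: code that
already holds the hotplug lock (as CPU hotplug callers in a later patch
of this series will) calls the _cpuslocked core directly, since taking
the hotplug lock again from such a context can deadlock.

/*
 * Hypothetical follow-up caller; the hotplug framework is assumed
 * to hold hotplug_lock when the callback runs, so the callback
 * must use the already-locked entry point.
 */
#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

struct static_key { int enabled; };

static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        key->enabled++;         /* stand-in for the real update logic */
}

/* Runs with hotplug_lock already held by the hotplug machinery. */
static void hotplug_callback(struct static_key *key)
{
        static_key_slow_inc_cpuslocked(key);    /* no second lock */
}

int main(void)
{
        struct static_key key = { 0 };

        pthread_mutex_lock(&hotplug_lock);      /* the framework's lock */
        hotplug_callback(&key);
        pthread_mutex_unlock(&hotplug_lock);
        return 0;
}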