/*
 * kernel/jump_label.c - jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements, as it is in the !CONFIG_JUMP_LABEL case, it's
 * OK for it to be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
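
/*
 * Sketch of the key->enabled state machine implemented above:
 *
 *	 0 -> -1	first incrementer takes jump_label_mutex and
 *			patches the text via jump_label_update()
 *	-1 ->  1	atomic_set_release() publishes the text changes
 *	 1 ->  n	later incrementers take the lockless cmpxchg path
 *
 * Any caller that observes enabled > 0 is thus guaranteed to also
 * observe the patched text.
 */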

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
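
/*
 * Usage sketch (illustrative only; 'my_feature_key' and do_rare_feature()
 * are made-up names, not part of this file):
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	if (static_branch_unlikely(&my_feature_key))
 *		do_rare_feature();	// out of line until enabled
 *
 *	// boolean on/off, e.g. from a sysctl handler:
 *	static_branch_enable(&my_feature_key);
 *	static_branch_disable(&my_feature_key);
 *
 * static_branch_enable()/disable() expand to the static_key_enable()/
 * static_key_disable() calls above on the embedded key.
 */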

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
			   unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
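
/*
 * Usage sketch for a rate-limited key (illustrative; 'deferred_key' is a
 * made-up name; zero initialization is equivalent to an initially-false
 * key):
 *
 *	static struct static_key_deferred deferred_key;
 *
 *	jump_label_rate_limit(&deferred_key, HZ);   // defer decrements ~1s
 *	static_key_slow_inc(&deferred_key.key);
 *	...
 *	static_key_slow_dec_deferred(&deferred_key);
 *
 * Deferring the decrement batches rapid inc/dec cycles so the expensive
 * text patching in jump_label_update() does not run on every transition.
 */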

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
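
/*
 * For reference, the metadata bits (values as defined in
 * include/linux/jump_label.h):
 *
 *	JUMP_TYPE_FALSE		0UL	initial branch direction is false
 *	JUMP_TYPE_TRUE		1UL	initial branch direction is true
 *	JUMP_TYPE_LINKED	2UL	key points at a static_key_mod list
 *	JUMP_TYPE_MASK		3UL	both metadata bits
 *
 * The pointers stored here are at least word-aligned, which is what
 * leaves the two low bits free.
 */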

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

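/*
 * Truth table for the XOR above (JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1):
 *
 *	enabled	branch	result
 *	  0	  0	NOP	(fall through to the inline path)
 *	  0	  1	JMP	(jump to the out-of-line path)
 *	  1	  0	JMP
 *	  1	  1	NOP
 */

/*
 * Patch every entry in [entry, stop) that belongs to @key. If @init is
 * true, entries residing in init text are patched as well; otherwise
 * they are skipped, since their code may already have been freed.
 */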
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (init || !jump_entry_is_init(entry)) {
			if (kernel_text_address(jump_entry_code(entry)))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)jump_entry_code(entry));
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
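
/*
 * Illustration of the linked layout built by jump_label_add_module()
 * below: new static_key_mod nodes are pushed at the head, and the tail
 * node holds the key's original entries:
 *
 *	key->next -> { mod B, B's entries }
 *		  -> { mod A, A's entries }
 *		  -> { defining module (NULL if builtin), original entries }
 */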

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
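		/* Link this module's entries at the head of key's list. */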
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end overlaps with
 * any of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}
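
/*
 * Note: system_state < SYSTEM_RUNNING serves as the @init argument above
 * because init text is freed only shortly before SYSTEM_RUNNING is set,
 * so during boot it is still valid to patch entries in init sections.
 */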

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */