git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
tracing: Reuse logic from perf's get_recursion_context()
Author: Steven Rostedt (VMware) <rostedt@goodmis.org>
Fri, 15 Oct 2021 17:42:40 +0000 (13:42 -0400)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 28 Nov 2023 16:56:21 +0000 (16:56 +0000)
[ Upstream commit 9b84fadc444de5456ab5f5487e2108311c724c3f ]

Instead of having branches that adds noise to the branch prediction, use
the addition logic to set the bit for the level of interrupt context that
the state is currently in. This copies the logic from perf's
get_recursion_context() function.

Link: https://lore.kernel.org/all/20211015161702.GF174703@worktop.programming.kicks-ass.net/
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Stable-dep-of: 87c3a5893e86 ("sched/core: Optimize in_task() and in_interrupt() a bit")
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/trace_recursion.h
kernel/trace/ring_buffer.c

index fe95f0922526624201eebf7644868380ed9d378b..00acd7dca7a7d2e3921fea7fe20be2f4edcebcd7 100644 (file)
@@ -117,12 +117,13 @@ enum {
 static __always_inline int trace_get_context_bit(void)
 {
        unsigned long pc = preempt_count();
+       unsigned char bit = 0;
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-               return TRACE_CTX_NORMAL;
-       else
-               return pc & NMI_MASK ? TRACE_CTX_NMI :
-                       pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+       bit += !!(pc & (NMI_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+       return TRACE_CTX_NORMAL - bit;
 }
 
 #ifdef CONFIG_FTRACE_RECORD_RECURSION
index e5dc7b5a261c6facffe380c1a1432031ff915467..c3c9960c9f27bf02e8eff1b1fb6d1af577b87429 100644 (file)
@@ -3250,13 +3250,13 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned int val = cpu_buffer->current_context;
        unsigned long pc = preempt_count();
-       int bit;
+       int bit = 0;
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-               bit = RB_CTX_NORMAL;
-       else
-               bit = pc & NMI_MASK ? RB_CTX_NMI :
-                       pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+       bit += !!(pc & (NMI_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+       bit = RB_CTX_NORMAL - bit;
 
        if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
                /*