struct task_struct;
-struct pcpu_hot {
-	struct task_struct	*current_task;
-};
-
-DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
-
-/* const-qualified alias to pcpu_hot, aliased by linker. */
-DECLARE_PER_CPU_CACHE_HOT(const struct pcpu_hot __percpu_seg_override,
-			   const_pcpu_hot);
+DECLARE_PER_CPU_CACHE_HOT(struct task_struct *, current_task);
+/* const-qualified alias provided by the linker. */
+DECLARE_PER_CPU_CACHE_HOT(struct task_struct * const __percpu_seg_override,
+			   const_current_task);
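
/*
 * Why carry a second, const-qualified symbol at all: with
 * CONFIG_USE_X86_SEG_SUPPORT, per-CPU data is reached through a named
 * address space, and a const-qualified object there may be loaded once
 * and reused by the compiler. A rough user-space sketch of the effect
 * (variable and function names invented, not kernel code):
 */
extern void * __seg_gs cur;		/* mutable: reloaded at each access */
extern void * const __seg_gs ccur;	/* const view: reads may be merged */

static int probe(void)
{
	/* The compiler may fold both reads of 'ccur' into one
	 * %gs-relative load; two reads of 'cur' stay two loads. */
	return ccur != 0 && ccur != (void *)-1;
}
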
static __always_inline struct task_struct *get_current(void)
{
	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
-		return this_cpu_read_const(const_pcpu_hot.current_task);
+		return this_cpu_read_const(const_current_task);
-	return this_cpu_read_stable(pcpu_hot.current_task);
+	return this_cpu_read_stable(current_task);
}
#define current get_current()
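
/*
 * Nothing changes for callers; an illustrative (kernel-context) use of
 * the macro:
 */
static void show_current(void)
{
	pr_info("running as pid %d (%s)\n", current->pid, current->comm);
}
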
/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs. The current users include
- * pcpu_hot.current_task and cpu_current_top_of_stack, both of which are
+ * current_task and cpu_current_top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
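
/*
 * What "stable" buys, sketched below: the read may be hoisted out of
 * the loop, which is safe only because a task's current_task entry
 * cannot change while that task runs, whatever CPU it runs on.
 * account_one() is a hypothetical helper, not a kernel function.
 */
extern void account_one(struct task_struct *t, int n);	/* hypothetical */

static void touch_all(int n)
{
	while (n--) {
		/* May be read once and cached across iterations. */
		struct task_struct *t = this_cpu_read_stable(current_task);

		account_one(t, n);
	}
}
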
	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
-	OFFSET(X86_current_task, pcpu_hot, current_task);
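
/*
 * Why the OFFSET() entry can go: X86_current_task existed only so that
 * assembly could form "pcpu_hot + offsetof(current_task)"; now that
 * current_task is a plain per-CPU symbol, the .S hunks below name it
 * directly.
 */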
#if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
	/* Offset for fields in aria_ctx */
	BLANK();
}
__setup("setcpuid=", setup_setcpuid);
-DEFINE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot) = {
-	.current_task	= &init_task,
-};
-EXPORT_PER_CPU_SYMBOL(pcpu_hot);
-EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
+DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+EXPORT_PER_CPU_SYMBOL(const_current_task);
DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
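
/*
 * The static initializer is load-bearing: the boot CPU needs a valid
 * 'current' (init_task) before the scheduler ever runs, just as the
 * old pcpu_hot initializer provided via its .current_task member.
 */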
	 *
	 * RDX contains the per-cpu offset
	 */
-	movq	pcpu_hot + X86_current_task(%rdx), %rax
+	movq	current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp
/*
	UNWIND_HINT_END_OF_STACK
	/* Find the idle task stack */
-	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
+	movq	PER_CPU_VAR(current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp
	jmp	.Ljump_to_C_code
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);
-	raw_cpu_write(pcpu_hot.current_task, next_p);
+	raw_cpu_write(current_task, next_p);
	switch_fpu_finish(next_p);
	/*
	 * Switch the PDA and FPU contexts.
	 */
-	raw_cpu_write(pcpu_hot.current_task, next_p);
+	raw_cpu_write(current_task, next_p);
	raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
	switch_fpu_finish(next_p);
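
/*
 * raw_cpu_write() rather than this_cpu_write() is fine in both
 * __switch_to() hunks: the context switch runs with preemption
 * disabled, so the CPU cannot change between computing the per-CPU
 * address and performing the store.
 */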
	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();
-	per_cpu(pcpu_hot.current_task, cpu) = idle;
+	per_cpu(current_task, cpu) = idle;
	cpu_init_stack_canary(cpu, idle);
	/* Initialize the interrupt stack(s) */
#endif
jiffies = jiffies_64;
-const_pcpu_hot = pcpu_hot;
+const_current_task = current_task;
const_cpu_current_top_of_stack = cpu_current_top_of_stack;
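
/*
 * This linker assignment is where the const view comes from: C cannot
 * define one object under two different qualifications, so both
 * symbols are bound to the same per-CPU address at link time, the same
 * trick used for const_cpu_current_top_of_stack above.
 */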
#if defined(CONFIG_X86_64)
            var_ptr = gdb.parse_and_eval("(struct task_struct *)cpu_tasks[0].task")
            return var_ptr.dereference()
        else:
-            var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
+            var_ptr = gdb.parse_and_eval("&current_task")
            return per_cpu(var_ptr, cpu).dereference()
    elif utils.is_target_arch("aarch64"):
        current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
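
# A quick way to exercise the x86 branch above from a debugger session
# (sketch; the scripts/gdb helpers must be loaded so that the
# $lx_current() convenience function exists):
#   (gdb) p $lx_current().comm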