struct task_struct;
struct pcpu_hot {
- union {
- struct {
- struct task_struct *current_task;
- int preempt_count;
- int cpu_number;
+ struct task_struct *current_task;
+ int preempt_count;
+ int cpu_number;
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
- u64 call_depth;
+ u64 call_depth;
#endif
- unsigned long top_of_stack;
- void *hardirq_stack_ptr;
- u16 softirq_pending;
+ unsigned long top_of_stack;
+ void *hardirq_stack_ptr;
+ u16 softirq_pending;
#ifdef CONFIG_X86_64
- bool hardirq_stack_inuse;
+ bool hardirq_stack_inuse;
#else
- void *softirq_stack_ptr;
+ void *softirq_stack_ptr;
#endif
- };
- u8 pad[64];
- };
};
-static_assert(sizeof(struct pcpu_hot) == 64);
-DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot);
+DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
/* const-qualified alias to pcpu_hot, aliased by linker. */
-DECLARE_PER_CPU_ALIGNED(const struct pcpu_hot __percpu_seg_override,
+DECLARE_PER_CPU_CACHE_HOT(const struct pcpu_hot __percpu_seg_override,
const_pcpu_hot);
static __always_inline struct task_struct *get_current(void)
{
	return this_cpu_read_const(const_pcpu_hot.current_task);
}
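The const-qualified view is what lets the compiler cache current: loads through const_pcpu_hot may be treated as invariant and folded across a function, while writers still go through pcpu_hot. The alias has no storage of its own; a minimal sketch of the linker-script side, assuming the symbol is simply bound to pcpu_hot in arch/x86/kernel/vmlinux.lds.S:

	/* Bind the const-qualified alias to the real per-CPU object. */
	PROVIDE(const_pcpu_hot = pcpu_hot);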
__setup("setcpuid=", setup_setcpuid);
-DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
+DEFINE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot) = {
.current_task = &init_task,
.preempt_count = INIT_PREEMPT_COUNT,
.top_of_stack = TOP_OF_INIT_STACK,
};
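The DEFINE_PER_CPU_CACHE_HOT/DECLARE_PER_CPU_CACHE_HOT helpers themselves are not part of this excerpt. Following the pattern of the existing DEFINE_PER_CPU_* family in include/linux/percpu-defs.h, they would plausibly route the variable into a dedicated hot subsection; a sketch, with the section suffix assumed:

	/* Sketch: place the object in a hot per-CPU input section that the
	 * linker script groups between __per_cpu_hot_start/_end. */
	#define DEFINE_PER_CPU_CACHE_HOT(type, name)			\
		DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)

	#define DECLARE_PER_CPU_CACHE_HOT(type, name)			\
		DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)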
PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHE_HOT_DATA(L1_CACHE_BYTES)
+
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
DATA_DATA
EXIT_DATA
}
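CACHE_HOT_DATA is the image-side counterpart, grouping hot non-per-CPU kernel data next to each other ahead of the general .data payload. A plausible shape for the helper, with the input-section name assumed:

	#define CACHE_HOT_DATA(align)					\
		. = ALIGN(align);					\
		*(SORT_BY_ALIGNMENT(.data..hot.*))			\
		. = ALIGN(align);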
- PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot section too large")
RUNTIME_CONST_VARIABLES
RUNTIME_CONST(ptr, USER_PTR_MAX)
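__per_cpu_hot_start and __per_cpu_hot_end are linker symbols bracketing the hot per-CPU input sections, so the ASSERT takes over the job of the deleted static_assert on sizeof(struct pcpu_hot): everything declared cache-hot must still fit in a single 64-byte cache line. A sketch of the section layout that would provide those symbols, placement assumed:

	. = ALIGN(L1_CACHE_BYTES);
	__per_cpu_hot_start = .;
	*(SORT_BY_ALIGNMENT(.data..percpu..hot.*))
	__per_cpu_hot_end = .;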