return delta;
}
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
static u64 irqtime_tick_accounted(u64 dummy)
{
task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
-#endif
+#endif /* CONFIG_SCHED_CORE */
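For readers skimming the hunks: the convention being applied is that an #else line is annotated with the negated config symbol plus a trailing colon (naming the branch that follows it), while a closing #endif names whichever condition its block actually covered, keeping the positive symbol when there is no #else branch. A minimal, self-contained sketch of the pattern, using a made-up CONFIG_FOO symbol and toy helpers rather than anything from this file:

    #include <stdio.h>

    /* Pretend Kconfig switch, purely for illustration. */
    #define CONFIG_FOO 1

    #ifdef CONFIG_FOO
    static int foo_enabled(void)
    {
            return 1;
    }
    #else /* !CONFIG_FOO: */
    static int foo_enabled(void)
    {
            return 0;
    }
    #endif /* !CONFIG_FOO */

    /* A region with no #else branch keeps the positive symbol on its #endif: */
    #ifdef CONFIG_FOO
    static void foo_report(void)
    {
            printf("foo enabled: %d\n", foo_enabled());
    }
    #endif /* CONFIG_FOO */

    int main(void)
    {
    #ifdef CONFIG_FOO
            foo_report();
    #endif
            return 0;
    }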
/*
* When a guest is interrupted for a longer amount of time, missed clock
return steal;
}
-#endif
+#endif /* CONFIG_PARAVIRT */
return 0;
}
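The truncated comment above is what motivates the clamping inside steal_account_process_time(): a hypervisor's steal clock can report more time than the caller believes has elapsed, so only the newly observed portion, limited to the caller's budget, gets accounted. A simplified userspace model of that idea, in which the steal clock, its 3000 ns increment and every name are invented for illustration and are not the kernel's code:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t prev_steal;     /* steal time already accounted */

    /* Stand-in for a hypervisor-provided, monotonically increasing counter. */
    static uint64_t hypothetical_steal_clock(void)
    {
            static uint64_t t;

            t += 3000;              /* pretend 3 us of steal per query */
            return t;
    }

    static uint64_t steal_account(uint64_t maxtime)
    {
            uint64_t steal = hypothetical_steal_clock() - prev_steal;

            if (steal > maxtime)    /* clamp to the caller's time budget */
                    steal = maxtime;

            prev_steal += steal;    /* remember what was accounted */
            return steal;
    }

    int main(void)
    {
            printf("accounted %llu ns of steal time\n",
                   (unsigned long long)steal_account(1000000));
            return 0;
    }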
{
return t->se.sum_exec_runtime;
}
-#else
+#else /* !CONFIG_64BIT: */
static u64 read_sum_exec_runtime(struct task_struct *t)
{
u64 ns;
return ns;
}
-#endif
+#endif /* !CONFIG_64BIT */
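The 64-bit/32-bit split in this hunk exists because sum_exec_runtime is a 64-bit counter: a 64-bit kernel can read it with a single plain load, while a 32-bit kernel takes the task's runqueue lock so it never observes a torn, half-updated value. A hedged, self-contained sketch of the same split, with a pthread mutex standing in for task_rq_lock() and a made-up fake_task structure:

    #include <pthread.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the task and its runqueue lock. */
    struct fake_task {
            uint64_t        sum_exec_runtime;
            pthread_mutex_t lock;
    };

    #ifdef __LP64__
    /* 64-bit: an aligned 64-bit load cannot tear, so no locking is needed. */
    static uint64_t read_sum_exec_runtime(struct fake_task *t)
    {
            return t->sum_exec_runtime;
    }
    #else /* !__LP64__: */
    /* 32-bit: read under the lock so a concurrent update is never seen halfway. */
    static uint64_t read_sum_exec_runtime(struct fake_task *t)
    {
            uint64_t ns;

            pthread_mutex_lock(&t->lock);
            ns = t->sum_exec_runtime;
            pthread_mutex_unlock(&t->lock);

            return ns;
    }
    #endif /* !__LP64__ */

    int main(void)
    {
            struct fake_task t = {
                    .sum_exec_runtime = 123456789ULL,
                    .lock = PTHREAD_MUTEX_INITIALIZER,
            };

            return read_sum_exec_runtime(&t) == 123456789ULL ? 0 : 1;
    }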
/*
* Accumulate raw cputime values of dead tasks (sig->[us]time) and live
{
irqtime_account_process_tick(current, 0, ticks);
}
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
int nr_ticks) { }
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
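As in the hunk just above, the !CONFIG_IRQ_TIME_ACCOUNTING branch provides empty static inline stubs so that call sites compile unchanged whether the feature is built in or not. A tiny hypothetical illustration of that stub pattern, complementing the CONFIG_FOO sketch earlier (CONFIG_BAR and bar_account_tick() are made up):

    #include <stdio.h>

    /* Comment this out to build the "feature disabled" configuration. */
    #define CONFIG_BAR 1

    #ifdef CONFIG_BAR
    static void bar_account_tick(int ticks)
    {
            printf("accounted %d tick(s)\n", ticks);
    }
    #else /* !CONFIG_BAR: */
    /* Empty stub: the call site below needs no #ifdef of its own. */
    static inline void bar_account_tick(int ticks) { }
    #endif /* !CONFIG_BAR */

    int main(void)
    {
            bar_account_tick(4);
            return 0;
    }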
/*
* Use precise platform statistics if available: