Archs need to ensure they use a high enough resolution clock to
support irq time accounting and then call enable_sched_clock_irqtime().
+# Hidden bool, presumably selected by architectures that use the generic
+# pv_steal_clock static call declared in <linux/sched/cputime.h> — the
+# selecting archs are not visible in this hunk; verify against the full patch.
+config HAVE_PV_STEAL_CLOCK_GEN
+ bool
+
config HAVE_MOVE_PUD
bool
help
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
#include <linux/static_call.h>
#include <asm/paravirt.h>
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
-
static u64 native_steal_clock(int cpu)
{
return 0;
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/static_call.h>
+#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
#include <asm/smp_plat.h>
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
-
static u64 native_steal_clock(int cpu)
{
return 0;
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
+#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
static int has_steal_clock;
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
}
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
u64 pseries_paravirt_steal_clock(int cpu);
static inline u64 paravirt_steal_clock(int cpu)
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/seq_buf.h>
+#include <linux/sched/cputime.h>
#include <asm/mmu.h>
#include <asm/processor.h>
EXPORT_SYMBOL(shared_processor);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
-
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
{
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
#include <linux/printk.h>
#include <linux/static_call.h>
#include <linux/types.h>
+#include <linux/sched/cputime.h>
#include <asm/barrier.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/sbi.h>
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
-
static u64 native_steal_clock(int cpu)
{
return 0;
return static_call(pv_sched_clock)();
}
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
#include <linux/efi.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
+#include <linux/sched/cputime.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/kvm_types.h>
+#include <linux/sched/cputime.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
static_branch_enable(&virt_spin_lock_key);
}
-struct static_key paravirt_steal_enabled;
-struct static_key paravirt_steal_rq_enabled;
-
static u64 native_steal_clock(int cpu)
{
return 0;
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/static_call.h>
+#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
+#include <linux/static_call_types.h>
#include <linux/sched/signal.h>
/*
extern unsigned long long
task_sched_runtime(struct task_struct *task);
+#ifdef CONFIG_PARAVIRT
+/*
+ * Steal-time accounting keys, previously declared in each arch's
+ * paravirt header; set when the hypervisor provides a steal clock.
+ */
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+/* Declaration exists only to give the static call its type; no definition is visible here. */
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+/* Generic steal clock entry point: dispatches through the pv_steal_clock static call. */
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+#endif
+
#endif /* _LINUX_SCHED_CPUTIME_H */
* RQ-clock updating methods:
*/
+/*
+ * Guarded by the broad CONFIG_PARAVIRT (rather than a finer-grained
+ * option) so arch code needs no additional #ifdefs around its users.
+ * Declared in <linux/sched/cputime.h>; formerly defined per-arch.
+ */
+#ifdef CONFIG_PARAVIRT
+struct static_key paravirt_steal_rq_enabled;
+#endif
+
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
+#ifdef CONFIG_PARAVIRT
+/* Declared in <linux/sched/cputime.h>; formerly defined in each arch's paravirt code. */
+struct static_key paravirt_steal_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+/* Default backend for the pv_steal_clock static call: reports no steal time. */
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+#endif
+#endif
+
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
struct sched_group;
struct cpuidle_state;
-#ifdef CONFIG_PARAVIRT
+#if defined(CONFIG_PARAVIRT) && !defined(CONFIG_HAVE_PV_STEAL_CLOCK_GEN)
# include <asm/paravirt.h>
#endif