#define __LINUX_IRQENTRYCOMMON_H
#include <linux/context_tracking.h>
+#include <linux/hrtimer_rearm.h>
#include <linux/kmsan.h>
#include <linux/rseq_entry.h>
#include <linux/static_call_types.h>
_TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
ARCH_EXIT_TO_USER_MODE_WORK)
+#ifdef CONFIG_HRTIMER_REARM_DEFERRED
+# define EXIT_TO_USER_MODE_WORK_SYSCALL (EXIT_TO_USER_MODE_WORK)
+# define EXIT_TO_USER_MODE_WORK_IRQ (EXIT_TO_USER_MODE_WORK | _TIF_HRTIMER_REARM)
+#else
+# define EXIT_TO_USER_MODE_WORK_SYSCALL (EXIT_TO_USER_MODE_WORK)
+# define EXIT_TO_USER_MODE_WORK_IRQ (EXIT_TO_USER_MODE_WORK)
+#endif
+
/**
* arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
* @regs: Pointer to currents pt_regs
/**
* __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
* @regs: Pointer to pt_regs on entry stack
+ * @work_mask: Which TIF bits need to be evaluated
*
* 1) check that interrupts are disabled
* 2) call tick_nohz_user_enter_prepare()
*
* Don't invoke directly, use the syscall/irqentry_ prefixed variants below
*/
-static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs,
+ const unsigned long work_mask)
{
unsigned long ti_work;
tick_nohz_user_enter_prepare();
ti_work = read_thread_flags();
- if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
- ti_work = exit_to_user_mode_loop(regs, ti_work);
+ if (unlikely(ti_work & work_mask)) {
+ if (!hrtimer_rearm_deferred_user_irq(&ti_work, work_mask))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+ }
arch_exit_to_user_mode_prepare(regs, ti_work);
}
/* Temporary workaround to keep ARM64 alive */
static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK);
rseq_exit_to_user_mode_legacy();
__exit_to_user_mode_validate();
}
*/
static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_SYSCALL);
rseq_syscall_exit_to_user_mode();
__exit_to_user_mode_validate();
}
*/
static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_IRQ);
rseq_irqentry_exit_to_user_mode();
__exit_to_user_mode_validate();
}
#endif /* !CONFIG_RSEQ_STATS */
#ifdef CONFIG_RSEQ
+#include <linux/hrtimer_rearm.h>
#include <linux/jump_label.h>
#include <linux/rseq.h>
#include <linux/sched/signal.h>
t->rseq.slice.state.granted = false;
}
-static __always_inline bool rseq_grant_slice_extension(bool work_pending)
+static __always_inline bool __rseq_grant_slice_extension(bool work_pending)
{
struct task_struct *curr = current;
struct rseq_slice_ctrl usr_ctrl;
return false;
}
+static __always_inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask)
+{
+ if (unlikely(__rseq_grant_slice_extension(ti_work & mask))) {
+ hrtimer_rearm_deferred_tif(ti_work);
+ return true;
+ }
+ return false;
+}
+
#else /* CONFIG_RSEQ_SLICE_EXTENSION */
static inline bool rseq_slice_extension_enabled(void) { return false; }
static inline bool rseq_arm_slice_extension_timer(void) { return false; }
static inline void rseq_slice_clear_grant(struct task_struct *t) { }
-static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+static inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask) { return false; }
#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
static inline void rseq_irqentry_exit_to_user_mode(void) { }
static inline void rseq_exit_to_user_mode_legacy(void) { }
static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
-static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+static inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask) { return false; }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
local_irq_enable_exit_to_user(ti_work);
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {
- if (!rseq_grant_slice_extension(ti_work & TIF_SLICE_EXT_DENY))
+ if (!rseq_grant_slice_extension(ti_work, TIF_SLICE_EXT_DENY))
schedule();
}
*/
if (state.exit_rcu) {
instrumentation_begin();
+ hrtimer_rearm_deferred();
/* Tell the tracer that IRET will enable interrupts */
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
if (IS_ENABLED(CONFIG_PREEMPTION))
irqentry_exit_cond_resched();
+ hrtimer_rearm_deferred();
/* Covers both tracing and lockdep */
trace_hardirqs_on();
instrumentation_end();