SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
- long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
+ /**
+ * arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper
+ *
+ * Invoked from syscall_trace_enter() to wrap ptrace_report_syscall_entry().
+ *
+ * This allows architecture specific ptrace_report_syscall_entry()
+ * implementations. If not defined by the architecture this falls back to
+ * ptrace_report_syscall_entry().
+ */
+ static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs);
+
+ #ifndef arch_ptrace_report_syscall_entry
+ static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
+ {
+ return ptrace_report_syscall_entry(regs);
+ }
+ #endif
+
+ bool syscall_user_dispatch(struct pt_regs *regs);
+ long trace_syscall_enter(struct pt_regs *regs, long syscall);
+ void trace_syscall_exit(struct pt_regs *regs, long ret);
+
+ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
+ {
+ if (unlikely(audit_context())) {
+ unsigned long args[6];
+
+ syscall_get_arguments(current, regs, args);
+ audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
+ }
+ }
+
+ static __always_inline long syscall_trace_enter(struct pt_regs *regs, unsigned long work)
+ {
+ long syscall, ret = 0;
+
+ /*
+ * Handle Syscall User Dispatch. This must come first, since
+ * the ABI here can be something that doesn't make sense for
+ * other syscall_work features.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (syscall_user_dispatch(regs))
+ return -1L;
+ }
+
++ /*
++ * User space got a time slice extension granted and relinquishes
++ * the CPU. The work stops the slice timer to avoid an extra round
++ * through hrtimer_interrupt().
++ */
++ if (work & SYSCALL_WORK_SYSCALL_RSEQ_SLICE)
++ rseq_syscall_enter_work(syscall_get_nr(current, regs));
++
+ /* Handle ptrace */
+ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
+ ret = arch_ptrace_report_syscall_entry(regs);
+ if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
+ return -1L;
+ }
+
+ /* Do seccomp after ptrace, to catch any tracer changes. */
+ if (work & SYSCALL_WORK_SECCOMP) {
+ ret = __secure_computing();
+ if (ret == -1L)
+ return ret;
+ }
+
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ syscall = trace_syscall_enter(regs, syscall);
+
+ syscall_enter_audit(regs, syscall);
+
+ return ret ? : syscall;
+ }
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking