mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
- rseq_handle_notify_resume(NULL, regs);
+ rseq_handle_notify_resume(regs);
}
#endif /* LINUX_RESUME_USER_MODE_H */
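The hunk above is the caller on the exit-to-usermode path; with the ksig argument gone, it simply hands the wrapper the register state. A minimal sketch of the resulting caller, assuming the enclosing function is resume_user_mode_work() from <linux/resume_user_mode.h> (lines not visible in the hunk are paraphrased, not verbatim):

	/*
	 * Illustrative only: reconstructed around the two context
	 * lines visible in the hunk above.
	 */
	static inline void resume_user_mode_work(struct pt_regs *regs)
	{
		/* ... TIF_NOTIFY_RESUME handling and other work elided ... */
		mem_cgroup_handle_over_high(GFP_KERNEL);
		blkcg_maybe_throttle_current();
		/* No ksignal exists on this path; the wrapper supplies NULL itself. */
		rseq_handle_notify_resume(regs);
	}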
void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
+ __rseq_handle_notify_resume(NULL, regs);
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
- scoped_guard(RSEQ_EVENT_GUARD)
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- rseq_handle_notify_resume(ksig, regs);
+ if (current->rseq) {
+ scoped_guard(RSEQ_EVENT_GUARD)
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ __rseq_handle_notify_resume(ksig, regs);
+ }
}
/* rseq_preempt() requires preemption to be disabled. */
#else /* CONFIG_RSEQ */
static inline void rseq_set_notify_resume(struct task_struct *t) { }
-static inline void rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs) { }
+static inline void rseq_handle_notify_resume(struct pt_regs *regs) { }
static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
static inline void rseq_preempt(struct task_struct *t) { }
static inline void rseq_migrate(struct task_struct *t) { }
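Taken together, the change moves the NULL-vs-ksig distinction inside the rseq header rather than leaving it to every caller: the resume path passes only regs, while rseq_signal_deliver() checks current->rseq itself and forwards its ksig straight to __rseq_handle_notify_resume(), so the signal event bit is only set for tasks that actually registered an rseq area. A hedged sketch of the two call sites as they look after the patch (the function names below are illustrative, not kernel source; the real callers are the exit-to-usermode path and the arch signal-delivery code):

	/* Exit to user mode: no ksignal is available, so none is passed. */
	static void example_exit_to_user(struct pt_regs *regs)
	{
		rseq_handle_notify_resume(regs);	/* NULL ksig supplied internally */
	}

	/* Signal delivery: ksig is in hand and forwarded directly. */
	static void example_handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	{
		rseq_signal_deliver(ksig, regs);	/* no-op unless current->rseq is set */
	}

Either way, the !CONFIG_RSEQ stubs above keep both call sites compiling with no code generated.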