int unwind_deferred_request(struct unwind_work *work, u64 *cookie);
void unwind_deferred_cancel(struct unwind_work *work);
+void unwind_deferred_task_exit(struct task_struct *task);
+
static __always_inline void unwind_reset_info(void)
{
	struct unwind_task_info *info = &current->unwind_info;
static inline int unwind_deferred_request(struct unwind_work *work, u64 *timestamp) { return -ENOSYS; }
static inline void unwind_deferred_cancel(struct unwind_work *work) {}
+static inline void unwind_deferred_task_exit(struct task_struct *task) {}
static inline void unwind_reset_info(void) {}
#endif /* !CONFIG_UNWIND_USER */
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
+#include <linux/unwind_deferred.h>
#include <linux/uaccess.h>
#include <linux/pidfs.h>
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
+ unwind_deferred_task_exit(tsk);
trace_sched_process_exit(tsk, group_dead);
/*
/* Should always be called from faultable context */
might_fault();
- if (current->flags & PF_EXITING)
+ if (!current->mm)
return -EINVAL;
if (!info->cache) {
return 0;
}
-static void unwind_deferred_task_work(struct callback_head *head)
+static void process_unwind_deferred(struct task_struct *task)
{
- struct unwind_task_info *info = container_of(head, struct unwind_task_info, work);
+ struct unwind_task_info *info = &task->unwind_info;
struct unwind_stacktrace trace;
struct unwind_work *work;
unsigned long bits;
}
}
+/* task_work callback: run the deferred unwind processing for the current task. */
+static void unwind_deferred_task_work(struct callback_head *head)
+{
+	process_unwind_deferred(current);
+}
+
+/*
+ * unwind_deferred_task_exit - flush pending deferred unwinds for an exiting task
+ * @task: the exiting task (the do_exit() hunk passes current)
+ *
+ * If an unwind was requested but its task_work has not run yet, process it
+ * now while the task's mm is still usable, then cancel the queued callback
+ * so it does not fire after exit.
+ */
+void unwind_deferred_task_exit(struct task_struct *task)
+{
+	struct unwind_task_info *info = &task->unwind_info;
+
+	if (!unwind_pending(info))
+		return;
+
+	process_unwind_deferred(task);
+
+	/* The work was handled inline; drop the still-queued task_work. */
+	task_work_cancel(task, &info->work);
+}
+
/**
* unwind_deferred_request - Request a user stacktrace on task kernel exit
* @work: Unwind descriptor requesting the trace