unsigned sched_rt_mutex:1;
#endif
+ /*
+  * Snapshot of dumpability (get_dumpable(mm) == SUID_DUMP_USER),
+  * captured right before current->mm is cleared during mm teardown,
+  * so ptrace access checks (task_still_dumpable()) can still consult
+  * it after the task's mm is gone.
+  */
+ unsigned user_dumpable:1;
+
/* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
*/
smp_mb__after_spinlock();
local_irq_disable();
+ /*
+  * Record whether this mm was user-dumpable before dropping it, so
+  * later ptrace access checks (task_still_dumpable()) have something
+  * to consult once task->mm is NULL.
+  * NOTE(review): readers presumably rely on this store being ordered
+  * before the mm = NULL store below — confirm both happen under the
+  * same task_lock()/locking region in the full function.
+  */
+ current->user_dumpable = (get_dumpable(mm) == SUID_DUMP_USER);
current->mm = NULL;
membarrier_update_current_mm(NULL);
enter_lazy_tlb(mm, current);
return ns_capable(ns, CAP_SYS_PTRACE);
}
+/*
+ * task_still_dumpable - can the dumpability requirement be satisfied for @task?
+ * @task: target of the ptrace access check
+ * @mode: PTRACE_MODE_* flags, forwarded to ptrace_has_cap()
+ *
+ * Returns true when @task's mm is dumpable to its user, or when the
+ * caller has CAP_SYS_PTRACE (via ptrace_has_cap()) in the relevant
+ * user namespace.
+ *
+ * When @task->mm is already NULL (task past mm teardown, or a task
+ * that never had a user mm), fall back to the dumpability snapshot
+ * saved in task->user_dumpable; the capability check then uses
+ * &init_user_ns because mm->user_ns is no longer reachable.
+ * NOTE(review): the &init_user_ns fallback is stricter than checking
+ * a task-local namespace — confirm this is the intended policy, and
+ * note it also applies to tasks with user_dumpable still zero-initialized.
+ *
+ * NOTE(review): dereferencing mm->user_ns after the unlocked-looking
+ * task->mm read assumes the mm cannot go away concurrently —
+ * presumably the caller holds task_lock(); confirm.
+ */
+static bool task_still_dumpable(struct task_struct *task, unsigned int mode)
+{
+ struct mm_struct *mm = task->mm;
+ if (mm) {
+ if (get_dumpable(mm) == SUID_DUMP_USER)
+ return true;
+ return ptrace_has_cap(mm->user_ns, mode);
+ }
+
+ if (task->user_dumpable)
+ return true;
+ return ptrace_has_cap(&init_user_ns, mode);
+}
+
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
- struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;
* Pairs with a write barrier in commit_creds().
*/
smp_rmb();
+ /*
+ * The dumpability check now lives in task_still_dumpable(), which
+ * also covers tasks whose mm has already been dropped. Behavior
+ * change vs. the removed code: an mm-less task previously passed
+ * this check unconditionally (the old "if (mm && ...)" only denied
+ * when an mm was present); now it must have been dumpable at mm
+ * teardown, or the caller needs ptrace_has_cap() in &init_user_ns.
+ */
+ if (!task_still_dumpable(task, mode))
+ return -EPERM;
return security_ptrace_access_check(task, mode);
}