#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
- /*
- * This is called by fs/exec.c and sys_unshare()
- * when the new ->mm is used for the first time.
- */
- __switch_mm(&new->context.id);
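+ /*
+ * Nothing to do here any more: the stub process is looked up
+ * through current_mm_id() on every userspace() iteration, so
+ * there is no global PID state left to switch.
+ */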
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
- if(prev != next){
+ if (prev != next) {
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
- if(next != &init_mm)
- __switch_mm(&next->context.id);
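+ /*
+ * No stub-PID switch is needed here any more; userspace()
+ * resolves it from next->context.id via current_mm_id().
+ */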
}
}
int syscall_fd_map[STUB_MAX_FDS];
};
-void __switch_mm(struct mm_id *mm_idp);
-
void notify_mm_kill(int pid);
#endif
#include <sysdep/ptrace.h>
extern int using_seccomp;
-extern int userspace_pid[];
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
get_safe_registers(current_pt_regs()->regs.gp,
current_pt_regs()->regs.fp);
-
- __switch_mm(&current->mm->context.id);
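+ /*
+ * No explicit mm switch: the next userspace() iteration picks
+ * up the new mm through current_mm_id().
+ */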
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
return 0;
}
-extern int userspace_pid[];
-
static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
int __init start_uml(void)
__initcall(init_stub_exe_fd);
int using_seccomp;
-int userspace_pid[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
void userspace(struct uml_pt_regs *regs)
{
- int err, status, op, pid = userspace_pid[0];
+ int err, status, op;
siginfo_t si_ptrace;
siginfo_t *si;
int sig;
interrupt_end();
while (1) {
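+ /*
+ * current->mm, and with it the stub process, may change while
+ * we are scheduled out (e.g. across exec), so re-resolve the
+ * mm_id on every iteration.
+ */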
+ struct mm_id *mm_id = current_mm_id();
+
/*
* When we are in time-travel mode, userspace can theoretically
* do a *lot* of work without being scheduled. The problem with
current_mm_sync();
if (using_seccomp) {
- struct mm_id *mm_id = current_mm_id();
struct stub_data *proc_data = (void *) mm_id->stack;
err = set_stub_state(regs, proc_data, singlestepping());
GET_FAULTINFO_FROM_MC(regs->faultinfo, mcontext);
}
} else {
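+ /* The ptrace path drives the stub process by its host PID. */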
+ int pid = mm_id->pid;
+
/* Flush out any pending syscalls */
- err = syscall_stub_flush(current_mm_id());
+ err = syscall_stub_flush(mm_id);
if (err) {
if (err == -ENOMEM)
report_enomem();
__func__, sig);
fatal_sigsegv();
}
- pid = userspace_pid[0];
interrupt_end();
/* Avoid -ERESTARTSYS handling in host */
block_signals_trace();
UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
-
-void __switch_mm(struct mm_id *mm_idp)
-{
- userspace_pid[0] = mm_idp->pid;
-}
/*
* We have no need whatsoever to switch TLS for kernel threads; beyond
* that, that would also result in us calling os_set_thread_area with
- * userspace_pid[cpu] == 0, which gives an error.
+ * task->mm == NULL, which would cause a crash.
*/
if (likely(to->mm))
return load_TLS(O_FORCE, to);