struct perf_event_context;
struct blk_plug;
struct filename;
+struct linux_binprm;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
-extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
struct user_namespace;
#ifdef CONFIG_MMU
+
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+/* Returns a random offset to apply when placing a new thread-stack
+ * mapping; implemented in grsecurity proper. */
+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
+#else
+/* Randomization compiled out: always use a zero offset. */
+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
+{
+ return 0;
+}
+#endif
+
+/* Guard-gap helpers for arch get_unmapped_area() implementations.
+ * NOTE(review): presumably these enforce/skip a hole between heap and
+ * stack mappings — confirm against their definitions in mm/. */
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
+
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
+
+#ifdef CONFIG_GRKERNSEC
+ u32 curr_ip;
+ u32 saved_ip;
+ u32 gr_saddr;
+ u32 gr_daddr;
+ u16 gr_sport;
+ u16 gr_dport;
+ u8 used_accept:1;
+#endif
+
#ifdef CONFIG_AUDIT
unsigned audit_tty;
unsigned audit_tty_log_passwd;
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
-};
+} __randomize_layout;
/*
* Bits in flags field of signal_struct.
struct key *session_keyring; /* UID's default session keyring */
#endif
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+ unsigned char kernel_banned;
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
+ unsigned char suid_banned;
+ unsigned long suid_ban_expires;
+#endif
+
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
-};
+} __randomize_layout;
extern int uids_sysfs_init(void);
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+ void *lowmem_stack;
+#endif
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
-/* process credentials */
- const struct cred __rcu *real_cred; /* objective and real subjective task
- * credentials (COW) */
- const struct cred __rcu *cred; /* effective (overridable) subjective task
- * credentials (COW) */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
+/* thread_info moved to task_struct */
+#ifdef CONFIG_X86
+ struct thread_info tinfo;
+#endif
/* filesystem information */
struct fs_struct *fs;
/* open file information */
gfp_t lockdep_reclaim_gfp;
#endif
+/* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+ * credentials (COW) */
+
/* journalling filesystem info */
void *journal_info;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
+
+ const struct cred __rcu *cred; /* effective (overridable) subjective task
+ * credentials (COW) */
+
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
* Number of functions that haven't been traced
* because of depth overrun.
*/
- atomic_t trace_overrun;
+ atomic_unchecked_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-};
+
+#ifdef CONFIG_GRKERNSEC
+ /* grsecurity */
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+ u64 exec_id;
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+ const struct cred *delayed_cred;
+#endif
+ struct dentry *gr_chroot_dentry;
+ struct acl_subject_label *acl;
+ struct acl_subject_label *tmpacl;
+ struct acl_role_label *role;
+ struct file *exec_file;
+ unsigned long brute_expires;
+ u16 acl_role_id;
+ u8 inherited;
+ /* is this the task that authenticated to the special role */
+ u8 acl_sp_role;
+ u8 is_writable;
+ u8 brute;
+ u8 gr_is_chrooted;
+#endif
+
+} __randomize_layout;
+
+/* PaX per-mm feature flag bits (kept in mm->pax_flags; read and
+ * written via pax_get_flags()/pax_set_flags() below). */
+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
+
+#ifdef CONFIG_PAX_SOFTMODE
+/* Non-zero when PaX soft mode is active (defined in PaX core). */
+extern int pax_softmode;
+#endif
+
+/* Validates an MF_PAX_* mask passed by pointer; defined in PaX core.
+ * NOTE(review): presumably returns 0 on a valid mask — confirm. */
+extern int pax_check_flags(unsigned long *);
+/* Sentinel used by PaX flag parsers when no valid marking is found. */
+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
+
+/* if tsk != current then task_lock must be held on it */
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static inline unsigned long pax_get_flags(struct task_struct *tsk)
+{
+ /* Kernel threads have no mm: report no PaX flags for them. */
+ if (likely(tsk->mm))
+ return tsk->mm->pax_flags;
+ else
+ return 0UL;
+}
+
+/* if tsk != current then task_lock must be held on it */
+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
+{
+ if (likely(tsk->mm)) {
+ tsk->mm->pax_flags = flags;
+ return 0;
+ }
+ /* No mm (kernel thread): nowhere to store the flags. */
+ return -EINVAL;
+}
+#endif
+
+/* Hook for establishing a task's initial PaX flags at exec time:
+ * either a direct function or a settable function pointer, depending
+ * on which ACL mechanism is configured. */
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+extern void pax_set_initial_flags(struct linux_binprm *bprm);
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+#endif
+
+struct path;
+/* Renders @path into @buf for PaX log messages.
+ * NOTE(review): presumably d_path-style (returns pointer into buf,
+ * fills from the end) — confirm against its definition. */
+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
+/* Logging entry points invoked by arch fault handlers on PaX events. */
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
+/* Returns tsk->pid directly; const-qualified because the accessor
+ * only reads the task and callers may hold a const pointer. */
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
{
return tsk->pid;
}
extern void sched_clock_init(void);
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+/*
+ * Touch one word in every page of the current kernel stack so each
+ * page is faulted in up front.  NOTE(review): presumably required
+ * because KSTACKOVERFLOW backs stacks with memory that must not fault
+ * at sensitive points later (cf. task_struct.lowmem_stack) — confirm.
+ */
+static inline void populate_stack(void)
+{
+ struct task_struct *curtask = current;
+ int c; /* value is discarded; the read itself is the point */
+ int *ptr = curtask->stack;
+ int *end = curtask->stack + THREAD_SIZE;
+
+ while (ptr < end) {
+ /* volatile read keeps the compiler from eliding the touch */
+ c = *(volatile int *)ptr;
+ ptr += PAGE_SIZE/sizeof(int);
+ }
+}
+#else
+/* Stack pre-faulting not configured: nothing to do. */
+static inline void populate_stack(void)
+{
+}
+#endif
+
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
extern struct exec_domain default_exec_domain;
union thread_union {
+#ifndef CONFIG_X86
struct thread_info thread_info;
+#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
extern int do_execve(struct filename *,
const char __user * const __user *,
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
+/* True when @obj's start address lies within the current task's stack
+ * pages.  Renamed from object_is_on_stack to make explicit that only
+ * the start is checked — the object may still extend past the end. */
-static inline int object_is_on_stack(void *obj)
+static inline int object_starts_on_stack(const void *obj)
{
- void *stack = task_stack_page(current);
+ const void *stack = task_stack_page(current);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}