1 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2 --- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3 +++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20 +++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40 --- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41 +++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53 +++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
82 if (addr != (unsigned long) -ENOMEM)
85 diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86 --- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87 +++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
92 +#ifdef CONFIG_PAX_PAGEEXEC
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
103 +#ifdef CONFIG_PAX_EMUPLT
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
159 + do { /* PaX: unpatched PLT emulation */
162 + err = get_user(br, (unsigned int *)regs->pc);
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
195 +void pax_report_insns(void *pc, void *sp)
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
205 + printk(KERN_CONT "%08x ", c);
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
215 si_code = SEGV_ACCERR;
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
227 +#ifdef CONFIG_PAX_EMUPLT
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245 --- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246 +++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275 --- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276 +++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
285 diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286 --- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287 +++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
343 diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344 --- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345 +++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
348 EXPORT_SYMBOL(copy_page);
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358 --- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359 +++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382 --- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383 +++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384 @@ -258,6 +258,8 @@ static int __die(const char *str, int er
386 static DEFINE_SPINLOCK(die_lock);
388 +extern void gr_handle_kernel_exploit(void);
391 * This function is protected against re-entrancy.
393 @@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
396 panic("Fatal exception");
398 + gr_handle_kernel_exploit();
400 if (ret != NOTIFY_STOP)
403 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404 --- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405 +++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
422 #include "copy_template.S"
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
427 .pushsection .fixup,"ax"
429 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430 --- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431 +++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
448 #include "copy_template.S"
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
454 .pushsection .fixup,"ax"
455 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456 --- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457 +++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
460 #define PAGE_SHIFT 12
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
478 USER( T(strgtb) r3, [r0], #1) @ May fault
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
483 .pushsection .fixup,"ax"
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
509 .pushsection .fixup,"ax"
511 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513 +++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525 +++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
533 static int mbox_show(struct seq_file *s, void *data)
535 diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536 --- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537 +++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
554 #endif /* CONFIG_MMU */
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
567 + printk(KERN_CONT "%02x ", c);
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
577 + printk(KERN_CONT "%08lx ", c);
584 * First Level Translation Fault Handler
586 diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587 --- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588 +++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
636 * Remember the place where we stopped the search:
638 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639 --- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640 +++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659 +++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
670 diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671 --- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672 +++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
675 int exception_trace = 1;
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
688 + printk(KERN_CONT "%02x ", c);
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
700 if (user_mode(regs)) {
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715 --- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716 +++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
725 diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726 --- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727 +++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
756 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757 --- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758 +++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
770 #define PT_IA_64_UNWIND 0x70000001
772 /* IA-64 relocations: */
773 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774 --- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775 +++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
777 * David Mosberger-Tang <davidm@hpl.hp.com>
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804 --- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805 +++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816 --- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817 +++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
836 diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837 --- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838 +++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
841 module_free (struct module *mod, void *module_region)
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
853 +in_init_rx (const struct module *mod, uint64_t addr)
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
859 +in_init_rw (const struct module *mod, uint64_t addr)
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
865 in_init (const struct module *mod, uint64_t addr)
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
872 +in_core_rx (const struct module *mod, uint64_t addr)
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
878 +in_core_rw (const struct module *mod, uint64_t addr)
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
884 in_core (const struct module *mod, uint64_t addr)
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
927 diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929 +++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
942 addr = mm->free_area_cache;
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
962 diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964 +++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975 --- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976 +++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
992 + printk(KERN_CONT "%08x ", c);
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1001 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028 +++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1038 diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039 --- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040 +++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(¤t->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062 --- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063 +++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085 --- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086 +++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109 --- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110 +++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121 --- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122 +++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1162 #include <asm/processor.h>
1165 diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166 --- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167 +++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1177 -unsigned long arch_align_stack(unsigned long sp)
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1182 - return sp & ALMASK;
1184 diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185 --- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186 +++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1189 if (filp || (flags & MAP_SHARED))
1192 +#ifdef CONFIG_PAX_RANDMMAP
1193 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1198 addr = COLOUR_ALIGN(addr, pgoff);
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202 - if (task_size - len >= addr &&
1203 - (!vmm || addr + len <= vmm->vm_start))
1204 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1207 addr = current->mm->mmap_base;
1208 @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1212 - if (!vmm || addr + len <= vmm->vm_start)
1213 + if (check_heap_stack_gap(vmm, addr, len))
1217 @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1221 -static inline unsigned long brk_rnd(void)
1223 - unsigned long rnd = get_random_int();
1225 - rnd = rnd << PAGE_SHIFT;
1226 - /* 8MB for 32bit, 256MB for 64bit */
1227 - if (TASK_IS_32BIT_ADDR)
1228 - rnd = rnd & 0x7ffffful;
1230 - rnd = rnd & 0xffffffful;
1235 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1237 - unsigned long base = mm->brk;
1238 - unsigned long ret;
1240 - ret = PAGE_ALIGN(base + brk_rnd());
1242 - if (ret < mm->brk)
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1251 diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252 --- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253 +++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1258 +#ifdef CONFIG_PAX_PAGEEXEC
1259 +void pax_report_insns(void *pc, void *sp)
1263 + printk(KERN_ERR "PAX: bytes at PC: ");
1264 + for (i = 0; i < 5; i++) {
1266 + if (get_user(c, (unsigned int *)pc+i))
1267 + printk(KERN_CONT "???????? ");
1269 + printk(KERN_CONT "%08x ", c);
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279 --- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280 +++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1285 +#ifdef CONFIG_PAX_ASLR
1286 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1288 +#define PAX_DELTA_MMAP_LEN 16
1289 +#define PAX_DELTA_STACK_LEN 16
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296 --- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297 +++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298 @@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1303 +#ifdef CONFIG_PAX_PAGEEXEC
1304 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1308 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316 diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317 --- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318 +++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323 +static inline int in_init_rx(struct module *me, void *loc)
1325 + return (loc >= me->module_init_rx &&
1326 + loc < (me->module_init_rx + me->init_size_rx));
1329 +static inline int in_init_rw(struct module *me, void *loc)
1331 + return (loc >= me->module_init_rw &&
1332 + loc < (me->module_init_rw + me->init_size_rw));
1335 static inline int in_init(struct module *me, void *loc)
1337 - return (loc >= me->module_init &&
1338 - loc <= (me->module_init + me->init_size));
1339 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1342 +static inline int in_core_rx(struct module *me, void *loc)
1344 + return (loc >= me->module_core_rx &&
1345 + loc < (me->module_core_rx + me->core_size_rx));
1348 +static inline int in_core_rw(struct module *me, void *loc)
1350 + return (loc >= me->module_core_rw &&
1351 + loc < (me->module_core_rw + me->core_size_rw));
1354 static inline int in_core(struct module *me, void *loc)
1356 - return (loc >= me->module_core &&
1357 - loc <= (me->module_core + me->core_size));
1358 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1361 static inline int in_local(struct module *me, void *loc)
1362 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1365 /* align things a bit */
1366 - me->core_size = ALIGN(me->core_size, 16);
1367 - me->arch.got_offset = me->core_size;
1368 - me->core_size += gots * sizeof(struct got_entry);
1370 - me->core_size = ALIGN(me->core_size, 16);
1371 - me->arch.fdesc_offset = me->core_size;
1372 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1373 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374 + me->arch.got_offset = me->core_size_rw;
1375 + me->core_size_rw += gots * sizeof(struct got_entry);
1377 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378 + me->arch.fdesc_offset = me->core_size_rw;
1379 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1387 - got = me->module_core + me->arch.got_offset;
1388 + got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1392 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1396 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1403 /* Create new one */
1404 fdesc->addr = value;
1405 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1409 #endif /* CONFIG_64BIT */
1410 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419 diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420 --- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421 +++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1426 - if (!vma || addr + len <= vma->vm_start)
1427 + if (check_heap_stack_gap(vma, addr, len))
1431 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1435 - if (!vma || addr + len <= vma->vm_start)
1436 + if (check_heap_stack_gap(vma, addr, len))
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1444 - addr = TASK_UNMAPPED_BASE;
1445 + addr = current->mm->mmap_base;
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449 diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450 --- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451 +++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1454 down_read(¤t->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1457 - && (vma->vm_flags & VM_EXEC)) {
1459 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1463 diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464 --- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465 +++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470 +#include <linux/unistd.h>
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1478 - if (code == 6 || code == 16)
1479 + if (code == 6 || code == 7 || code == 16)
1482 switch (inst & 0xf0000000) {
1483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1487 +#ifdef CONFIG_PAX_PAGEEXEC
1489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1491 + * returns 1 when task should be killed
1492 + * 2 when rt_sigreturn trampoline was detected
1493 + * 3 when unpatched PLT trampoline was detected
1495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1498 +#ifdef CONFIG_PAX_EMUPLT
1501 + do { /* PaX: unpatched PLT emulation */
1502 + unsigned int bl, depwi;
1504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1513 + err = get_user(ldw, (unsigned int *)addr);
1514 + err |= get_user(bv, (unsigned int *)(addr+4));
1515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1520 + if (ldw == 0x0E801096U &&
1521 + bv == 0xEAC0C000U &&
1522 + ldw2 == 0x0E881095U)
1524 + unsigned int resolver, map;
1526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1531 + regs->gr[20] = instruction_pointer(regs)+8;
1532 + regs->gr[21] = map;
1533 + regs->gr[22] = resolver;
1534 + regs->iaoq[0] = resolver | 3UL;
1535 + regs->iaoq[1] = regs->iaoq[0] + 4;
1542 +#ifdef CONFIG_PAX_EMUTRAMP
1544 +#ifndef CONFIG_PAX_EMUSIGRT
1545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1549 + do { /* PaX: rt_sigreturn emulation */
1550 + unsigned int ldi1, ldi2, bel, nop;
1552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561 + ldi2 == 0x3414015AU &&
1562 + bel == 0xE4008200U &&
1563 + nop == 0x08000240U)
1565 + regs->gr[25] = (ldi1 & 2) >> 1;
1566 + regs->gr[20] = __NR_rt_sigreturn;
1567 + regs->gr[31] = regs->iaoq[1] + 16;
1568 + regs->sr[0] = regs->iasq[1];
1569 + regs->iaoq[0] = 0x100UL;
1570 + regs->iaoq[1] = regs->iaoq[0] + 4;
1571 + regs->iasq[0] = regs->sr[2];
1572 + regs->iasq[1] = regs->sr[2];
1581 +void pax_report_insns(void *pc, void *sp)
1585 + printk(KERN_ERR "PAX: bytes at PC: ");
1586 + for (i = 0; i < 5; i++) {
1588 + if (get_user(c, (unsigned int *)pc+i))
1589 + printk(KERN_CONT "???????? ");
1591 + printk(KERN_CONT "%08x ", c);
1597 int fixup_exception(struct pt_regs *regs)
1599 const struct exception_table_entry *fix;
1600 @@ -192,8 +303,33 @@ good_area:
1602 acc_type = parisc_acctyp(code,regs->iir);
1604 - if ((vma->vm_flags & acc_type) != acc_type)
1605 + if ((vma->vm_flags & acc_type) != acc_type) {
1607 +#ifdef CONFIG_PAX_PAGEEXEC
1608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609 + (address & ~3UL) == instruction_pointer(regs))
1611 + up_read(&mm->mmap_sem);
1612 + switch (pax_handle_fetch_fault(regs)) {
1614 +#ifdef CONFIG_PAX_EMUPLT
1619 +#ifdef CONFIG_PAX_EMUTRAMP
1625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626 + do_group_exit(SIGKILL);
1634 * If for any reason at all we couldn't handle the fault, make
1635 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636 --- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637 +++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1642 -extern unsigned long randomize_et_dyn(unsigned long base);
1643 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644 +#define ELF_ET_DYN_BASE (0x20000000)
1646 +#ifdef CONFIG_PAX_ASLR
1647 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1649 +#ifdef __powerpc64__
1650 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1653 +#define PAX_DELTA_MMAP_LEN 15
1654 +#define PAX_DELTA_STACK_LEN 15
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1664 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665 -#define arch_randomize_brk arch_randomize_brk
1667 #endif /* __KERNEL__ */
1670 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671 --- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672 +++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673 @@ -27,6 +27,7 @@ enum km_type {
1681 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682 --- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683 +++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684 @@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1688 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690 +#define VM_STACK_DEFAULT_FLAGS32 \
1691 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1697 +#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1703 #include <asm-generic/getorder.h>
1705 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706 --- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707 +++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1712 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714 +#define VM_DATA_DEFAULT_FLAGS32 \
1715 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1724 +#define ktla_ktva(addr) (addr)
1725 +#define ktva_ktla(addr) (addr)
1727 #ifndef __ASSEMBLY__
1729 #undef STRICT_MM_TYPECHECKS
1730 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731 --- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732 +++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1734 #define _ASM_POWERPC_PGTABLE_H
1737 +#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742 --- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743 +++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748 +#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753 --- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754 +++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764 --- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765 +++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1770 -extern unsigned long arch_align_stack(unsigned long sp);
1771 +#define arch_align_stack(x) ((x) & ~0xfUL)
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776 --- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777 +++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1782 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787 @@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1791 -#ifndef __powerpc64__
1793 -static inline unsigned long copy_from_user(void *to,
1794 - const void __user *from, unsigned long n)
1796 - unsigned long over;
1798 - if (access_ok(VERIFY_READ, from, n))
1799 - return __copy_tofrom_user((__force void __user *)to, from, n);
1800 - if ((unsigned long)from < TASK_SIZE) {
1801 - over = (unsigned long)from + n - TASK_SIZE;
1802 - return __copy_tofrom_user((__force void __user *)to, from,
1808 -static inline unsigned long copy_to_user(void __user *to,
1809 - const void *from, unsigned long n)
1811 - unsigned long over;
1813 - if (access_ok(VERIFY_WRITE, to, n))
1814 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1815 - if ((unsigned long)to < TASK_SIZE) {
1816 - over = (unsigned long)to + n - TASK_SIZE;
1817 - return __copy_tofrom_user(to, (__force void __user *)from,
1823 -#else /* __powerpc64__ */
1825 -#define __copy_in_user(to, from, size) \
1826 - __copy_tofrom_user((to), (from), (size))
1828 -extern unsigned long copy_from_user(void *to, const void __user *from,
1830 -extern unsigned long copy_to_user(void __user *to, const void *from,
1832 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1835 -#endif /* __powerpc64__ */
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1840 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1845 + if (!__builtin_constant_p(n))
1846 + check_object_size(to, n, false);
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1851 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1856 + if (!__builtin_constant_p(n))
1857 + check_object_size(from, n, true);
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1862 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1866 +#ifndef __powerpc64__
1868 +static inline unsigned long __must_check copy_from_user(void *to,
1869 + const void __user *from, unsigned long n)
1871 + unsigned long over;
1876 + if (access_ok(VERIFY_READ, from, n)) {
1877 + if (!__builtin_constant_p(n))
1878 + check_object_size(to, n, false);
1879 + return __copy_tofrom_user((__force void __user *)to, from, n);
1881 + if ((unsigned long)from < TASK_SIZE) {
1882 + over = (unsigned long)from + n - TASK_SIZE;
1883 + if (!__builtin_constant_p(n - over))
1884 + check_object_size(to, n - over, false);
1885 + return __copy_tofrom_user((__force void __user *)to, from,
1891 +static inline unsigned long __must_check copy_to_user(void __user *to,
1892 + const void *from, unsigned long n)
1894 + unsigned long over;
1899 + if (access_ok(VERIFY_WRITE, to, n)) {
1900 + if (!__builtin_constant_p(n))
1901 + check_object_size(from, n, true);
1902 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1904 + if ((unsigned long)to < TASK_SIZE) {
1905 + over = (unsigned long)to + n - TASK_SIZE;
1906 + if (!__builtin_constant_p(n))
1907 + check_object_size(from, n - over, true);
1908 + return __copy_tofrom_user(to, (__force void __user *)from,
1914 +#else /* __powerpc64__ */
1916 +#define __copy_in_user(to, from, size) \
1917 + __copy_tofrom_user((to), (from), (size))
1919 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1921 + if ((long)n < 0 || n > INT_MAX)
1924 + if (!__builtin_constant_p(n))
1925 + check_object_size(to, n, false);
1927 + if (likely(access_ok(VERIFY_READ, from, n)))
1928 + n = __copy_from_user(to, from, n);
1934 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1936 + if ((long)n < 0 || n > INT_MAX)
1939 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940 + if (!__builtin_constant_p(n))
1941 + check_object_size(from, n, true);
1942 + n = __copy_to_user(to, from, n);
1947 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1950 +#endif /* __powerpc64__ */
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958 @@ -495,6 +495,7 @@ storage_fault_common:
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966 @@ -504,8 +505,7 @@ storage_fault_common:
1969 b .ret_from_except_lite
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1976 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979 @@ -848,10 +848,10 @@ handle_page_fault:
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1991 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992 --- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993 +++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998 - printk("Module doesn't contain .plt or .init.plt sections.\n");
1999 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2003 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007 - if (location >= mod->module_core
2008 - && location < mod->module_core + mod->core_size)
2009 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2013 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2017 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024 --- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025 +++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2028 LIST_HEAD(module_bug_list);
2030 +#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2036 + return vmalloc(size);
2039 +void *module_alloc_exec(unsigned long size)
2041 +void *module_alloc(unsigned long size)
2048 return vmalloc_exec(size);
2051 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2055 +#ifdef CONFIG_PAX_KERNEXEC
2056 +void module_free_exec(struct module *mod, void *module_region)
2058 + module_free(mod, module_region);
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2065 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066 --- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067 +++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best chance of getting the
2070 * above info out without failing
2072 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079 @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2089 (void *)current->ret_stack[curr_frame].ret);
2092 @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2096 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2101 @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2106 -unsigned long arch_align_stack(unsigned long sp)
2108 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109 - sp -= get_random_int() & ~PAGE_MASK;
2113 -static inline unsigned long brk_rnd(void)
2115 - unsigned long rnd = 0;
2117 - /* 8MB for 32bit, 1GB for 64bit */
2118 - if (is_32bit_task())
2119 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2121 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2123 - return rnd << PAGE_SHIFT;
2126 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2128 - unsigned long base = mm->brk;
2129 - unsigned long ret;
2131 -#ifdef CONFIG_PPC_STD_MMU_64
2133 - * If we are using 1TB segments and we are allowed to randomise
2134 - * the heap, we can put it above 1TB so it is backed by a 1TB
2135 - * segment. Otherwise the heap will be in the bottom 1TB
2136 - * which always uses 256MB segments and this may result in a
2137 - * performance penalty.
2139 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2143 - ret = PAGE_ALIGN(base + brk_rnd());
2145 - if (ret < mm->brk)
2151 -unsigned long randomize_et_dyn(unsigned long base)
2153 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2160 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161 --- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2167 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173 --- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2178 /* Set up to return from userspace. */
2179 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185 --- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186 +++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187 @@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2191 +extern void gr_handle_kernel_exploit(void);
2193 int die(const char *str, struct pt_regs *regs, long err)
2196 @@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2198 panic("Fatal exception");
2200 + gr_handle_kernel_exploit();
2205 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206 --- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207 +++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212 +#include <asm/mman.h>
2216 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2220 - current->mm->context.vdso_base = 0;
2221 + current->mm->context.vdso_base = ~0UL;
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2225 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2230 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2234 diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235 --- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236 +++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2241 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2243 - if (likely(access_ok(VERIFY_READ, from, n)))
2244 - n = __copy_from_user(to, from, n);
2250 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2252 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2253 - n = __copy_to_user(to, from, n);
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2260 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2264 -EXPORT_SYMBOL(copy_from_user);
2265 -EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2268 diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269 --- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270 +++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275 +#include <linux/slab.h>
2276 +#include <linux/pagemap.h>
2277 +#include <linux/compiler.h>
2278 +#include <linux/unistd.h>
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286 +#include <asm/ptrace.h>
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290 @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2294 +#ifdef CONFIG_PAX_PAGEEXEC
2296 + * PaX: decide what to do with offenders (regs->nip = fault address)
2298 + * returns 1 when task should be killed
2300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2305 +void pax_report_insns(void *pc, void *sp)
2309 + printk(KERN_ERR "PAX: bytes at PC: ");
2310 + for (i = 0; i < 5; i++) {
2312 + if (get_user(c, (unsigned int __user *)pc+i))
2313 + printk(KERN_CONT "???????? ");
2315 + printk(KERN_CONT "%08x ", c);
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324 @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2328 - error_code &= 0x48200000;
2329 + error_code &= 0x58200000;
2331 is_write = error_code & DSISR_ISSTORE;
2333 @@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2337 - if (error_code & 0x10000000)
2338 + if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2341 #endif /* CONFIG_8xx */
2342 @@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2346 - if (error_code & DSISR_PROTFAULT)
2347 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2349 #endif /* CONFIG_PPC_STD_MMU */
2351 @@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2356 +#ifdef CONFIG_PAX_PAGEEXEC
2357 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358 +#ifdef CONFIG_PPC_STD_MMU
2359 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2361 + if (is_exec && regs->nip == address) {
2363 + switch (pax_handle_fetch_fault(regs)) {
2366 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367 + do_group_exit(SIGKILL);
2372 _exception(SIGSEGV, regs, code, address);
2375 diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376 --- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377 +++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2383 +#ifdef CONFIG_PAX_RANDMMAP
2384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2385 + mm->mmap_base += mm->delta_mmap;
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2391 mm->mmap_base = mmap_base();
2393 +#ifdef CONFIG_PAX_RANDMMAP
2394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2395 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2401 diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402 --- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403 +++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2407 vma = find_vma(mm, addr);
2408 - return (!vma || (addr + len) <= vma->vm_start);
2409 + return check_heap_stack_gap(vma, addr, len);
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413 @@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2417 - if (!vma || addr + len <= vma->vm_start) {
2418 + if (check_heap_stack_gap(vma, addr, len)) {
2420 * Remember the place where we stopped the search:
2422 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2426 - addr = mm->mmap_base;
2427 - while (addr > len) {
2428 + if (mm->mmap_base < len)
2431 + addr = mm->mmap_base - len;
2433 + while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2443 vma = find_vma(mm, addr);
2444 - if (!vma || (addr + len) <= vma->vm_start) {
2445 + if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2448 mm->free_area_cache = addr;
2449 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2452 /* try just below the current vma->vm_start */
2453 - addr = vma->vm_start;
2454 + addr = skip_heap_stack_gap(vma, len);
2458 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2462 +#ifdef CONFIG_PAX_RANDMMAP
2463 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470 diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471 --- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472 +++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2477 -extern unsigned long randomize_et_dyn(unsigned long base);
2478 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2481 +#ifdef CONFIG_PAX_ASLR
2482 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2484 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490 @@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2494 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495 -#define arch_randomize_brk arch_randomize_brk
2498 diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499 --- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500 +++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2505 -extern unsigned long arch_align_stack(unsigned long sp);
2506 +#define arch_align_stack(x) ((x) & ~0xfUL)
2508 static inline int tprot(unsigned long addr)
2510 diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511 --- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512 +++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2524 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2534 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2545 diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546 --- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547 +++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2551 This option allows to enable a buffer overflow protection for user
2552 - space programs and it also selects the addressing mode option above.
2553 - The kernel parameter noexec=on will enable this feature and also
2554 - switch the addressing modes, default is disabled. Enabling this (via
2555 - kernel parameter) on machines earlier than IBM System z9 this will
2556 - reduce system performance.
2558 + Enabling this (via kernel parameter) on machines earlier than IBM
2559 + System z9 will reduce system performance.
2561 comment "Code generation options"
2563 diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564 --- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565 +++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570 - me->core_size = ALIGN(me->core_size, 4);
2571 - me->arch.got_offset = me->core_size;
2572 - me->core_size += me->arch.got_size;
2573 - me->arch.plt_offset = me->core_size;
2574 - me->core_size += me->arch.plt_size;
2575 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576 + me->arch.got_offset = me->core_size_rw;
2577 + me->core_size_rw += me->arch.got_size;
2578 + me->arch.plt_offset = me->core_size_rx;
2579 + me->core_size_rx += me->arch.plt_size;
2583 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2587 - gotent = me->module_core + me->arch.got_offset +
2588 + gotent = me->module_core_rw + me->arch.got_offset +
2591 info->got_initialized = 1;
2592 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2597 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2605 - ip = me->module_core + me->arch.plt_offset +
2606 + ip = me->module_core_rx + me->arch.plt_offset +
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614 - val = (Elf_Addr) me->module_core +
2615 + val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2618 val += rela->r_addend - loc;
2619 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2624 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637 diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638 --- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639 +++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2645 -unsigned long arch_align_stack(unsigned long sp)
2647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648 - sp -= get_random_int() & ~PAGE_MASK;
2652 -static inline unsigned long brk_rnd(void)
2654 - /* 8MB for 32bit, 1GB for 64bit */
2655 - if (is_32bit_task())
2656 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2658 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2661 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2663 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2665 - if (ret < mm->brk)
2670 -unsigned long randomize_et_dyn(unsigned long base)
2672 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2674 - if (!(current->flags & PF_RANDOMIZE))
2680 diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681 --- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682 +++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2685 early_param("mem", early_parse_mem);
2687 -unsigned int user_mode = HOME_SPACE_MODE;
2688 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692 @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2697 - * Switch kernel/user addressing modes?
2699 -static int __init early_parse_switch_amode(char *p)
2701 - if (user_mode != SECONDARY_SPACE_MODE)
2702 - user_mode = PRIMARY_SPACE_MODE;
2705 -early_param("switch_amode", early_parse_switch_amode);
2707 static int __init early_parse_user_mode(char *p)
2709 if (p && strcmp(p, "primary") == 0)
2710 @@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2712 early_param("user_mode", early_parse_user_mode);
2714 -#ifdef CONFIG_S390_EXEC_PROTECT
2716 - * Enable execute protection?
2718 -static int __init early_parse_noexec(char *p)
2720 - if (!strncmp(p, "off", 3))
2722 - user_mode = SECONDARY_SPACE_MODE;
2725 -early_param("noexec", early_parse_noexec);
2726 -#endif /* CONFIG_S390_EXEC_PROTECT */
2728 static void setup_addressing_mode(void)
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731 diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732 --- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733 +++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2739 +#ifdef CONFIG_PAX_RANDMMAP
2740 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2741 + mm->mmap_base += mm->delta_mmap;
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2747 mm->mmap_base = mmap_base();
2749 +#ifdef CONFIG_PAX_RANDMMAP
2750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2751 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2757 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2762 +#ifdef CONFIG_PAX_RANDMMAP
2763 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2764 + mm->mmap_base += mm->delta_mmap;
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2770 mm->mmap_base = mmap_base();
2772 +#ifdef CONFIG_PAX_RANDMMAP
2773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2780 diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781 --- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782 +++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783 @@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2786 typedef void (*vi_handler_t)(void);
2787 -extern unsigned long arch_align_stack(unsigned long sp);
2788 +#define arch_align_stack(x) (x)
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792 diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793 --- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794 +++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2797 return task_pt_regs(task)->cp0_epc;
2800 -unsigned long arch_align_stack(unsigned long sp)
2804 diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805 --- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806 +++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2817 @@ -106,7 +105,7 @@ full_search:
2821 - if (likely(!vma || addr + len <= vma->vm_start)) {
2822 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2824 * Remember the place where we stopped the search:
2826 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2829 vma = find_vma(mm, addr);
2830 - if (TASK_SIZE - len >= addr &&
2831 - (!vma || addr + len <= vma->vm_start))
2832 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2836 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840 - if (!vma || addr <= vma->vm_start) {
2841 + if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2845 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2849 - addr = mm->mmap_base-len;
2850 - if (do_colour_align)
2851 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852 + addr = mm->mmap_base - len;
2855 + if (do_colour_align)
2856 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2862 vma = find_vma(mm, addr);
2863 - if (likely(!vma || addr+len <= vma->vm_start)) {
2864 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2868 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2871 /* try just below the current vma->vm_start */
2872 - addr = vma->vm_start-len;
2873 - if (do_colour_align)
2874 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875 - } while (likely(len < vma->vm_start));
2876 + addr = skip_heap_stack_gap(vma, len);
2877 + } while (!IS_ERR_VALUE(addr));
2881 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882 --- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883 +++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-05 20:34:06.000000000 -0400
2885 #define ATOMIC64_INIT(i) { (i) }
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2890 + return v->counter;
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2895 + return v->counter;
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2909 extern void atomic_add(int, atomic_t *);
2910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2918 extern int atomic_add_ret(int, atomic_t *);
2919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2931 + return atomic_add_ret_unchecked(1, v);
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2936 + return atomic64_add_ret_unchecked(1, v);
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2945 + return atomic_add_ret_unchecked(i, v);
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2950 + return atomic64_add_ret_unchecked(i, v);
2954 * atomic_inc_and_test - increment and test
2955 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
2960 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2962 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2963 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
2964 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2966 #define atomic_inc(v) atomic_add(1, v)
2967 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2969 + atomic_add_unchecked(1, v);
2971 #define atomic64_inc(v) atomic64_add(1, v)
2972 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2974 + atomic64_add_unchecked(1, v);
2977 #define atomic_dec(v) atomic_sub(1, v)
2978 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2980 + atomic_sub_unchecked(1, v);
2982 #define atomic64_dec(v) atomic64_sub(1, v)
2983 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2985 + atomic64_sub_unchecked(1, v);
2988 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2989 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2991 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2992 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2993 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2994 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
2996 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3002 - if (unlikely(c == (u)))
3003 + if (unlikely(c == u))
3005 - old = atomic_cmpxchg((v), c, c + (a));
3007 + asm volatile("addcc %2, %0, %0\n"
3009 +#ifdef CONFIG_PAX_REFCOUNT
3014 + : "0" (c), "ir" (a)
3017 + old = atomic_cmpxchg(v, c, new);
3018 if (likely(old == c))
3026 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3027 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3029 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3033 c = atomic64_read(v);
3035 - if (unlikely(c == (u)))
3036 + if (unlikely(c == u))
3038 - old = atomic64_cmpxchg((v), c, c + (a));
3040 + asm volatile("addcc %2, %0, %0\n"
3042 +#ifdef CONFIG_PAX_REFCOUNT
3047 + : "0" (c), "ir" (a)
3050 + old = atomic64_cmpxchg(v, c, new);
3051 if (likely(old == c))
3059 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3060 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3061 --- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3062 +++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3064 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3066 #define L1_CACHE_SHIFT 5
3067 -#define L1_CACHE_BYTES 32
3068 +#define L1_CACHE_BYTES 32UL
3070 #ifdef CONFIG_SPARC32
3071 #define SMP_CACHE_BYTES_SHIFT 5
3072 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3073 --- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3074 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3075 @@ -114,6 +114,13 @@ typedef struct {
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3079 +#ifdef CONFIG_PAX_ASLR
3080 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3082 +#define PAX_DELTA_MMAP_LEN 16
3083 +#define PAX_DELTA_STACK_LEN 16
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this cpu supports. This can NOT be done in userspace
3089 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3090 --- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3091 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3092 @@ -162,6 +162,12 @@ typedef struct {
3093 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3094 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3096 +#ifdef CONFIG_PAX_ASLR
3097 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3099 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3100 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3103 /* This yields a mask that user programs can use to figure out what
3104 instruction set this cpu supports. */
3105 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3106 --- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3107 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3108 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3109 BTFIXUPDEF_INT(page_none)
3110 BTFIXUPDEF_INT(page_copy)
3111 BTFIXUPDEF_INT(page_readonly)
3113 +#ifdef CONFIG_PAX_PAGEEXEC
3114 +BTFIXUPDEF_INT(page_shared_noexec)
3115 +BTFIXUPDEF_INT(page_copy_noexec)
3116 +BTFIXUPDEF_INT(page_readonly_noexec)
3119 BTFIXUPDEF_INT(page_kernel)
3121 #define PMD_SHIFT SUN4C_PMD_SHIFT
3122 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3123 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3124 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3126 +#ifdef CONFIG_PAX_PAGEEXEC
3127 +extern pgprot_t PAGE_SHARED_NOEXEC;
3128 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3129 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3131 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3132 +# define PAGE_COPY_NOEXEC PAGE_COPY
3133 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3136 extern unsigned long page_kernel;
3139 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3140 --- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3141 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3142 @@ -115,6 +115,13 @@
3143 SRMMU_EXEC | SRMMU_REF)
3144 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3145 SRMMU_EXEC | SRMMU_REF)
3147 +#ifdef CONFIG_PAX_PAGEEXEC
3148 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3149 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3150 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3153 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3154 SRMMU_DIRTY | SRMMU_REF)
3156 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3157 --- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3158 +++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-05 19:44:33.000000000 -0400
3159 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3161 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3163 -static void inline arch_read_lock(arch_rwlock_t *lock)
3164 +static inline void arch_read_lock(arch_rwlock_t *lock)
3166 unsigned long tmp1, tmp2;
3168 __asm__ __volatile__ (
3169 "1: ldsw [%2], %0\n"
3171 -"4: add %0, 1, %1\n"
3172 +"4: addcc %0, 1, %1\n"
3174 +#ifdef CONFIG_PAX_REFCOUNT
3178 " cas [%2], %0, %1\n"
3180 " bne,pn %%icc, 1b\n"
3181 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3183 : "=&r" (tmp1), "=&r" (tmp2)
3186 + : "memory", "cc");
3189 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3190 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3194 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3195 "1: ldsw [%2], %0\n"
3196 " brlz,a,pn %0, 2f\n"
3199 +" addcc %0, 1, %1\n"
3201 +#ifdef CONFIG_PAX_REFCOUNT
3205 " cas [%2], %0, %1\n"
3207 " bne,pn %%icc, 1b\n"
3208 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3212 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3213 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3215 unsigned long tmp1, tmp2;
3217 __asm__ __volatile__(
3218 "1: lduw [%2], %0\n"
3220 +" subcc %0, 1, %1\n"
3222 +#ifdef CONFIG_PAX_REFCOUNT
3226 " cas [%2], %0, %1\n"
3228 " bne,pn %%xcc, 1b\n"
3229 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3233 -static void inline arch_write_lock(arch_rwlock_t *lock)
3234 +static inline void arch_write_lock(arch_rwlock_t *lock)
3236 unsigned long mask, tmp1, tmp2;
3238 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3242 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3243 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3245 __asm__ __volatile__(
3247 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3251 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3252 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3254 unsigned long mask, tmp1, tmp2, result;
3256 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3257 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3258 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3259 @@ -50,6 +50,8 @@ struct thread_info {
3260 unsigned long w_saved;
3262 struct restart_block restart_block;
3264 + unsigned long lowest_stack;
3268 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3269 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3270 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3271 @@ -63,6 +63,8 @@ struct thread_info {
3272 struct pt_regs *kern_una_regs;
3273 unsigned int kern_una_insn;
3275 + unsigned long lowest_stack;
3277 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3280 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3281 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3282 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3283 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3285 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3287 - if (n && __access_ok((unsigned long) to, n))
3291 + if (n && __access_ok((unsigned long) to, n)) {
3292 + if (!__builtin_constant_p(n))
3293 + check_object_size(from, n, true);
3294 return __copy_user(to, (__force void __user *) from, n);
3300 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3305 + if (!__builtin_constant_p(n))
3306 + check_object_size(from, n, true);
3308 return __copy_user(to, (__force void __user *) from, n);
3311 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3313 - if (n && __access_ok((unsigned long) from, n))
3317 + if (n && __access_ok((unsigned long) from, n)) {
3318 + if (!__builtin_constant_p(n))
3319 + check_object_size(to, n, false);
3320 return __copy_user((__force void __user *) to, from, n);
3326 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 return __copy_user((__force void __user *) to, from, n);
3334 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3335 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3336 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3338 #include <linux/compiler.h>
3339 #include <linux/string.h>
3340 #include <linux/thread_info.h>
3341 +#include <linux/kernel.h>
3342 #include <asm/asi.h>
3343 #include <asm/system.h>
3344 #include <asm/spitfire.h>
3345 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3346 static inline unsigned long __must_check
3347 copy_from_user(void *to, const void __user *from, unsigned long size)
3349 - unsigned long ret = ___copy_from_user(to, from, size);
3350 + unsigned long ret;
3352 + if ((long)size < 0 || size > INT_MAX)
3355 + if (!__builtin_constant_p(size))
3356 + check_object_size(to, size, false);
3358 + ret = ___copy_from_user(to, from, size);
3360 ret = copy_from_user_fixup(to, from, size);
3362 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3363 static inline unsigned long __must_check
3364 copy_to_user(void __user *to, const void *from, unsigned long size)
3366 - unsigned long ret = ___copy_to_user(to, from, size);
3367 + unsigned long ret;
3369 + if ((long)size < 0 || size > INT_MAX)
3372 + if (!__builtin_constant_p(size))
3373 + check_object_size(from, size, true);
3375 + ret = ___copy_to_user(to, from, size);
3377 ret = copy_to_user_fixup(to, from, size);
3379 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3380 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3381 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3383 #ifndef ___ASM_SPARC_UACCESS_H
3384 #define ___ASM_SPARC_UACCESS_H
3387 +#ifndef __ASSEMBLY__
3388 +#include <linux/types.h>
3389 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3393 #if defined(__sparc__) && defined(__arch64__)
3394 #include <asm/uaccess_64.h>
3396 diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3397 --- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3398 +++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3403 -ccflags-y := -Werror
3404 +#ccflags-y := -Werror
3406 extra-y := head_$(BITS).o
3407 extra-y += init_task.o
3408 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3409 --- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3410 +++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3411 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3412 rw->ins[4], rw->ins[5],
3415 - printk("%pS\n", (void *) rw->ins[7]);
3416 + printk("%pA\n", (void *) rw->ins[7]);
3417 rw = (struct reg_window32 *) rw->ins[6];
3419 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3420 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3423 r->psr, r->pc, r->npc, r->y, print_tainted());
3424 - printk("PC: <%pS>\n", (void *) r->pc);
3425 + printk("PC: <%pA>\n", (void *) r->pc);
3426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3432 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3433 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3437 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3438 rw = (struct reg_window32 *) fp;
3440 printk("[%08lx : ", pc);
3441 - printk("%pS ] ", (void *) pc);
3442 + printk("%pA ] ", (void *) pc);
3444 } while (++count < 16);
3446 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3447 --- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3448 +++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3449 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3450 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3451 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3452 if (regs->tstate & TSTATE_PRIV)
3453 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3454 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3457 void show_regs(struct pt_regs *regs)
3459 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3460 regs->tpc, regs->tnpc, regs->y, print_tainted());
3461 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3462 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3463 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3464 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3466 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3467 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3468 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3470 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3471 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3472 show_regwindow(regs);
3473 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3475 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3476 ((tp && tp->task) ? tp->task->pid : -1));
3478 if (gp->tstate & TSTATE_PRIV) {
3479 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3480 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3484 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3485 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3486 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3487 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3488 if (ARCH_SUN4C && len > 0x20000000)
3491 - addr = TASK_UNMAPPED_BASE;
3492 + addr = current->mm->mmap_base;
3494 if (flags & MAP_SHARED)
3495 addr = COLOUR_ALIGN(addr);
3496 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3498 if (TASK_SIZE - PAGE_SIZE - len < addr)
3500 - if (!vmm || addr + len <= vmm->vm_start)
3501 + if (check_heap_stack_gap(vmm, addr, len))
3504 if (flags & MAP_SHARED)
3505 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3506 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3507 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3508 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3509 /* We do not accept a shared mapping if it would violate
3510 * cache aliasing constraints.
3512 - if ((flags & MAP_SHARED) &&
3513 + if ((filp || (flags & MAP_SHARED)) &&
3514 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3517 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3518 if (filp || (flags & MAP_SHARED))
3521 +#ifdef CONFIG_PAX_RANDMMAP
3522 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3527 addr = COLOUR_ALIGN(addr, pgoff);
3528 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3529 addr = PAGE_ALIGN(addr);
3531 vma = find_vma(mm, addr);
3532 - if (task_size - len >= addr &&
3533 - (!vma || addr + len <= vma->vm_start))
3534 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3538 if (len > mm->cached_hole_size) {
3539 - start_addr = addr = mm->free_area_cache;
3540 + start_addr = addr = mm->free_area_cache;
3542 - start_addr = addr = TASK_UNMAPPED_BASE;
3543 + start_addr = addr = mm->mmap_base;
3544 mm->cached_hole_size = 0;
3547 @@ -174,14 +177,14 @@ full_search:
3548 vma = find_vma(mm, VA_EXCLUDE_END);
3550 if (unlikely(task_size < addr)) {
3551 - if (start_addr != TASK_UNMAPPED_BASE) {
3552 - start_addr = addr = TASK_UNMAPPED_BASE;
3553 + if (start_addr != mm->mmap_base) {
3554 + start_addr = addr = mm->mmap_base;
3555 mm->cached_hole_size = 0;
3560 - if (likely(!vma || addr + len <= vma->vm_start)) {
3561 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3563 * Remember the place where we stopped the search:
3565 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3566 /* We do not accept a shared mapping if it would violate
3567 * cache aliasing constraints.
3569 - if ((flags & MAP_SHARED) &&
3570 + if ((filp || (flags & MAP_SHARED)) &&
3571 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3574 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3575 addr = PAGE_ALIGN(addr);
3577 vma = find_vma(mm, addr);
3578 - if (task_size - len >= addr &&
3579 - (!vma || addr + len <= vma->vm_start))
3580 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3584 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 /* make sure it can fit in the remaining address space */
3586 if (likely(addr > len)) {
3587 vma = find_vma(mm, addr-len);
3588 - if (!vma || addr <= vma->vm_start) {
3589 + if (check_heap_stack_gap(vma, addr - len, len)) {
3590 /* remember the address as a hint for next time */
3591 return (mm->free_area_cache = addr-len);
3593 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3594 if (unlikely(mm->mmap_base < len))
3597 - addr = mm->mmap_base-len;
3598 - if (do_color_align)
3599 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3600 + addr = mm->mmap_base - len;
3603 + if (do_color_align)
3604 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3606 * Lookup failure means no vma is above this address,
3607 * else if new region fits below vma->vm_start,
3608 * return with success:
3610 vma = find_vma(mm, addr);
3611 - if (likely(!vma || addr+len <= vma->vm_start)) {
3612 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3613 /* remember the address as a hint for next time */
3614 return (mm->free_area_cache = addr);
3616 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3617 mm->cached_hole_size = vma->vm_start - addr;
3619 /* try just below the current vma->vm_start */
3620 - addr = vma->vm_start-len;
3621 - if (do_color_align)
3622 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3623 - } while (likely(len < vma->vm_start));
3624 + addr = skip_heap_stack_gap(vma, len);
3625 + } while (!IS_ERR_VALUE(addr));
3629 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3630 gap == RLIM_INFINITY ||
3631 sysctl_legacy_va_layout) {
3632 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3634 +#ifdef CONFIG_PAX_RANDMMAP
3635 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3636 + mm->mmap_base += mm->delta_mmap;
3639 mm->get_unmapped_area = arch_get_unmapped_area;
3640 mm->unmap_area = arch_unmap_area;
3642 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3643 gap = (task_size / 6 * 5);
3645 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3647 +#ifdef CONFIG_PAX_RANDMMAP
3648 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3649 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3652 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3653 mm->unmap_area = arch_unmap_area_topdown;
3655 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3656 --- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3657 +++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3658 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3659 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3660 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3662 +extern void gr_handle_kernel_exploit(void);
3664 void die_if_kernel(char *str, struct pt_regs *regs)
3666 static int die_counter;
3667 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3669 (((unsigned long) rw) >= PAGE_OFFSET) &&
3670 !(((unsigned long) rw) & 0x7)) {
3671 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3672 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3673 (void *) rw->ins[7]);
3674 rw = (struct reg_window32 *)rw->ins[6];
3677 printk("Instruction DUMP:");
3678 instruction_dump ((unsigned long *) regs->pc);
3679 - if(regs->psr & PSR_PS)
3680 + if(regs->psr & PSR_PS) {
3681 + gr_handle_kernel_exploit();
3687 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3688 --- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3689 +++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3690 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3692 p->trapstack[i].tstate, p->trapstack[i].tpc,
3693 p->trapstack[i].tnpc, p->trapstack[i].tt);
3694 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3695 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3699 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3702 if (regs->tstate & TSTATE_PRIV) {
3704 +#ifdef CONFIG_PAX_REFCOUNT
3706 + pax_report_refcount_overflow(regs);
3709 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3710 die_if_kernel(buffer, regs);
3712 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3713 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3718 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3719 0, lvl, SIGTRAP) == NOTIFY_STOP)
3722 +#ifdef CONFIG_PAX_REFCOUNT
3724 + pax_report_refcount_overflow(regs);
3727 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3729 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3730 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3731 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3732 printk("%s" "ERROR(%d): ",
3733 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3734 - printk("TPC<%pS>\n", (void *) regs->tpc);
3735 + printk("TPC<%pA>\n", (void *) regs->tpc);
3736 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3737 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3738 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3739 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3741 (type & 0x1) ? 'I' : 'D',
3743 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3744 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3745 panic("Irrecoverable Cheetah+ parity error.");
3748 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3750 (type & 0x1) ? 'I' : 'D',
3752 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3753 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3756 struct sun4v_error_entry {
3757 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3759 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3761 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3762 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3763 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3764 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3765 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3766 (void *) regs->u_regs[UREG_I7]);
3767 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3768 "pte[%lx] error[%lx]\n",
3769 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3771 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3773 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3774 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3775 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3776 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3777 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3778 (void *) regs->u_regs[UREG_I7]);
3779 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3780 "pte[%lx] error[%lx]\n",
3781 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3782 fp = (unsigned long)sf->fp + STACK_BIAS;
3785 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3786 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3788 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3789 int index = tsk->curr_ret_stack;
3790 if (tsk->ret_stack && index >= graph) {
3791 pc = tsk->ret_stack[index - graph].ret;
3792 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3793 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3797 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3798 return (struct reg_window *) (fp + STACK_BIAS);
3801 +extern void gr_handle_kernel_exploit(void);
3803 void die_if_kernel(char *str, struct pt_regs *regs)
3805 static int die_counter;
3806 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3809 kstack_valid(tp, (unsigned long) rw)) {
3810 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3811 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3812 (void *) rw->ins[7]);
3814 rw = kernel_stack_up(rw);
3815 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3817 user_instruction_dump ((unsigned int __user *) regs->tpc);
3819 - if (regs->tstate & TSTATE_PRIV)
3820 + if (regs->tstate & TSTATE_PRIV) {
3821 + gr_handle_kernel_exploit();
3826 EXPORT_SYMBOL(die_if_kernel);
3827 diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3828 --- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3829 +++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3830 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3831 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3833 if (__ratelimit(&ratelimit)) {
3834 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3835 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3836 regs->tpc, (void *) regs->tpc);
3839 diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3840 --- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3841 +++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3843 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3847 + addcc %g1, %o0, %g7
3849 +#ifdef CONFIG_PAX_REFCOUNT
3855 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3856 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3857 2: BACKOFF_SPIN(%o2, %o3, 1b)
3858 .size atomic_add, .-atomic_add
3860 + .globl atomic_add_unchecked
3861 + .type atomic_add_unchecked,#function
3862 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3863 + BACKOFF_SETUP(%o2)
3866 + cas [%o1], %g1, %g7
3872 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3873 + .size atomic_add_unchecked, .-atomic_add_unchecked
3876 .type atomic_sub,#function
3877 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3881 + subcc %g1, %o0, %g7
3883 +#ifdef CONFIG_PAX_REFCOUNT
3889 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3890 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3891 2: BACKOFF_SPIN(%o2, %o3, 1b)
3892 .size atomic_sub, .-atomic_sub
3894 + .globl atomic_sub_unchecked
3895 + .type atomic_sub_unchecked,#function
3896 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3897 + BACKOFF_SETUP(%o2)
3900 + cas [%o1], %g1, %g7
3906 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3907 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3909 .globl atomic_add_ret
3910 .type atomic_add_ret,#function
3911 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3915 + addcc %g1, %o0, %g7
3917 +#ifdef CONFIG_PAX_REFCOUNT
3923 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3924 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3925 2: BACKOFF_SPIN(%o2, %o3, 1b)
3926 .size atomic_add_ret, .-atomic_add_ret
3928 + .globl atomic_add_ret_unchecked
3929 + .type atomic_add_ret_unchecked,#function
3930 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3931 + BACKOFF_SETUP(%o2)
3933 + addcc %g1, %o0, %g7
3934 + cas [%o1], %g1, %g7
3941 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3942 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3944 .globl atomic_sub_ret
3945 .type atomic_sub_ret,#function
3946 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3950 + subcc %g1, %o0, %g7
3952 +#ifdef CONFIG_PAX_REFCOUNT
3958 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3959 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3960 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3964 + addcc %g1, %o0, %g7
3966 +#ifdef CONFIG_PAX_REFCOUNT
3970 casx [%o1], %g1, %g7
3972 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3973 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3974 2: BACKOFF_SPIN(%o2, %o3, 1b)
3975 .size atomic64_add, .-atomic64_add
3977 + .globl atomic64_add_unchecked
3978 + .type atomic64_add_unchecked,#function
3979 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3980 + BACKOFF_SETUP(%o2)
3982 + addcc %g1, %o0, %g7
3983 + casx [%o1], %g1, %g7
3989 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3990 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3993 .type atomic64_sub,#function
3994 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3998 + subcc %g1, %o0, %g7
4000 +#ifdef CONFIG_PAX_REFCOUNT
4004 casx [%o1], %g1, %g7
4006 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4007 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4008 2: BACKOFF_SPIN(%o2, %o3, 1b)
4009 .size atomic64_sub, .-atomic64_sub
4011 + .globl atomic64_sub_unchecked
4012 + .type atomic64_sub_unchecked,#function
4013 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4014 + BACKOFF_SETUP(%o2)
4016 + subcc %g1, %o0, %g7
4017 + casx [%o1], %g1, %g7
4023 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4024 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4026 .globl atomic64_add_ret
4027 .type atomic64_add_ret,#function
4028 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4032 + addcc %g1, %o0, %g7
4034 +#ifdef CONFIG_PAX_REFCOUNT
4038 casx [%o1], %g1, %g7
4040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4041 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4042 2: BACKOFF_SPIN(%o2, %o3, 1b)
4043 .size atomic64_add_ret, .-atomic64_add_ret
4045 + .globl atomic64_add_ret_unchecked
4046 + .type atomic64_add_ret_unchecked,#function
4047 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4048 + BACKOFF_SETUP(%o2)
4050 + addcc %g1, %o0, %g7
4051 + casx [%o1], %g1, %g7
4058 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4059 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4061 .globl atomic64_sub_ret
4062 .type atomic64_sub_ret,#function
4063 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4067 + subcc %g1, %o0, %g7
4069 +#ifdef CONFIG_PAX_REFCOUNT
4073 casx [%o1], %g1, %g7
4075 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4076 diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4077 --- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4078 +++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4079 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4081 /* Atomic counter implementation. */
4082 EXPORT_SYMBOL(atomic_add);
4083 +EXPORT_SYMBOL(atomic_add_unchecked);
4084 EXPORT_SYMBOL(atomic_add_ret);
4085 EXPORT_SYMBOL(atomic_sub);
4086 +EXPORT_SYMBOL(atomic_sub_unchecked);
4087 EXPORT_SYMBOL(atomic_sub_ret);
4088 EXPORT_SYMBOL(atomic64_add);
4089 +EXPORT_SYMBOL(atomic64_add_unchecked);
4090 EXPORT_SYMBOL(atomic64_add_ret);
4091 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4092 EXPORT_SYMBOL(atomic64_sub);
4093 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4094 EXPORT_SYMBOL(atomic64_sub_ret);
4096 /* Atomic bit operations. */
4097 diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4098 --- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4099 +++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4103 asflags-y := -ansi -DST_DIV0=0x02
4104 -ccflags-y := -Werror
4105 +#ccflags-y := -Werror
4107 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4108 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4109 diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4110 --- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4111 +++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4112 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4113 # Export what is needed by arch/sparc/boot/Makefile
4114 export VMLINUX_INIT VMLINUX_MAIN
4115 VMLINUX_INIT := $(head-y) $(init-y)
4116 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4117 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4118 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4119 VMLINUX_MAIN += $(drivers-y) $(net-y)
4121 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4122 --- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4123 +++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4125 #include <linux/interrupt.h>
4126 #include <linux/module.h>
4127 #include <linux/kdebug.h>
4128 +#include <linux/slab.h>
4129 +#include <linux/pagemap.h>
4130 +#include <linux/compiler.h>
4132 #include <asm/system.h>
4133 #include <asm/page.h>
4134 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4135 return safe_compute_effective_address(regs, insn);
4138 +#ifdef CONFIG_PAX_PAGEEXEC
4139 +#ifdef CONFIG_PAX_DLRESOLVE
4140 +static void pax_emuplt_close(struct vm_area_struct *vma)
4142 + vma->vm_mm->call_dl_resolve = 0UL;
4145 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4147 + unsigned int *kaddr;
4149 + vmf->page = alloc_page(GFP_HIGHUSER);
4151 + return VM_FAULT_OOM;
4153 + kaddr = kmap(vmf->page);
4154 + memset(kaddr, 0, PAGE_SIZE);
4155 + kaddr[0] = 0x9DE3BFA8U; /* save */
4156 + flush_dcache_page(vmf->page);
4157 + kunmap(vmf->page);
4158 + return VM_FAULT_MAJOR;
4161 +static const struct vm_operations_struct pax_vm_ops = {
4162 + .close = pax_emuplt_close,
4163 + .fault = pax_emuplt_fault
4166 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4170 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4171 + vma->vm_mm = current->mm;
4172 + vma->vm_start = addr;
4173 + vma->vm_end = addr + PAGE_SIZE;
4174 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4175 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4176 + vma->vm_ops = &pax_vm_ops;
4178 + ret = insert_vm_struct(current->mm, vma);
4182 + ++current->mm->total_vm;
4188 + * PaX: decide what to do with offenders (regs->pc = fault address)
4190 + * returns 1 when task should be killed
4191 + * 2 when patched PLT trampoline was detected
4192 + * 3 when unpatched PLT trampoline was detected
4194 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4197 +#ifdef CONFIG_PAX_EMUPLT
4200 + do { /* PaX: patched PLT emulation #1 */
4201 + unsigned int sethi1, sethi2, jmpl;
4203 + err = get_user(sethi1, (unsigned int *)regs->pc);
4204 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4205 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4210 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4211 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4212 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4214 + unsigned int addr;
4216 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4217 + addr = regs->u_regs[UREG_G1];
4218 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4220 + regs->npc = addr+4;
4225 + { /* PaX: patched PLT emulation #2 */
4228 + err = get_user(ba, (unsigned int *)regs->pc);
4230 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4231 + unsigned int addr;
4233 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4235 + regs->npc = addr+4;
4240 + do { /* PaX: patched PLT emulation #3 */
4241 + unsigned int sethi, jmpl, nop;
4243 + err = get_user(sethi, (unsigned int *)regs->pc);
4244 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4245 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4250 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4251 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4252 + nop == 0x01000000U)
4254 + unsigned int addr;
4256 + addr = (sethi & 0x003FFFFFU) << 10;
4257 + regs->u_regs[UREG_G1] = addr;
4258 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4260 + regs->npc = addr+4;
4265 + do { /* PaX: unpatched PLT emulation step 1 */
4266 + unsigned int sethi, ba, nop;
4268 + err = get_user(sethi, (unsigned int *)regs->pc);
4269 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4270 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4275 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4276 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4277 + nop == 0x01000000U)
4279 + unsigned int addr, save, call;
4281 + if ((ba & 0xFFC00000U) == 0x30800000U)
4282 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4284 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4286 + err = get_user(save, (unsigned int *)addr);
4287 + err |= get_user(call, (unsigned int *)(addr+4));
4288 + err |= get_user(nop, (unsigned int *)(addr+8));
4292 +#ifdef CONFIG_PAX_DLRESOLVE
4293 + if (save == 0x9DE3BFA8U &&
4294 + (call & 0xC0000000U) == 0x40000000U &&
4295 + nop == 0x01000000U)
4297 + struct vm_area_struct *vma;
4298 + unsigned long call_dl_resolve;
4300 + down_read(¤t->mm->mmap_sem);
4301 + call_dl_resolve = current->mm->call_dl_resolve;
4302 + up_read(¤t->mm->mmap_sem);
4303 + if (likely(call_dl_resolve))
4306 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4308 + down_write(¤t->mm->mmap_sem);
4309 + if (current->mm->call_dl_resolve) {
4310 + call_dl_resolve = current->mm->call_dl_resolve;
4311 + up_write(¤t->mm->mmap_sem);
4313 + kmem_cache_free(vm_area_cachep, vma);
4317 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4318 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4319 + up_write(¤t->mm->mmap_sem);
4321 + kmem_cache_free(vm_area_cachep, vma);
4325 + if (pax_insert_vma(vma, call_dl_resolve)) {
4326 + up_write(¤t->mm->mmap_sem);
4327 + kmem_cache_free(vm_area_cachep, vma);
4331 + current->mm->call_dl_resolve = call_dl_resolve;
4332 + up_write(¤t->mm->mmap_sem);
4335 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4336 + regs->pc = call_dl_resolve;
4337 + regs->npc = addr+4;
4342 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4343 + if ((save & 0xFFC00000U) == 0x05000000U &&
4344 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4345 + nop == 0x01000000U)
4347 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4348 + regs->u_regs[UREG_G2] = addr + 4;
4349 + addr = (save & 0x003FFFFFU) << 10;
4350 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4352 + regs->npc = addr+4;
4358 + do { /* PaX: unpatched PLT emulation step 2 */
4359 + unsigned int save, call, nop;
4361 + err = get_user(save, (unsigned int *)(regs->pc-4));
4362 + err |= get_user(call, (unsigned int *)regs->pc);
4363 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4367 + if (save == 0x9DE3BFA8U &&
4368 + (call & 0xC0000000U) == 0x40000000U &&
4369 + nop == 0x01000000U)
4371 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4373 + regs->u_regs[UREG_RETPC] = regs->pc;
4374 + regs->pc = dl_resolve;
4375 + regs->npc = dl_resolve+4;
4384 +void pax_report_insns(void *pc, void *sp)
4388 + printk(KERN_ERR "PAX: bytes at PC: ");
4389 + for (i = 0; i < 8; i++) {
4391 + if (get_user(c, (unsigned int *)pc+i))
4392 + printk(KERN_CONT "???????? ");
4394 + printk(KERN_CONT "%08x ", c);
4400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4403 @@ -281,6 +546,24 @@ good_area:
4404 if(!(vma->vm_flags & VM_WRITE))
4408 +#ifdef CONFIG_PAX_PAGEEXEC
4409 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4410 + up_read(&mm->mmap_sem);
4411 + switch (pax_handle_fetch_fault(regs)) {
4413 +#ifdef CONFIG_PAX_EMUPLT
4420 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4421 + do_group_exit(SIGKILL);
4425 /* Allow reads even for write-only mappings */
4426 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4428 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4429 --- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4430 +++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4432 #include <linux/kprobes.h>
4433 #include <linux/kdebug.h>
4434 #include <linux/percpu.h>
4435 +#include <linux/slab.h>
4436 +#include <linux/pagemap.h>
4437 +#include <linux/compiler.h>
4439 #include <asm/page.h>
4440 #include <asm/pgtable.h>
4441 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4442 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4444 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4445 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4446 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4447 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4449 unhandled_fault(regs->tpc, current, regs);
4450 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4454 +#ifdef CONFIG_PAX_PAGEEXEC
4455 +#ifdef CONFIG_PAX_DLRESOLVE
4456 +static void pax_emuplt_close(struct vm_area_struct *vma)
4458 + vma->vm_mm->call_dl_resolve = 0UL;
4461 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4463 + unsigned int *kaddr;
4465 + vmf->page = alloc_page(GFP_HIGHUSER);
4467 + return VM_FAULT_OOM;
4469 + kaddr = kmap(vmf->page);
4470 + memset(kaddr, 0, PAGE_SIZE);
4471 + kaddr[0] = 0x9DE3BFA8U; /* save */
4472 + flush_dcache_page(vmf->page);
4473 + kunmap(vmf->page);
4474 + return VM_FAULT_MAJOR;
4477 +static const struct vm_operations_struct pax_vm_ops = {
4478 + .close = pax_emuplt_close,
4479 + .fault = pax_emuplt_fault
4482 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4486 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4487 + vma->vm_mm = current->mm;
4488 + vma->vm_start = addr;
4489 + vma->vm_end = addr + PAGE_SIZE;
4490 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4491 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4492 + vma->vm_ops = &pax_vm_ops;
4494 + ret = insert_vm_struct(current->mm, vma);
4498 + ++current->mm->total_vm;
4504 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4506 + * returns 1 when task should be killed
4507 + * 2 when patched PLT trampoline was detected
4508 + * 3 when unpatched PLT trampoline was detected
4510 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4513 +#ifdef CONFIG_PAX_EMUPLT
4516 + do { /* PaX: patched PLT emulation #1 */
4517 + unsigned int sethi1, sethi2, jmpl;
4519 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4520 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4521 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4526 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4527 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4528 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4530 + unsigned long addr;
4532 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4533 + addr = regs->u_regs[UREG_G1];
4534 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4536 + if (test_thread_flag(TIF_32BIT))
4537 + addr &= 0xFFFFFFFFUL;
4540 + regs->tnpc = addr+4;
4545 + { /* PaX: patched PLT emulation #2 */
4548 + err = get_user(ba, (unsigned int *)regs->tpc);
4550 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4551 + unsigned long addr;
4553 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4555 + if (test_thread_flag(TIF_32BIT))
4556 + addr &= 0xFFFFFFFFUL;
4559 + regs->tnpc = addr+4;
4564 + do { /* PaX: patched PLT emulation #3 */
4565 + unsigned int sethi, jmpl, nop;
4567 + err = get_user(sethi, (unsigned int *)regs->tpc);
4568 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4569 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4576 + nop == 0x01000000U)
4578 + unsigned long addr;
4580 + addr = (sethi & 0x003FFFFFU) << 10;
4581 + regs->u_regs[UREG_G1] = addr;
4582 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4588 + regs->tnpc = addr+4;
4593 + do { /* PaX: patched PLT emulation #4 */
4594 + unsigned int sethi, mov1, call, mov2;
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4604 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4605 + mov1 == 0x8210000FU &&
4606 + (call & 0xC0000000U) == 0x40000000U &&
4607 + mov2 == 0x9E100001U)
4609 + unsigned long addr;
4611 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4612 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4614 + if (test_thread_flag(TIF_32BIT))
4615 + addr &= 0xFFFFFFFFUL;
4618 + regs->tnpc = addr+4;
4623 + do { /* PaX: patched PLT emulation #5 */
4624 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4626 + err = get_user(sethi, (unsigned int *)regs->tpc);
4627 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4628 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4629 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4630 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4631 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4632 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4633 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4638 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4639 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4640 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4641 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4642 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4643 + sllx == 0x83287020U &&
4644 + jmpl == 0x81C04005U &&
4645 + nop == 0x01000000U)
4647 + unsigned long addr;
4649 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4650 + regs->u_regs[UREG_G1] <<= 32;
4651 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4652 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4654 + regs->tnpc = addr+4;
4659 + do { /* PaX: patched PLT emulation #6 */
4660 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4662 + err = get_user(sethi, (unsigned int *)regs->tpc);
4663 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4664 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4665 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4666 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4667 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4675 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4676 + sllx == 0x83287020U &&
4677 + (or & 0xFFFFE000U) == 0x8A116000U &&
4678 + jmpl == 0x81C04005U &&
4679 + nop == 0x01000000U)
4681 + unsigned long addr;
4683 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4684 + regs->u_regs[UREG_G1] <<= 32;
4685 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4686 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4688 + regs->tnpc = addr+4;
4693 + do { /* PaX: unpatched PLT emulation step 1 */
4694 + unsigned int sethi, ba, nop;
4696 + err = get_user(sethi, (unsigned int *)regs->tpc);
4697 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4698 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4703 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4704 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4705 + nop == 0x01000000U)
4707 + unsigned long addr;
4708 + unsigned int save, call;
4709 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4711 + if ((ba & 0xFFC00000U) == 0x30800000U)
4712 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4714 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4716 + if (test_thread_flag(TIF_32BIT))
4717 + addr &= 0xFFFFFFFFUL;
4719 + err = get_user(save, (unsigned int *)addr);
4720 + err |= get_user(call, (unsigned int *)(addr+4));
4721 + err |= get_user(nop, (unsigned int *)(addr+8));
4725 +#ifdef CONFIG_PAX_DLRESOLVE
4726 + if (save == 0x9DE3BFA8U &&
4727 + (call & 0xC0000000U) == 0x40000000U &&
4728 + nop == 0x01000000U)
4730 + struct vm_area_struct *vma;
4731 + unsigned long call_dl_resolve;
4733 + down_read(¤t->mm->mmap_sem);
4734 + call_dl_resolve = current->mm->call_dl_resolve;
4735 + up_read(¤t->mm->mmap_sem);
4736 + if (likely(call_dl_resolve))
4739 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4741 + down_write(¤t->mm->mmap_sem);
4742 + if (current->mm->call_dl_resolve) {
4743 + call_dl_resolve = current->mm->call_dl_resolve;
4744 + up_write(¤t->mm->mmap_sem);
4746 + kmem_cache_free(vm_area_cachep, vma);
4750 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752 + up_write(¤t->mm->mmap_sem);
4754 + kmem_cache_free(vm_area_cachep, vma);
4758 + if (pax_insert_vma(vma, call_dl_resolve)) {
4759 + up_write(¤t->mm->mmap_sem);
4760 + kmem_cache_free(vm_area_cachep, vma);
4764 + current->mm->call_dl_resolve = call_dl_resolve;
4765 + up_write(¤t->mm->mmap_sem);
4768 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769 + regs->tpc = call_dl_resolve;
4770 + regs->tnpc = addr+4;
4775 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776 + if ((save & 0xFFC00000U) == 0x05000000U &&
4777 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4778 + nop == 0x01000000U)
4780 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781 + regs->u_regs[UREG_G2] = addr + 4;
4782 + addr = (save & 0x003FFFFFU) << 10;
4783 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4785 + if (test_thread_flag(TIF_32BIT))
4786 + addr &= 0xFFFFFFFFUL;
4789 + regs->tnpc = addr+4;
4793 + /* PaX: 64-bit PLT stub */
4794 + err = get_user(sethi1, (unsigned int *)addr);
4795 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4796 + err |= get_user(or1, (unsigned int *)(addr+8));
4797 + err |= get_user(or2, (unsigned int *)(addr+12));
4798 + err |= get_user(sllx, (unsigned int *)(addr+16));
4799 + err |= get_user(add, (unsigned int *)(addr+20));
4800 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4801 + err |= get_user(nop, (unsigned int *)(addr+28));
4805 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4806 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4807 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4808 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4809 + sllx == 0x89293020U &&
4810 + add == 0x8A010005U &&
4811 + jmpl == 0x89C14000U &&
4812 + nop == 0x01000000U)
4814 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4815 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4816 + regs->u_regs[UREG_G4] <<= 32;
4817 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4818 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4819 + regs->u_regs[UREG_G4] = addr + 24;
4820 + addr = regs->u_regs[UREG_G5];
4822 + regs->tnpc = addr+4;
4828 +#ifdef CONFIG_PAX_DLRESOLVE
4829 + do { /* PaX: unpatched PLT emulation step 2 */
4830 + unsigned int save, call, nop;
4832 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4833 + err |= get_user(call, (unsigned int *)regs->tpc);
4834 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4838 + if (save == 0x9DE3BFA8U &&
4839 + (call & 0xC0000000U) == 0x40000000U &&
4840 + nop == 0x01000000U)
4842 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4844 + if (test_thread_flag(TIF_32BIT))
4845 + dl_resolve &= 0xFFFFFFFFUL;
4847 + regs->u_regs[UREG_RETPC] = regs->tpc;
4848 + regs->tpc = dl_resolve;
4849 + regs->tnpc = dl_resolve+4;
4855 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4856 + unsigned int sethi, ba, nop;
4858 + err = get_user(sethi, (unsigned int *)regs->tpc);
4859 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4860 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4865 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4866 + (ba & 0xFFF00000U) == 0x30600000U &&
4867 + nop == 0x01000000U)
4869 + unsigned long addr;
4871 + addr = (sethi & 0x003FFFFFU) << 10;
4872 + regs->u_regs[UREG_G1] = addr;
4873 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4875 + if (test_thread_flag(TIF_32BIT))
4876 + addr &= 0xFFFFFFFFUL;
4879 + regs->tnpc = addr+4;
4889 +void pax_report_insns(void *pc, void *sp)
4893 + printk(KERN_ERR "PAX: bytes at PC: ");
4894 + for (i = 0; i < 8; i++) {
4896 + if (get_user(c, (unsigned int *)pc+i))
4897 + printk(KERN_CONT "???????? ");
4899 + printk(KERN_CONT "%08x ", c);
4905 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4907 struct mm_struct *mm = current->mm;
4908 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4912 +#ifdef CONFIG_PAX_PAGEEXEC
4913 + /* PaX: detect ITLB misses on non-exec pages */
4914 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4915 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4917 + if (address != regs->tpc)
4920 + up_read(&mm->mmap_sem);
4921 + switch (pax_handle_fetch_fault(regs)) {
4923 +#ifdef CONFIG_PAX_EMUPLT
4930 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4931 + do_group_exit(SIGKILL);
4935 /* Pure DTLB misses do not tell us whether the fault causing
4936 * load/store/atomic was a write or not, it only says that there
4937 * was no match. So in such a case we (carefully) read the
4938 diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4939 --- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4940 +++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4941 @@ -68,7 +68,7 @@ full_search:
4945 - if (likely(!vma || addr + len <= vma->vm_start)) {
4946 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4948 * Remember the place where we stopped the search:
4950 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4951 /* make sure it can fit in the remaining address space */
4952 if (likely(addr > len)) {
4953 vma = find_vma(mm, addr-len);
4954 - if (!vma || addr <= vma->vm_start) {
4955 + if (check_heap_stack_gap(vma, addr - len, len)) {
4956 /* remember the address as a hint for next time */
4957 return (mm->free_area_cache = addr-len);
4959 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4960 if (unlikely(mm->mmap_base < len))
4963 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4964 + addr = mm->mmap_base - len;
4967 + addr &= HPAGE_MASK;
4969 * Lookup failure means no vma is above this address,
4970 * else if new region fits below vma->vm_start,
4971 * return with success:
4973 vma = find_vma(mm, addr);
4974 - if (likely(!vma || addr+len <= vma->vm_start)) {
4975 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4976 /* remember the address as a hint for next time */
4977 return (mm->free_area_cache = addr);
4979 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4980 mm->cached_hole_size = vma->vm_start - addr;
4982 /* try just below the current vma->vm_start */
4983 - addr = (vma->vm_start-len) & HPAGE_MASK;
4984 - } while (likely(len < vma->vm_start));
4985 + addr = skip_heap_stack_gap(vma, len);
4986 + } while (!IS_ERR_VALUE(addr));
4990 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4992 addr = ALIGN(addr, HPAGE_SIZE);
4993 vma = find_vma(mm, addr);
4994 - if (task_size - len >= addr &&
4995 - (!vma || addr + len <= vma->vm_start))
4996 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4999 if (mm->get_unmapped_area == arch_get_unmapped_area)
5000 diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5001 --- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5002 +++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5003 @@ -318,6 +318,9 @@ extern void device_scan(void);
5004 pgprot_t PAGE_SHARED __read_mostly;
5005 EXPORT_SYMBOL(PAGE_SHARED);
5007 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5008 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5010 void __init paging_init(void)
5012 switch(sparc_cpu_model) {
5013 @@ -346,17 +349,17 @@ void __init paging_init(void)
5015 /* Initialize the protection map with non-constant, MMU dependent values. */
5016 protection_map[0] = PAGE_NONE;
5017 - protection_map[1] = PAGE_READONLY;
5018 - protection_map[2] = PAGE_COPY;
5019 - protection_map[3] = PAGE_COPY;
5020 + protection_map[1] = PAGE_READONLY_NOEXEC;
5021 + protection_map[2] = PAGE_COPY_NOEXEC;
5022 + protection_map[3] = PAGE_COPY_NOEXEC;
5023 protection_map[4] = PAGE_READONLY;
5024 protection_map[5] = PAGE_READONLY;
5025 protection_map[6] = PAGE_COPY;
5026 protection_map[7] = PAGE_COPY;
5027 protection_map[8] = PAGE_NONE;
5028 - protection_map[9] = PAGE_READONLY;
5029 - protection_map[10] = PAGE_SHARED;
5030 - protection_map[11] = PAGE_SHARED;
5031 + protection_map[9] = PAGE_READONLY_NOEXEC;
5032 + protection_map[10] = PAGE_SHARED_NOEXEC;
5033 + protection_map[11] = PAGE_SHARED_NOEXEC;
5034 protection_map[12] = PAGE_READONLY;
5035 protection_map[13] = PAGE_READONLY;
5036 protection_map[14] = PAGE_SHARED;
5037 diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5038 --- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5039 +++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5044 -ccflags-y := -Werror
5045 +#ccflags-y := -Werror
5047 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5048 obj-y += fault_$(BITS).o
5049 diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5050 --- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5051 +++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5052 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5053 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5054 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5055 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5057 +#ifdef CONFIG_PAX_PAGEEXEC
5058 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5059 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5060 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5063 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5064 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5066 diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5067 --- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5068 +++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5069 @@ -23,6 +23,7 @@ enum km_type {
5077 diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5078 --- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5079 +++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5081 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5082 #define PAGE_MASK (~(PAGE_SIZE-1))
5084 +#define ktla_ktva(addr) (addr)
5085 +#define ktva_ktla(addr) (addr)
5087 #ifndef __ASSEMBLY__
5090 diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5091 --- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5092 +++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5093 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5098 - * Only x86 and x86_64 have an arch_align_stack().
5099 - * All other arches have "#define arch_align_stack(x) (x)"
5100 - * in their asm/system.h
5101 - * As this is included in UML from asm-um/system-generic.h,
5102 - * we can use it to behave as the subarch does.
5104 -#ifndef arch_align_stack
5105 -unsigned long arch_align_stack(unsigned long sp)
5107 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5108 - sp -= get_random_int() % 8192;
5113 unsigned long get_wchan(struct task_struct *p)
5115 unsigned long stack_page, sp, ip;
5116 diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5117 --- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5118 +++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5120 #include "asm/uaccess.h"
5121 #include "asm/unistd.h"
5123 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5125 + unsigned long pax_task_size = TASK_SIZE;
5127 +#ifdef CONFIG_PAX_SEGMEXEC
5128 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5129 + pax_task_size = SEGMEXEC_TASK_SIZE;
5132 + if (len > pax_task_size || addr > pax_task_size - len)
5139 * The prototype on i386 is:
5141 diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5142 --- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5143 +++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5144 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5146 const u32 *p = (const u32 *)addr;
5148 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5149 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5153 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5155 static inline void set_bit(int nr, void *addr)
5157 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5158 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5161 #endif /* BOOT_BITOPS_H */
5162 diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5163 --- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5164 +++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5165 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5166 static inline u16 ds(void)
5169 - asm("movw %%ds,%0" : "=rm" (seg));
5170 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5174 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5175 static inline int memcmp(const void *s1, const void *s2, size_t len)
5178 - asm("repe; cmpsb; setnz %0"
5179 + asm volatile("repe; cmpsb; setnz %0"
5180 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5183 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5184 --- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5185 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5186 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5190 - movl $LOAD_PHYSICAL_ADDR, %ebx
5191 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5194 /* Target address to relocate to for decompression */
5195 @@ -162,7 +162,7 @@ relocated:
5196 * and where it was actually loaded.
5199 - subl $LOAD_PHYSICAL_ADDR, %ebx
5200 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5201 jz 2f /* Nothing to be done if loaded at compiled addr. */
5203 * Process relocations.
5204 @@ -170,8 +170,7 @@ relocated:
5211 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5214 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5215 --- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5216 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5217 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5221 - movl $LOAD_PHYSICAL_ADDR, %ebx
5222 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5225 /* Target address to relocate to for decompression */
5226 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5230 - movq $LOAD_PHYSICAL_ADDR, %rbp
5231 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5234 /* Target address to relocate to for decompression */
5235 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5236 --- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5237 +++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5238 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5239 KBUILD_CFLAGS += $(cflags-y)
5240 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5241 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5242 +ifdef CONSTIFY_PLUGIN
5243 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5246 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5248 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5249 --- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5250 +++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5251 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5253 #ifdef CONFIG_RELOCATABLE
5255 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5256 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5258 dest = (void *)(phdr->p_paddr);
5260 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5261 error("Destination address too large");
5263 #ifndef CONFIG_RELOCATABLE
5264 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5265 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5266 error("Wrong destination address");
5269 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5270 --- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5271 +++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5274 static void die(char *fmt, ...);
5276 +#include "../../../../include/generated/autoconf.h"
5278 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5279 static Elf32_Ehdr ehdr;
5280 +static Elf32_Phdr *phdr;
5281 static unsigned long reloc_count, reloc_idx;
5282 static unsigned long *relocs;
5284 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5288 +static void read_phdrs(FILE *fp)
5292 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5294 + die("Unable to allocate %d program headers\n",
5297 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5298 + die("Seek to %d failed: %s\n",
5299 + ehdr.e_phoff, strerror(errno));
5301 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5302 + die("Cannot read ELF program headers: %s\n",
5305 + for(i = 0; i < ehdr.e_phnum; i++) {
5306 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5307 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5308 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5309 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5310 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5311 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5312 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5313 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5318 static void read_shdrs(FILE *fp)
5324 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5325 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5327 static void read_strtabs(FILE *fp)
5331 for (i = 0; i < ehdr.e_shnum; i++) {
5332 struct section *sec = &secs[i];
5333 if (sec->shdr.sh_type != SHT_STRTAB) {
5334 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5336 static void read_symtabs(FILE *fp)
5340 for (i = 0; i < ehdr.e_shnum; i++) {
5341 struct section *sec = &secs[i];
5342 if (sec->shdr.sh_type != SHT_SYMTAB) {
5343 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5345 static void read_relocs(FILE *fp)
5351 for (i = 0; i < ehdr.e_shnum; i++) {
5352 struct section *sec = &secs[i];
5353 if (sec->shdr.sh_type != SHT_REL) {
5354 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5355 die("Cannot read symbol table: %s\n",
5359 + for (j = 0; j < ehdr.e_phnum; j++) {
5360 + if (phdr[j].p_type != PT_LOAD )
5362 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5364 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5367 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5368 Elf32_Rel *rel = &sec->reltab[j];
5369 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5370 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5371 rel->r_info = elf32_to_cpu(rel->r_info);
5374 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5376 static void print_absolute_symbols(void)
5380 printf("Absolute symbols\n");
5381 printf(" Num: Value Size Type Bind Visibility Name\n");
5382 for (i = 0; i < ehdr.e_shnum; i++) {
5383 struct section *sec = &secs[i];
5385 Elf32_Sym *sh_symtab;
5389 if (sec->shdr.sh_type != SHT_SYMTAB) {
5391 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5393 static void print_absolute_relocs(void)
5395 - int i, printed = 0;
5396 + unsigned int i, printed = 0;
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 struct section *sec_applies, *sec_symtab;
5402 Elf32_Sym *sh_symtab;
5405 if (sec->shdr.sh_type != SHT_REL) {
5408 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5410 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5414 /* Walk through the relocations */
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5417 Elf32_Sym *sh_symtab;
5418 struct section *sec_applies, *sec_symtab;
5421 struct section *sec = &secs[i];
5423 if (sec->shdr.sh_type != SHT_REL) {
5424 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5425 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5428 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5429 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5432 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5433 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5434 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5436 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5438 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5440 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5447 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5449 static void emit_relocs(int as_text)
5453 /* Count how many relocations I have and allocate space for them. */
5455 walk_relocs(count_reloc);
5456 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5457 fname, strerror(errno));
5464 diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5465 --- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5466 +++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5467 @@ -74,7 +74,7 @@ static int has_fpu(void)
5468 u16 fcw = -1, fsw = -1;
5471 - asm("movl %%cr0,%0" : "=r" (cr0));
5472 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5473 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5474 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5475 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5476 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5481 + asm volatile("pushfl ; "
5485 @@ -115,7 +115,7 @@ static void get_flags(void)
5486 set_bit(X86_FEATURE_FPU, cpu.flags);
5488 if (has_eflag(X86_EFLAGS_ID)) {
5490 + asm volatile("cpuid"
5491 : "=a" (max_intel_level),
5492 "=b" (cpu_vendor[0]),
5493 "=d" (cpu_vendor[1]),
5494 @@ -124,7 +124,7 @@ static void get_flags(void)
5496 if (max_intel_level >= 0x00000001 &&
5497 max_intel_level <= 0x0000ffff) {
5499 + asm volatile("cpuid"
5501 "=c" (cpu.flags[4]),
5503 @@ -136,7 +136,7 @@ static void get_flags(void)
5504 cpu.model += ((tfms >> 16) & 0xf) << 4;
5508 + asm volatile("cpuid"
5509 : "=a" (max_amd_level)
5511 : "ebx", "ecx", "edx");
5512 @@ -144,7 +144,7 @@ static void get_flags(void)
5513 if (max_amd_level >= 0x80000001 &&
5514 max_amd_level <= 0x8000ffff) {
5515 u32 eax = 0x80000001;
5517 + asm volatile("cpuid"
5519 "=c" (cpu.flags[6]),
5521 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5522 u32 ecx = MSR_K7_HWCR;
5525 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5526 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5528 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5531 get_flags(); /* Make sure it really did something */
5532 err = check_flags();
5533 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5534 u32 ecx = MSR_VIA_FCR;
5537 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5538 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5539 eax |= (1<<1)|(1<<7);
5540 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5541 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5543 set_bit(X86_FEATURE_CX8, cpu.flags);
5544 err = check_flags();
5545 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5549 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5550 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5552 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5553 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5554 + asm volatile("cpuid"
5555 : "+a" (level), "=d" (cpu.flags[0])
5557 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5560 err = check_flags();
5562 diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5563 --- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5564 +++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5565 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5566 # single linked list of
5569 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5570 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5572 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5573 #define VO_INIT_SIZE (VO__end - VO__text)
5574 diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5575 --- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5576 +++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5577 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5578 $(call cc-option, -fno-stack-protector) \
5579 $(call cc-option, -mpreferred-stack-boundary=2)
5580 KBUILD_CFLAGS += $(call cc-option, -m32)
5581 +ifdef CONSTIFY_PLUGIN
5582 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5584 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5587 diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5588 --- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5589 +++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5592 static int detect_memory_e820(void)
5595 + unsigned int count = 0;
5596 struct biosregs ireg, oreg;
5597 struct e820entry *desc = boot_params.e820_map;
5598 static struct e820entry buf; /* static so it is zeroed */
5599 diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5600 --- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5601 +++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5602 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5603 static unsigned int get_entry(void)
5607 + unsigned int i, len = 0;
5611 diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5612 --- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5613 +++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5614 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5616 boot_params.screen_info.vesapm_seg = oreg.es;
5617 boot_params.screen_info.vesapm_off = oreg.di;
5618 + boot_params.screen_info.vesapm_size = oreg.cx;
5622 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5623 --- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5624 +++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5625 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5626 unsigned long dump_start, dump_size;
5629 + memset(&dump, 0, sizeof(dump));
5634 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5635 --- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5636 +++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5638 #include <asm/thread_info.h>
5639 #include <asm/segment.h>
5640 #include <asm/irqflags.h>
5641 +#include <asm/pgtable.h>
5642 #include <linux/linkage.h>
5644 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5645 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5646 ENDPROC(native_irq_enable_sysexit)
5649 + .macro pax_enter_kernel_user
5650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5651 + call pax_enter_kernel_user
5655 + .macro pax_exit_kernel_user
5656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5657 + call pax_exit_kernel_user
5659 +#ifdef CONFIG_PAX_RANDKSTACK
5661 + call pax_randomize_kstack
5664 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5665 + call pax_erase_kstack
5669 + .macro pax_erase_kstack
5670 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5671 + call pax_erase_kstack
5676 * 32bit SYSENTER instruction entry.
5678 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5679 CFI_REGISTER rsp,rbp
5681 movq PER_CPU_VAR(kernel_stack), %rsp
5682 - addq $(KERNEL_STACK_OFFSET),%rsp
5683 + pax_enter_kernel_user
5685 * No need to follow this irqs on/off section: the syscall
5686 * disabled irqs, here we enable it straight after entry:
5687 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5688 CFI_REL_OFFSET rsp,0
5690 /*CFI_REL_OFFSET rflags,0*/
5691 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5692 + GET_THREAD_INFO(%r10)
5693 + movl TI_sysenter_return(%r10), %r10d
5694 CFI_REGISTER rip,r10
5695 pushq_cfi $__USER32_CS
5696 /*CFI_REL_OFFSET cs,0*/
5697 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5699 /* no need to do an access_ok check here because rbp has been
5700 32bit zero extended */
5702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5703 + mov $PAX_USER_SHADOW_BASE,%r10
5708 .section __ex_table,"a"
5709 .quad 1b,ia32_badarg
5710 @@ -168,6 +202,7 @@ sysenter_dispatch:
5711 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5713 sysexit_from_sys_call:
5714 + pax_exit_kernel_user
5715 andl $~TS_COMPAT,TI_status(%r10)
5716 /* clear IF, that popfq doesn't enable interrupts early */
5717 andl $~0x200,EFLAGS-R11(%rsp)
5718 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5719 movl %eax,%esi /* 2nd arg: syscall number */
5720 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5721 call audit_syscall_entry
5725 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5726 cmpq $(IA32_NR_syscalls-1),%rax
5728 @@ -246,6 +284,9 @@ sysenter_tracesys:
5729 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5730 movq %rsp,%rdi /* &pt_regs -> arg1 */
5731 call syscall_trace_enter
5735 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5737 cmpq $(IA32_NR_syscalls-1),%rax
5738 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5739 ENTRY(ia32_cstar_target)
5740 CFI_STARTPROC32 simple
5742 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5744 CFI_REGISTER rip,rcx
5745 /*CFI_REGISTER rflags,r11*/
5749 movq PER_CPU_VAR(kernel_stack),%rsp
5751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5752 + pax_enter_kernel_user
5756 * No need to follow this irqs on/off section: the syscall
5757 * disabled irqs and here we enable it straight after entry:
5759 ENABLE_INTERRUPTS(CLBR_NONE)
5762 movl %eax,%eax /* zero extension */
5763 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5764 movq %rcx,RIP-ARGOFFSET(%rsp)
5765 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5766 /* no need to do an access_ok check here because r8 has been
5767 32bit zero extended */
5768 /* hardware stack frame is complete now */
5770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5771 + mov $PAX_USER_SHADOW_BASE,%r10
5776 .section __ex_table,"a"
5777 .quad 1b,ia32_badarg
5778 @@ -327,6 +379,7 @@ cstar_dispatch:
5779 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5781 sysretl_from_sys_call:
5782 + pax_exit_kernel_user
5783 andl $~TS_COMPAT,TI_status(%r10)
5784 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5785 movl RIP-ARGOFFSET(%rsp),%ecx
5786 @@ -364,6 +417,9 @@ cstar_tracesys:
5787 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5788 movq %rsp,%rdi /* &pt_regs -> arg1 */
5789 call syscall_trace_enter
5793 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5796 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5797 CFI_REL_OFFSET rip,RIP-RIP
5798 PARAVIRT_ADJUST_EXCEPTION_FRAME
5800 + pax_enter_kernel_user
5802 * No need to follow this irqs on/off section: the syscall
5803 * disabled irqs and here we enable it straight after entry:
5804 @@ -441,6 +498,9 @@ ia32_tracesys:
5805 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5806 movq %rsp,%rdi /* &pt_regs -> arg1 */
5807 call syscall_trace_enter
5811 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5813 cmpq $(IA32_NR_syscalls-1),%rax
5814 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5815 --- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5816 +++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5817 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5819 /* Align the stack pointer according to the i386 ABI,
5820 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5821 - sp = ((sp + 4) & -16ul) - 4;
5822 + sp = ((sp - 12) & -16ul) - 4;
5823 return (void __user *) sp;
5826 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5827 * These are actually not used anymore, but left because some
5828 * gdb versions depend on them as a marker.
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5835 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5837 __NR_ia32_rt_sigreturn,
5843 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5844 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5846 if (ka->sa.sa_flags & SA_RESTORER)
5847 restorer = ka->sa.sa_restorer;
5848 + else if (current->mm->context.vdso)
5849 + /* Return stub is in 32bit vsyscall page */
5850 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5852 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5854 + restorer = &frame->retcode;
5855 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5858 * Not actually used anymore, but left because some gdb
5861 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5862 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5863 } put_user_catch(err);
5866 diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5867 --- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5868 +++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5869 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5870 ".section .discard,\"aw\",@progbits\n" \
5871 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5873 - ".section .altinstr_replacement, \"ax\"\n" \
5874 + ".section .altinstr_replacement, \"a\"\n" \
5875 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5878 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5879 --- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5880 +++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5881 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5882 __asm__ __volatile__(APM_DO_ZERO_SEGS
5885 - "lcall *%%cs:apm_bios_entry\n\t"
5886 + "lcall *%%ss:apm_bios_entry\n\t"
5890 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5891 __asm__ __volatile__(APM_DO_ZERO_SEGS
5894 - "lcall *%%cs:apm_bios_entry\n\t"
5895 + "lcall *%%ss:apm_bios_entry\n\t"
5899 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5900 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5901 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5902 @@ -12,6 +12,14 @@ typedef struct {
5903 u64 __aligned(8) counter;
5906 +#ifdef CONFIG_PAX_REFCOUNT
5908 + u64 __aligned(8) counter;
5909 +} atomic64_unchecked_t;
5911 +typedef atomic64_t atomic64_unchecked_t;
5914 #define ATOMIC64_INIT(val) { (val) }
5916 #ifdef CONFIG_X86_CMPXCHG64
5917 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5921 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5922 + * @p: pointer to type atomic64_unchecked_t
5923 + * @o: expected value
5926 + * Atomically sets @v to @n if it was equal to @o and returns
5930 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5932 + return cmpxchg64(&v->counter, o, n);
5936 * atomic64_xchg - xchg atomic64 variable
5937 * @v: pointer to type atomic64_t
5938 * @n: value to assign
5939 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5943 + * atomic64_set_unchecked - set atomic64 variable
5944 + * @v: pointer to type atomic64_unchecked_t
5945 + * @n: value to assign
5947 + * Atomically sets the value of @v to @n.
5949 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5951 + unsigned high = (unsigned)(i >> 32);
5952 + unsigned low = (unsigned)i;
5953 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5954 + : "+b" (low), "+c" (high)
5956 + : "eax", "edx", "memory"
5961 * atomic64_read - read atomic64 variable
5962 * @v: pointer to type atomic64_t
5964 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5968 + * atomic64_read_unchecked - read atomic64 variable
5969 + * @v: pointer to type atomic64_unchecked_t
5971 + * Atomically reads the value of @v and returns it.
5973 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5976 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5977 + : "=A" (r), "+c" (v)
5984 * atomic64_add_return - add and return
5985 * @i: integer value to add
5986 * @v: pointer to type atomic64_t
5987 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5992 + * atomic64_add_return_unchecked - add and return
5993 + * @i: integer value to add
5994 + * @v: pointer to type atomic64_unchecked_t
5996 + * Atomically adds @i to @v and returns @i + *@v
5998 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6000 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6001 + : "+A" (i), "+c" (v)
6008 * Other variants with different arithmetic operators:
6010 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6014 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6017 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6025 static inline long long atomic64_dec_return(atomic64_t *v)
6028 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6032 + * atomic64_add_unchecked - add integer to atomic64 variable
6033 + * @i: integer value to add
6034 + * @v: pointer to type atomic64_unchecked_t
6036 + * Atomically adds @i to @v.
6038 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6040 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6041 + : "+A" (i), "+c" (v)
6048 * atomic64_sub - subtract the atomic64 variable
6049 * @i: integer value to subtract
6050 * @v: pointer to type atomic64_t
6051 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6052 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6053 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6056 static inline long atomic64_read(const atomic64_t *v)
6058 - return (*(volatile long *)&(v)->counter);
6059 + return (*(volatile const long *)&(v)->counter);
6063 + * atomic64_read_unchecked - read atomic64 variable
6064 + * @v: pointer of type atomic64_unchecked_t
6066 + * Atomically reads the value of @v.
6067 + * Doesn't imply a read memory barrier.
6069 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6071 + return (*(volatile const long *)&(v)->counter);
6075 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6079 + * atomic64_set_unchecked - set atomic64 variable
6080 + * @v: pointer to type atomic64_unchecked_t
6081 + * @i: required value
6083 + * Atomically sets the value of @v to @i.
6085 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6091 * atomic64_add - add integer to atomic64 variable
6092 * @i: integer value to add
6093 * @v: pointer to type atomic64_t
6094 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6096 static inline void atomic64_add(long i, atomic64_t *v)
6098 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6100 +#ifdef CONFIG_PAX_REFCOUNT
6102 + LOCK_PREFIX "subq %1,%0\n"
6104 + _ASM_EXTABLE(0b, 0b)
6107 + : "=m" (v->counter)
6108 + : "er" (i), "m" (v->counter));
6112 + * atomic64_add_unchecked - add integer to atomic64 variable
6113 + * @i: integer value to add
6114 + * @v: pointer to type atomic64_unchecked_t
6116 + * Atomically adds @i to @v.
6118 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6120 asm volatile(LOCK_PREFIX "addq %1,%0"
6122 : "er" (i), "m" (v->counter));
6123 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6125 static inline void atomic64_sub(long i, atomic64_t *v)
6127 - asm volatile(LOCK_PREFIX "subq %1,%0"
6128 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6130 +#ifdef CONFIG_PAX_REFCOUNT
6132 + LOCK_PREFIX "addq %1,%0\n"
6134 + _ASM_EXTABLE(0b, 0b)
6137 + : "=m" (v->counter)
6138 + : "er" (i), "m" (v->counter));
6142 + * atomic64_sub_unchecked - subtract the atomic64 variable
6143 + * @i: integer value to subtract
6144 + * @v: pointer to type atomic64_unchecked_t
6146 + * Atomically subtracts @i from @v.
6148 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6150 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6152 : "er" (i), "m" (v->counter));
6154 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6158 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6159 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6161 +#ifdef CONFIG_PAX_REFCOUNT
6163 + LOCK_PREFIX "addq %2,%0\n"
6165 + _ASM_EXTABLE(0b, 0b)
6169 : "=m" (v->counter), "=qm" (c)
6170 : "er" (i), "m" (v->counter) : "memory");
6172 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6174 static inline void atomic64_inc(atomic64_t *v)
6176 + asm volatile(LOCK_PREFIX "incq %0\n"
6178 +#ifdef CONFIG_PAX_REFCOUNT
6180 + LOCK_PREFIX "decq %0\n"
6182 + _ASM_EXTABLE(0b, 0b)
6185 + : "=m" (v->counter)
6186 + : "m" (v->counter));
6190 + * atomic64_inc_unchecked - increment atomic64 variable
6191 + * @v: pointer to type atomic64_unchecked_t
6193 + * Atomically increments @v by 1.
6195 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6197 asm volatile(LOCK_PREFIX "incq %0"
6199 : "m" (v->counter));
6200 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6202 static inline void atomic64_dec(atomic64_t *v)
6204 - asm volatile(LOCK_PREFIX "decq %0"
6205 + asm volatile(LOCK_PREFIX "decq %0\n"
6207 +#ifdef CONFIG_PAX_REFCOUNT
6209 + LOCK_PREFIX "incq %0\n"
6211 + _ASM_EXTABLE(0b, 0b)
6214 + : "=m" (v->counter)
6215 + : "m" (v->counter));
6219 + * atomic64_dec_unchecked - decrement atomic64 variable
6220 + * @v: pointer to type atomic64_t
6222 + * Atomically decrements @v by 1.
6224 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6226 + asm volatile(LOCK_PREFIX "decq %0\n"
6228 : "m" (v->counter));
6230 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6234 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6235 + asm volatile(LOCK_PREFIX "decq %0\n"
6237 +#ifdef CONFIG_PAX_REFCOUNT
6239 + LOCK_PREFIX "incq %0\n"
6241 + _ASM_EXTABLE(0b, 0b)
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6248 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6252 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6253 + asm volatile(LOCK_PREFIX "incq %0\n"
6255 +#ifdef CONFIG_PAX_REFCOUNT
6257 + LOCK_PREFIX "decq %0\n"
6259 + _ASM_EXTABLE(0b, 0b)
6263 : "=m" (v->counter), "=qm" (c)
6264 : "m" (v->counter) : "memory");
6266 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6270 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6271 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6273 +#ifdef CONFIG_PAX_REFCOUNT
6275 + LOCK_PREFIX "subq %2,%0\n"
6277 + _ASM_EXTABLE(0b, 0b)
6281 : "=m" (v->counter), "=qm" (c)
6282 : "er" (i), "m" (v->counter) : "memory");
6284 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6285 static inline long atomic64_add_return(long i, atomic64_t *v)
6288 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6289 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6291 +#ifdef CONFIG_PAX_REFCOUNT
6295 + _ASM_EXTABLE(0b, 0b)
6298 + : "+r" (i), "+m" (v->counter)
6304 + * atomic64_add_return_unchecked - add and return
6305 + * @i: integer value to add
6306 + * @v: pointer to type atomic64_unchecked_t
6308 + * Atomically adds @i to @v and returns @i + @v
6310 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6313 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6314 : "+r" (i), "+m" (v->counter)
6317 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6320 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6323 + return atomic64_add_return_unchecked(1, v);
6325 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6327 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6328 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6329 return cmpxchg(&v->counter, old, new);
6332 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6334 + return cmpxchg(&v->counter, old, new);
6337 static inline long atomic64_xchg(atomic64_t *v, long new)
6339 return xchg(&v->counter, new);
6340 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6342 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6346 c = atomic64_read(v);
6348 - if (unlikely(c == (u)))
6349 + if (unlikely(c == u))
6351 - old = atomic64_cmpxchg((v), c, c + (a));
6353 + asm volatile("add %2,%0\n"
6355 +#ifdef CONFIG_PAX_REFCOUNT
6359 + _ASM_EXTABLE(0b, 0b)
6363 + : "0" (c), "ir" (a));
6365 + old = atomic64_cmpxchg(v, c, new);
6366 if (likely(old == c))
6374 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6375 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6376 --- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6377 +++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6380 static inline int atomic_read(const atomic_t *v)
6382 - return (*(volatile int *)&(v)->counter);
6383 + return (*(volatile const int *)&(v)->counter);
6387 + * atomic_read_unchecked - read atomic variable
6388 + * @v: pointer of type atomic_unchecked_t
6390 + * Atomically reads the value of @v.
6392 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6394 + return (*(volatile const int *)&(v)->counter);
6398 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6402 + * atomic_set_unchecked - set atomic variable
6403 + * @v: pointer of type atomic_unchecked_t
6404 + * @i: required value
6406 + * Atomically sets the value of @v to @i.
6408 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6414 * atomic_add - add integer to atomic variable
6415 * @i: integer value to add
6416 * @v: pointer of type atomic_t
6417 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6419 static inline void atomic_add(int i, atomic_t *v)
6421 - asm volatile(LOCK_PREFIX "addl %1,%0"
6422 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6424 +#ifdef CONFIG_PAX_REFCOUNT
6426 + LOCK_PREFIX "subl %1,%0\n"
6428 + _ASM_EXTABLE(0b, 0b)
6431 + : "+m" (v->counter)
6436 + * atomic_add_unchecked - add integer to atomic variable
6437 + * @i: integer value to add
6438 + * @v: pointer of type atomic_unchecked_t
6440 + * Atomically adds @i to @v.
6442 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6444 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6448 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6450 static inline void atomic_sub(int i, atomic_t *v)
6452 - asm volatile(LOCK_PREFIX "subl %1,%0"
6453 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6455 +#ifdef CONFIG_PAX_REFCOUNT
6457 + LOCK_PREFIX "addl %1,%0\n"
6459 + _ASM_EXTABLE(0b, 0b)
6462 + : "+m" (v->counter)
6467 + * atomic_sub_unchecked - subtract integer from atomic variable
6468 + * @i: integer value to subtract
6469 + * @v: pointer of type atomic_unchecked_t
6471 + * Atomically subtracts @i from @v.
6473 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6475 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6479 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6483 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6484 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6486 +#ifdef CONFIG_PAX_REFCOUNT
6488 + LOCK_PREFIX "addl %2,%0\n"
6490 + _ASM_EXTABLE(0b, 0b)
6494 : "+m" (v->counter), "=qm" (c)
6495 : "ir" (i) : "memory");
6497 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6499 static inline void atomic_inc(atomic_t *v)
6501 - asm volatile(LOCK_PREFIX "incl %0"
6502 + asm volatile(LOCK_PREFIX "incl %0\n"
6504 +#ifdef CONFIG_PAX_REFCOUNT
6506 + LOCK_PREFIX "decl %0\n"
6508 + _ASM_EXTABLE(0b, 0b)
6511 + : "+m" (v->counter));
6515 + * atomic_inc_unchecked - increment atomic variable
6516 + * @v: pointer of type atomic_unchecked_t
6518 + * Atomically increments @v by 1.
6520 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6522 + asm volatile(LOCK_PREFIX "incl %0\n"
6523 : "+m" (v->counter));
6526 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6528 static inline void atomic_dec(atomic_t *v)
6530 - asm volatile(LOCK_PREFIX "decl %0"
6531 + asm volatile(LOCK_PREFIX "decl %0\n"
6533 +#ifdef CONFIG_PAX_REFCOUNT
6535 + LOCK_PREFIX "incl %0\n"
6537 + _ASM_EXTABLE(0b, 0b)
6540 + : "+m" (v->counter));
6544 + * atomic_dec_unchecked - decrement atomic variable
6545 + * @v: pointer of type atomic_unchecked_t
6547 + * Atomically decrements @v by 1.
6549 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6551 + asm volatile(LOCK_PREFIX "decl %0\n"
6552 : "+m" (v->counter));
6555 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6559 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6560 + asm volatile(LOCK_PREFIX "decl %0\n"
6562 +#ifdef CONFIG_PAX_REFCOUNT
6564 + LOCK_PREFIX "incl %0\n"
6566 + _ASM_EXTABLE(0b, 0b)
6570 : "+m" (v->counter), "=qm" (c)
6573 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6577 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6578 + asm volatile(LOCK_PREFIX "incl %0\n"
6580 +#ifdef CONFIG_PAX_REFCOUNT
6582 + LOCK_PREFIX "decl %0\n"
6584 + _ASM_EXTABLE(0b, 0b)
6588 + : "+m" (v->counter), "=qm" (c)
6594 + * atomic_inc_and_test_unchecked - increment and test
6595 + * @v: pointer of type atomic_unchecked_t
6597 + * Atomically increments @v by 1
6598 + * and returns true if the result is zero, or false for all
6601 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6605 + asm volatile(LOCK_PREFIX "incl %0\n"
6607 : "+m" (v->counter), "=qm" (c)
6610 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6614 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6615 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6617 +#ifdef CONFIG_PAX_REFCOUNT
6619 + LOCK_PREFIX "subl %2,%0\n"
6621 + _ASM_EXTABLE(0b, 0b)
6625 : "+m" (v->counter), "=qm" (c)
6626 : "ir" (i) : "memory");
6628 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6630 /* Modern 486+ processor */
6632 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6634 +#ifdef CONFIG_PAX_REFCOUNT
6638 + _ASM_EXTABLE(0b, 0b)
6641 + : "+r" (i), "+m" (v->counter)
6646 +no_xadd: /* Legacy 386 processor */
6647 + local_irq_save(flags);
6648 + __i = atomic_read(v);
6649 + atomic_set(v, i + __i);
6650 + local_irq_restore(flags);
6656 + * atomic_add_return_unchecked - add integer and return
6657 + * @v: pointer of type atomic_unchecked_t
6658 + * @i: integer value to add
6660 + * Atomically adds @i to @v and returns @i + @v
6662 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6666 + unsigned long flags;
6667 + if (unlikely(boot_cpu_data.x86 <= 3))
6670 + /* Modern 486+ processor */
6672 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6673 : "+r" (i), "+m" (v->counter)
6675 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6678 #define atomic_inc_return(v) (atomic_add_return(1, v))
6679 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6681 + return atomic_add_return_unchecked(1, v);
6683 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6685 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6686 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6687 return cmpxchg(&v->counter, old, new);
6690 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6692 + return cmpxchg(&v->counter, old, new);
6695 static inline int atomic_xchg(atomic_t *v, int new)
6697 return xchg(&v->counter, new);
6700 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6702 + return xchg(&v->counter, new);
6706 * atomic_add_unless - add unless the number is already a given value
6707 * @v: pointer of type atomic_t
6708 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6710 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6716 - if (unlikely(c == (u)))
6717 + if (unlikely(c == u))
6719 - old = atomic_cmpxchg((v), c, c + (a));
6721 + asm volatile("addl %2,%0\n"
6723 +#ifdef CONFIG_PAX_REFCOUNT
6727 + _ASM_EXTABLE(0b, 0b)
6731 + : "0" (c), "ir" (a));
6733 + old = atomic_cmpxchg(v, c, new);
6734 if (likely(old == c))
6742 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6745 + * atomic_inc_not_zero_hint - increment if not null
6746 + * @v: pointer of type atomic_t
6747 + * @hint: probable value of the atomic before the increment
6749 + * This version of atomic_inc_not_zero() gives a hint of probable
6750 + * value of the atomic. This helps processor to not read the memory
6751 + * before doing the atomic read/modify/write cycle, lowering
6752 + * number of bus transactions on some arches.
6754 + * Returns: 0 if increment was not done, 1 otherwise.
6756 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6757 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6759 + int val, c = hint, new;
6761 + /* sanity test, should be removed by compiler if hint is a constant */
6763 + return atomic_inc_not_zero(v);
6766 + asm volatile("incl %0\n"
6768 +#ifdef CONFIG_PAX_REFCOUNT
6772 + _ASM_EXTABLE(0b, 0b)
6778 + val = atomic_cmpxchg(v, c, new);
6788 * atomic_dec_if_positive - decrement by 1 if old value positive
6789 * @v: pointer of type atomic_t
6790 diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6791 --- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6792 +++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6794 * a mask operation on a byte.
6796 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6797 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6798 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6799 #define CONST_MASK(nr) (1 << ((nr) & 7))
6802 diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6803 --- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6804 +++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6806 #include <asm/pgtable_types.h>
6808 /* Physical address where kernel should be loaded. */
6809 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6810 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6811 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6812 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6814 +#ifndef __ASSEMBLY__
6815 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6816 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6819 /* Minimum kernel alignment, as a power of two */
6820 #ifdef CONFIG_X86_64
6821 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6822 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6823 --- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6824 +++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6825 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6826 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6828 if (pg_flags == _PGMT_DEFAULT)
6831 else if (pg_flags == _PGMT_WC)
6832 return _PAGE_CACHE_WC;
6833 else if (pg_flags == _PGMT_UC_MINUS)
6834 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6835 --- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6836 +++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6839 /* L1 cache line size */
6840 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6841 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6842 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6844 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6845 +#define __read_only __attribute__((__section__(".data..read_only")))
6847 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6848 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6849 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6851 #ifdef CONFIG_X86_VSMP
6853 diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6854 --- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6855 +++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6856 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6857 int len, __wsum sum,
6858 int *src_err_ptr, int *dst_err_ptr);
6860 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6861 + int len, __wsum sum,
6862 + int *src_err_ptr, int *dst_err_ptr);
6864 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6865 + int len, __wsum sum,
6866 + int *src_err_ptr, int *dst_err_ptr);
6869 * Note: when you get a NULL pointer exception here this means someone
6870 * passed in an incorrect kernel address to one of these functions.
6871 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6875 - return csum_partial_copy_generic((__force void *)src, dst,
6876 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6877 len, sum, err_ptr, NULL);
6880 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6883 if (access_ok(VERIFY_WRITE, dst, len))
6884 - return csum_partial_copy_generic(src, (__force void *)dst,
6885 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6886 len, sum, NULL, err_ptr);
6889 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6890 --- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6891 +++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6892 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6893 ".section .discard,\"aw\",@progbits\n"
6894 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6896 - ".section .altinstr_replacement,\"ax\"\n"
6897 + ".section .altinstr_replacement,\"a\"\n"
6901 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6902 --- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6903 +++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6904 @@ -31,6 +31,12 @@ struct desc_struct {
6905 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6906 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6911 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6912 + unsigned offset_high: 16;
6915 } __attribute__((packed));
6917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6918 --- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6919 +++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6921 #include <asm/desc_defs.h>
6922 #include <asm/ldt.h>
6923 #include <asm/mmu.h>
6924 +#include <asm/pgtable.h>
6925 #include <linux/smp.h>
6927 static inline void fill_ldt(struct desc_struct *desc,
6928 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6929 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6930 desc->type = (info->read_exec_only ^ 1) << 1;
6931 desc->type |= info->contents << 2;
6932 + desc->type |= info->seg_not_present ^ 1;
6935 desc->p = info->seg_not_present ^ 1;
6936 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6939 extern struct desc_ptr idt_descr;
6940 -extern gate_desc idt_table[];
6943 - struct desc_struct gdt[GDT_ENTRIES];
6944 -} __attribute__((aligned(PAGE_SIZE)));
6945 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6946 +extern gate_desc idt_table[256];
6948 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6949 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6951 - return per_cpu(gdt_page, cpu).gdt;
6952 + return cpu_gdt_table[cpu];
6955 #ifdef CONFIG_X86_64
6956 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6957 unsigned long base, unsigned dpl, unsigned flags,
6960 - gate->a = (seg << 16) | (base & 0xffff);
6961 - gate->b = (base & 0xffff0000) |
6962 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6963 + gate->gate.offset_low = base;
6964 + gate->gate.seg = seg;
6965 + gate->gate.reserved = 0;
6966 + gate->gate.type = type;
6968 + gate->gate.dpl = dpl;
6970 + gate->gate.offset_high = base >> 16;
6974 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
6975 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6976 const gate_desc *gate)
6978 + pax_open_kernel();
6979 memcpy(&idt[entry], gate, sizeof(*gate));
6980 + pax_close_kernel();
6983 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6986 + pax_open_kernel();
6987 memcpy(&ldt[entry], desc, 8);
6988 + pax_close_kernel();
6991 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
6992 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
6993 size = sizeof(struct desc_struct);
6997 + pax_open_kernel();
6998 memcpy(&gdt[entry], desc, size);
6999 + pax_close_kernel();
7002 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7003 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7005 static inline void native_load_tr_desc(void)
7007 + pax_open_kernel();
7008 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7009 + pax_close_kernel();
7012 static inline void native_load_gdt(const struct desc_ptr *dtr)
7013 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7015 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7017 + pax_open_kernel();
7018 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7019 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7020 + pax_close_kernel();
7023 #define _LDT_empty(info) \
7024 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7025 desc->limit = (limit >> 16) & 0xf;
7028 -static inline void _set_gate(int gate, unsigned type, void *addr,
7029 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7030 unsigned dpl, unsigned ist, unsigned seg)
7033 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7034 * Pentium F0 0F bugfix can have resulted in the mapped
7035 * IDT being write-protected.
7037 -static inline void set_intr_gate(unsigned int n, void *addr)
7038 +static inline void set_intr_gate(unsigned int n, const void *addr)
7040 BUG_ON((unsigned)n > 0xFF);
7041 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7042 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7044 * This routine sets up an interrupt gate at directory privilege level 3.
7046 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7047 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7049 BUG_ON((unsigned)n > 0xFF);
7050 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7053 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7054 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7060 -static inline void set_trap_gate(unsigned int n, void *addr)
7061 +static inline void set_trap_gate(unsigned int n, const void *addr)
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7065 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7066 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7068 BUG_ON((unsigned)n > 0xFF);
7069 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7070 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7073 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7074 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7076 BUG_ON((unsigned)n > 0xFF);
7077 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7080 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7081 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7083 BUG_ON((unsigned)n > 0xFF);
7084 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7087 +#ifdef CONFIG_X86_32
7088 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7090 + struct desc_struct d;
7092 + if (likely(limit))
7093 + limit = (limit - 1UL) >> PAGE_SHIFT;
7094 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7095 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7099 #endif /* _ASM_X86_DESC_H */
7100 diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7101 --- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7102 +++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7103 @@ -69,7 +69,7 @@ struct e820map {
7104 #define ISA_START_ADDRESS 0xa0000
7105 #define ISA_END_ADDRESS 0x100000
7107 -#define BIOS_BEGIN 0x000a0000
7108 +#define BIOS_BEGIN 0x000c0000
7109 #define BIOS_END 0x00100000
7111 #define BIOS_ROM_BASE 0xffe00000
7112 diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7113 --- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7114 +++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7115 @@ -237,7 +237,25 @@ extern int force_personality32;
7116 the loader. We need to make sure that it is out of the way of the program
7117 that it will "exec", and that there is sufficient room for the brk. */
7119 +#ifdef CONFIG_PAX_SEGMEXEC
7120 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7122 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7125 +#ifdef CONFIG_PAX_ASLR
7126 +#ifdef CONFIG_X86_32
7127 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7129 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7130 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7132 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7134 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7135 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7139 /* This yields a mask that user programs can use to figure out what
7140 instruction set this CPU supports. This could be done in user space,
7141 @@ -291,8 +309,7 @@ do { \
7142 #define ARCH_DLINFO \
7145 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7146 - (unsigned long)current->mm->context.vdso); \
7147 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7150 #define AT_SYSINFO 32
7151 @@ -303,7 +320,7 @@ do { \
7153 #endif /* !CONFIG_X86_32 */
7155 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7156 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7158 #define VDSO_ENTRY \
7159 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7160 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7161 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7162 #define compat_arch_setup_additional_pages syscall32_setup_pages
7164 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7165 -#define arch_randomize_brk arch_randomize_brk
7167 #endif /* _ASM_X86_ELF_H */
7168 diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7169 --- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7170 +++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7171 @@ -15,6 +15,6 @@ enum reboot_type {
7173 extern enum reboot_type reboot_type;
7175 -extern void machine_emergency_restart(void);
7176 +extern void machine_emergency_restart(void) __noreturn;
7178 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7179 diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7180 --- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7181 +++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7183 #include <asm/system.h>
7185 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7186 + typecheck(u32 *, uaddr); \
7187 asm volatile("1:\t" insn "\n" \
7188 "2:\t.section .fixup,\"ax\"\n" \
7189 "3:\tmov\t%3, %1\n" \
7192 _ASM_EXTABLE(1b, 3b) \
7193 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7194 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7195 : "i" (-EFAULT), "0" (oparg), "1" (0))
7197 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7198 + typecheck(u32 *, uaddr); \
7199 asm volatile("1:\tmovl %2, %0\n" \
7200 "\tmovl\t%0, %3\n" \
7203 _ASM_EXTABLE(1b, 4b) \
7204 _ASM_EXTABLE(2b, 4b) \
7205 : "=&a" (oldval), "=&r" (ret), \
7206 - "+m" (*uaddr), "=&r" (tem) \
7207 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7208 : "r" (oparg), "i" (-EFAULT), "1" (0))
7210 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7211 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7215 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7216 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7219 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7220 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7224 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7225 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7228 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7229 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7230 "2:\t.section .fixup, \"ax\"\n"
7234 _ASM_EXTABLE(1b, 3b)
7235 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7236 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7237 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7240 diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7241 --- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7242 +++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7243 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7244 extern void enable_IO_APIC(void);
7247 -extern atomic_t irq_err_count;
7248 -extern atomic_t irq_mis_count;
7249 +extern atomic_unchecked_t irq_err_count;
7250 +extern atomic_unchecked_t irq_mis_count;
7253 extern void eisa_set_level_irq(unsigned int irq);
7254 diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7255 --- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7256 +++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7257 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7261 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7262 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7263 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7266 /* See comment in fxsave() below. */
7267 #ifdef CONFIG_AS_FXSAVEQ
7268 asm volatile("1: fxrstorq %[fx]\n\t"
7269 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7273 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7274 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7275 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7279 * Clear the bytes not touched by the fxsave and reserved
7281 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7282 #endif /* CONFIG_X86_64 */
7284 /* We need a safe address that is cheap to find and that is already
7285 - in L1 during context switch. The best choices are unfortunately
7286 - different for UP and SMP */
7288 -#define safe_address (__per_cpu_offset[0])
7290 -#define safe_address (kstat_cpu(0).cpustat.user)
7292 + in L1 during context switch. */
7293 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7296 * These must be called with preempt disabled
7297 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7298 struct thread_info *me = current_thread_info();
7300 if (me->status & TS_USEDFPU)
7301 - __save_init_fpu(me->task);
7302 + __save_init_fpu(current);
7306 diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7307 --- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7308 +++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7309 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7311 #include <linux/vmalloc.h>
7313 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7314 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7316 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7319 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7321 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7325 * Convert a virtual cached pointer to an uncached pointer
7327 diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7328 --- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7329 +++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7330 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7334 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7335 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7336 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7337 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7340 #define INTERRUPT_RETURN iret
7341 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7342 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7343 --- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7344 +++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7345 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7346 #define RELATIVEJUMP_SIZE 5
7347 #define RELATIVECALL_OPCODE 0xe8
7348 #define RELATIVE_ADDR_SIZE 4
7349 -#define MAX_STACK_SIZE 64
7350 -#define MIN_STACK_SIZE(ADDR) \
7351 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7352 - THREAD_SIZE - (unsigned long)(ADDR))) \
7353 - ? (MAX_STACK_SIZE) \
7354 - : (((unsigned long)current_thread_info()) + \
7355 - THREAD_SIZE - (unsigned long)(ADDR)))
7356 +#define MAX_STACK_SIZE 64UL
7357 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7359 #define flush_insn_slot(p) do { } while (0)
7361 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7362 --- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7363 +++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7364 @@ -419,7 +419,7 @@ struct kvm_arch {
7365 unsigned int n_used_mmu_pages;
7366 unsigned int n_requested_mmu_pages;
7367 unsigned int n_max_mmu_pages;
7368 - atomic_t invlpg_counter;
7369 + atomic_unchecked_t invlpg_counter;
7370 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7372 * Hash table of struct kvm_mmu_page.
7373 @@ -589,7 +589,7 @@ struct kvm_x86_ops {
7374 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7376 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7377 - const struct trace_print_flags *exit_reasons_str;
7378 + const struct trace_print_flags * const exit_reasons_str;
7381 struct kvm_arch_async_pf {
7382 diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7383 --- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7384 +++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7385 @@ -18,26 +18,58 @@ typedef struct {
7387 static inline void local_inc(local_t *l)
7389 - asm volatile(_ASM_INC "%0"
7390 + asm volatile(_ASM_INC "%0\n"
7392 +#ifdef CONFIG_PAX_REFCOUNT
7396 + _ASM_EXTABLE(0b, 0b)
7399 : "+m" (l->a.counter));
7402 static inline void local_dec(local_t *l)
7404 - asm volatile(_ASM_DEC "%0"
7405 + asm volatile(_ASM_DEC "%0\n"
7407 +#ifdef CONFIG_PAX_REFCOUNT
7411 + _ASM_EXTABLE(0b, 0b)
7414 : "+m" (l->a.counter));
7417 static inline void local_add(long i, local_t *l)
7419 - asm volatile(_ASM_ADD "%1,%0"
7420 + asm volatile(_ASM_ADD "%1,%0\n"
7422 +#ifdef CONFIG_PAX_REFCOUNT
7424 + _ASM_SUB "%1,%0\n"
7426 + _ASM_EXTABLE(0b, 0b)
7429 : "+m" (l->a.counter)
7433 static inline void local_sub(long i, local_t *l)
7435 - asm volatile(_ASM_SUB "%1,%0"
7436 + asm volatile(_ASM_SUB "%1,%0\n"
7438 +#ifdef CONFIG_PAX_REFCOUNT
7440 + _ASM_ADD "%1,%0\n"
7442 + _ASM_EXTABLE(0b, 0b)
7445 : "+m" (l->a.counter)
7448 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7452 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7453 + asm volatile(_ASM_SUB "%2,%0\n"
7455 +#ifdef CONFIG_PAX_REFCOUNT
7457 + _ASM_ADD "%2,%0\n"
7459 + _ASM_EXTABLE(0b, 0b)
7463 : "+m" (l->a.counter), "=qm" (c)
7464 : "ir" (i) : "memory");
7466 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7470 - asm volatile(_ASM_DEC "%0; sete %1"
7471 + asm volatile(_ASM_DEC "%0\n"
7473 +#ifdef CONFIG_PAX_REFCOUNT
7477 + _ASM_EXTABLE(0b, 0b)
7481 : "+m" (l->a.counter), "=qm" (c)
7484 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7488 - asm volatile(_ASM_INC "%0; sete %1"
7489 + asm volatile(_ASM_INC "%0\n"
7491 +#ifdef CONFIG_PAX_REFCOUNT
7495 + _ASM_EXTABLE(0b, 0b)
7499 : "+m" (l->a.counter), "=qm" (c)
7502 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7506 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7507 + asm volatile(_ASM_ADD "%2,%0\n"
7509 +#ifdef CONFIG_PAX_REFCOUNT
7511 + _ASM_SUB "%2,%0\n"
7513 + _ASM_EXTABLE(0b, 0b)
7517 : "+m" (l->a.counter), "=qm" (c)
7518 : "ir" (i) : "memory");
7520 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7522 /* Modern 486+ processor */
7524 - asm volatile(_ASM_XADD "%0, %1;"
7525 + asm volatile(_ASM_XADD "%0, %1\n"
7527 +#ifdef CONFIG_PAX_REFCOUNT
7529 + _ASM_MOV "%0,%1\n"
7531 + _ASM_EXTABLE(0b, 0b)
7534 : "+r" (i), "+m" (l->a.counter)
7537 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7538 --- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7539 +++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7542 #include <asm-generic/mman.h>
7545 +#ifndef __ASSEMBLY__
7546 +#ifdef CONFIG_X86_32
7547 +#define arch_mmap_check i386_mmap_check
7548 +int i386_mmap_check(unsigned long addr, unsigned long len,
7549 + unsigned long flags);
7554 #endif /* _ASM_X86_MMAN_H */
7555 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7556 --- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7557 +++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-05 19:44:33.000000000 -0400
7558 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7560 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7563 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7567 + pax_open_kernel();
7568 + pgd = get_cpu_pgd(smp_processor_id());
7569 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7570 + if (paravirt_enabled())
7571 + set_pgd(pgd+i, native_make_pgd(0));
7573 + pgd[i] = native_make_pgd(0);
7574 + pax_close_kernel();
7578 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7579 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7580 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7581 struct task_struct *tsk)
7583 unsigned cpu = smp_processor_id();
7584 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7585 + int tlbstate = TLBSTATE_OK;
7588 if (likely(prev != next)) {
7590 +#ifdef CONFIG_X86_32
7591 + tlbstate = percpu_read(cpu_tlbstate.state);
7593 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7594 percpu_write(cpu_tlbstate.active_mm, next);
7596 cpumask_set_cpu(cpu, mm_cpumask(next));
7598 /* Re-load page tables */
7599 +#ifdef CONFIG_PAX_PER_CPU_PGD
7600 + pax_open_kernel();
7601 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7602 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7603 + pax_close_kernel();
7604 + load_cr3(get_cpu_pgd(cpu));
7606 load_cr3(next->pgd);
7609 /* stop flush ipis for the previous mm */
7610 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7611 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7613 if (unlikely(prev->context.ldt != next->context.ldt))
7614 load_LDT_nolock(&next->context);
7617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7618 + if (!(__supported_pte_mask & _PAGE_NX)) {
7619 + smp_mb__before_clear_bit();
7620 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7621 + smp_mb__after_clear_bit();
7622 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7626 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7627 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7628 + prev->context.user_cs_limit != next->context.user_cs_limit))
7629 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7631 + else if (unlikely(tlbstate != TLBSTATE_OK))
7632 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7639 +#ifdef CONFIG_PAX_PER_CPU_PGD
7640 + pax_open_kernel();
7641 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7642 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7643 + pax_close_kernel();
7644 + load_cr3(get_cpu_pgd(cpu));
7648 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7649 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7651 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7652 * tlb flush IPI delivery. We must reload CR3
7653 * to make sure to use no freed page tables.
7656 +#ifndef CONFIG_PAX_PER_CPU_PGD
7657 load_cr3(next->pgd);
7660 load_LDT_nolock(&next->context);
7662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7663 + if (!(__supported_pte_mask & _PAGE_NX))
7664 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7667 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7668 +#ifdef CONFIG_PAX_PAGEEXEC
7669 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7671 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7680 #define activate_mm(prev, next) \
7681 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7682 --- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7683 +++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7685 * we put the segment information here.
7689 + struct desc_struct *ldt;
7693 + unsigned long vdso;
7695 +#ifdef CONFIG_X86_32
7696 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7697 + unsigned long user_cs_base;
7698 + unsigned long user_cs_limit;
7700 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7701 + cpumask_t cpu_user_cs_mask;
7707 #ifdef CONFIG_X86_64
7708 /* True if mm supports a task running in 32 bit compatibility mode. */
7709 diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7710 --- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7711 +++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7714 #ifdef CONFIG_X86_64
7715 /* X86_64 does not define MODULE_PROC_FAMILY */
7716 +#define MODULE_PROC_FAMILY ""
7717 #elif defined CONFIG_M386
7718 #define MODULE_PROC_FAMILY "386 "
7719 #elif defined CONFIG_M486
7721 #error unknown processor family
7724 -#ifdef CONFIG_X86_32
7725 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7726 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7727 +#define MODULE_PAX_UDEREF "UDEREF "
7729 +#define MODULE_PAX_UDEREF ""
7732 +#ifdef CONFIG_PAX_KERNEXEC
7733 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7735 +#define MODULE_PAX_KERNEXEC ""
7738 +#ifdef CONFIG_PAX_REFCOUNT
7739 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7741 +#define MODULE_PAX_REFCOUNT ""
7744 +#ifdef CONFIG_GRKERNSEC
7745 +#define MODULE_GRSEC "GRSECURITY "
7747 +#define MODULE_GRSEC ""
7750 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7752 #endif /* _ASM_X86_MODULE_H */
7753 diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7754 --- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7755 +++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7756 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7758 /* duplicated to the one in bootmem.h */
7759 extern unsigned long max_pfn;
7760 -extern unsigned long phys_base;
7761 +extern const unsigned long phys_base;
7763 extern unsigned long __phys_addr(unsigned long);
7764 #define __phys_reloc_hide(x) (x)
7765 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7766 --- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7767 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7768 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7769 pv_mmu_ops.set_fixmap(idx, phys, flags);
7772 +#ifdef CONFIG_PAX_KERNEXEC
7773 +static inline unsigned long pax_open_kernel(void)
7775 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7778 +static inline unsigned long pax_close_kernel(void)
7780 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7783 +static inline unsigned long pax_open_kernel(void) { return 0; }
7784 +static inline unsigned long pax_close_kernel(void) { return 0; }
7787 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7790 @@ -955,7 +970,7 @@ extern void default_banner(void);
7792 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7793 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7794 -#define PARA_INDIRECT(addr) *%cs:addr
7795 +#define PARA_INDIRECT(addr) *%ss:addr
7798 #define INTERRUPT_RETURN \
7799 @@ -1032,6 +1047,21 @@ extern void default_banner(void);
7800 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804 +#define GET_CR0_INTO_RDI \
7805 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7808 +#define SET_RDI_INTO_CR0 \
7809 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811 +#define GET_CR3_INTO_RDI \
7812 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7815 +#define SET_RDI_INTO_CR3 \
7816 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818 #endif /* CONFIG_X86_32 */
7820 #endif /* __ASSEMBLY__ */
7821 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7822 --- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7823 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7824 @@ -78,19 +78,19 @@ struct pv_init_ops {
7826 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7827 unsigned long addr, unsigned len);
7832 struct pv_lazy_ops {
7833 /* Set deferred update mode, used for batching operations. */
7834 void (*enter)(void);
7835 void (*leave)(void);
7839 struct pv_time_ops {
7840 unsigned long long (*sched_clock)(void);
7841 unsigned long (*get_tsc_khz)(void);
7846 /* hooks for various privileged instructions */
7847 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849 void (*start_context_switch)(struct task_struct *prev);
7850 void (*end_context_switch)(struct task_struct *next);
7856 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7857 unsigned long start_eip,
7858 unsigned long start_esp);
7864 unsigned long (*read_cr2)(void);
7865 @@ -317,6 +317,12 @@ struct pv_mmu_ops {
7866 an mfn. We can tell which is which from the index. */
7867 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7868 phys_addr_t phys, pgprot_t flags);
7870 +#ifdef CONFIG_PAX_KERNEXEC
7871 + unsigned long (*pax_open_kernel)(void);
7872 + unsigned long (*pax_close_kernel)(void);
7877 struct arch_spinlock;
7878 @@ -327,7 +333,7 @@ struct pv_lock_ops {
7879 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7880 int (*spin_trylock)(struct arch_spinlock *lock);
7881 void (*spin_unlock)(struct arch_spinlock *lock);
7885 /* This contains all the paravirt structures: we get a convenient
7886 * number for each function using the offset which we use to indicate
7887 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7888 --- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7889 +++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7890 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7891 pmd_t *pmd, pte_t *pte)
7893 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7894 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7897 +static inline void pmd_populate_user(struct mm_struct *mm,
7898 + pmd_t *pmd, pte_t *pte)
7900 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7901 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7904 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7905 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7906 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7907 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7909 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7911 + pax_open_kernel();
7913 + pax_close_kernel();
7916 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7918 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7919 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7922 struct vm_area_struct;
7924 -extern pgd_t swapper_pg_dir[1024];
7925 -extern pgd_t initial_page_table[1024];
7927 static inline void pgtable_cache_init(void) { }
7928 static inline void check_pgt_cache(void) { }
7929 void paging_init(void);
7930 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7931 # include <asm/pgtable-2level.h>
7934 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7935 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7936 +#ifdef CONFIG_X86_PAE
7937 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7940 #if defined(CONFIG_HIGHPTE)
7941 #define pte_offset_map(dir, address) \
7942 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7943 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7944 /* Clear a kernel PTE and flush it from the TLB */
7945 #define kpte_clear_flush(ptep, vaddr) \
7947 + pax_open_kernel(); \
7948 pte_clear(&init_mm, (vaddr), (ptep)); \
7949 + pax_close_kernel(); \
7950 __flush_tlb_one((vaddr)); \
7953 @@ -74,6 +79,9 @@ do { \
7955 #endif /* !__ASSEMBLY__ */
7957 +#define HAVE_ARCH_UNMAPPED_AREA
7958 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7961 * kern_addr_valid() is (1) for FLATMEM and (0) for
7962 * SPARSEMEM and DISCONTIGMEM
7963 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7964 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7965 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7968 #ifdef CONFIG_X86_PAE
7969 # include <asm/pgtable-3level_types.h>
7970 -# define PMD_SIZE (1UL << PMD_SHIFT)
7971 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7972 # define PMD_MASK (~(PMD_SIZE - 1))
7974 # include <asm/pgtable-2level_types.h>
7975 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7976 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7979 +#ifdef CONFIG_PAX_KERNEXEC
7980 +#ifndef __ASSEMBLY__
7981 +extern unsigned char MODULES_EXEC_VADDR[];
7982 +extern unsigned char MODULES_EXEC_END[];
7984 +#include <asm/boot.h>
7985 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7986 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7988 +#define ktla_ktva(addr) (addr)
7989 +#define ktva_ktla(addr) (addr)
7992 #define MODULES_VADDR VMALLOC_START
7993 #define MODULES_END VMALLOC_END
7994 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
7995 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
7996 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
7997 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
7998 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8000 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8002 + pax_open_kernel();
8003 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8004 + pax_close_kernel();
8007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8009 + pax_open_kernel();
8010 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8011 + pax_close_kernel();
8015 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8016 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8017 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8020 extern pud_t level3_kernel_pgt[512];
8021 extern pud_t level3_ident_pgt[512];
8022 +extern pud_t level3_vmalloc_pgt[512];
8023 +extern pud_t level3_vmemmap_pgt[512];
8024 +extern pud_t level2_vmemmap_pgt[512];
8025 extern pmd_t level2_kernel_pgt[512];
8026 extern pmd_t level2_fixmap_pgt[512];
8027 -extern pmd_t level2_ident_pgt[512];
8028 -extern pgd_t init_level4_pgt[];
8029 +extern pmd_t level2_ident_pgt[512*2];
8030 +extern pgd_t init_level4_pgt[512];
8032 #define swapper_pg_dir init_level4_pgt
8034 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8036 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8038 + pax_open_kernel();
8040 + pax_close_kernel();
8043 static inline void native_pmd_clear(pmd_t *pmd)
8044 @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8046 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8048 + pax_open_kernel();
8050 + pax_close_kernel();
8053 static inline void native_pgd_clear(pgd_t *pgd)
8054 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8055 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8056 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8057 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8058 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8059 #define MODULES_END _AC(0xffffffffff000000, UL)
8060 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8061 +#define MODULES_EXEC_VADDR MODULES_VADDR
8062 +#define MODULES_EXEC_END MODULES_END
8064 +#define ktla_ktva(addr) (addr)
8065 +#define ktva_ktla(addr) (addr)
8067 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8068 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8069 --- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8070 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8071 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8073 #define arch_end_context_switch(prev) do {} while(0)
8075 +#define pax_open_kernel() native_pax_open_kernel()
8076 +#define pax_close_kernel() native_pax_close_kernel()
8077 #endif /* CONFIG_PARAVIRT */
8079 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8080 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8082 +#ifdef CONFIG_PAX_KERNEXEC
8083 +static inline unsigned long native_pax_open_kernel(void)
8085 + unsigned long cr0;
8087 + preempt_disable();
8089 + cr0 = read_cr0() ^ X86_CR0_WP;
8090 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8092 + return cr0 ^ X86_CR0_WP;
8095 +static inline unsigned long native_pax_close_kernel(void)
8097 + unsigned long cr0;
8099 + cr0 = read_cr0() ^ X86_CR0_WP;
8100 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8103 + preempt_enable_no_resched();
8104 + return cr0 ^ X86_CR0_WP;
8107 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8108 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8112 * The following only work if pte_present() is true.
8113 * Undefined behaviour if not..
8115 +static inline int pte_user(pte_t pte)
8117 + return pte_val(pte) & _PAGE_USER;
8120 static inline int pte_dirty(pte_t pte)
8122 return pte_flags(pte) & _PAGE_DIRTY;
8123 @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8124 return pte_clear_flags(pte, _PAGE_RW);
8127 +static inline pte_t pte_mkread(pte_t pte)
8129 + return __pte(pte_val(pte) | _PAGE_USER);
8132 static inline pte_t pte_mkexec(pte_t pte)
8134 - return pte_clear_flags(pte, _PAGE_NX);
8135 +#ifdef CONFIG_X86_PAE
8136 + if (__supported_pte_mask & _PAGE_NX)
8137 + return pte_clear_flags(pte, _PAGE_NX);
8140 + return pte_set_flags(pte, _PAGE_USER);
8143 +static inline pte_t pte_exprotect(pte_t pte)
8145 +#ifdef CONFIG_X86_PAE
8146 + if (__supported_pte_mask & _PAGE_NX)
8147 + return pte_set_flags(pte, _PAGE_NX);
8150 + return pte_clear_flags(pte, _PAGE_USER);
8153 static inline pte_t pte_mkdirty(pte_t pte)
8154 @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8157 #ifndef __ASSEMBLY__
8159 +#ifdef CONFIG_PAX_PER_CPU_PGD
8160 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8161 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8163 + return cpu_pgd[cpu];
8167 #include <linux/mm_types.h>
8169 static inline int pte_none(pte_t pte)
8170 @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8172 static inline int pgd_bad(pgd_t pgd)
8174 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8175 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8178 static inline int pgd_none(pgd_t pgd)
8179 @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8180 * pgd_offset() returns a (pgd_t *)
8181 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8183 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8184 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8186 +#ifdef CONFIG_PAX_PER_CPU_PGD
8187 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8191 * a shortcut which implies the use of the kernel's pgd, instead
8193 @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8194 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8195 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8197 +#ifdef CONFIG_X86_32
8198 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8200 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8201 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8203 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8204 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8206 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8211 #ifndef __ASSEMBLY__
8213 extern int direct_gbpages;
8214 @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8215 * dst and src can be on the same page, but the range must not overlap,
8216 * and must not cross a page boundary.
8218 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8219 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8221 - memcpy(dst, src, count * sizeof(pgd_t));
8222 + pax_open_kernel();
8225 + pax_close_kernel();
8228 +#ifdef CONFIG_PAX_PER_CPU_PGD
8229 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8232 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8233 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8235 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8238 #include <asm-generic/pgtable.h>
8239 #endif /* __ASSEMBLY__ */
8240 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8241 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8242 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8244 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8245 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8246 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8247 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8248 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8249 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8250 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8251 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8252 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8253 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8254 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8255 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8256 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8257 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8259 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8261 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8262 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8263 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8264 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8265 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8266 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8267 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8270 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8271 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8273 +#elif defined(CONFIG_KMEMCHECK)
8274 #define _PAGE_NX (_AT(pteval_t, 0))
8276 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8279 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8281 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8284 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8285 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8287 #define __PAGE_KERNEL_EXEC \
8288 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8289 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8291 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8292 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8293 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8294 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8295 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8296 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8297 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8298 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8299 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8300 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8302 * bits are combined, this will alow user to access the high address mapped
8303 * VDSO in the presence of CONFIG_COMPAT_VDSO
8305 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8306 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8307 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8308 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8309 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8312 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8314 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8318 +#if PAGETABLE_LEVELS == 3
8319 +#include <asm-generic/pgtable-nopud.h>
8322 +#if PAGETABLE_LEVELS == 2
8323 +#include <asm-generic/pgtable-nopmd.h>
8326 +#ifndef __ASSEMBLY__
8327 #if PAGETABLE_LEVELS > 3
8328 typedef struct { pudval_t pud; } pud_t;
8330 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8334 -#include <asm-generic/pgtable-nopud.h>
8336 static inline pudval_t native_pud_val(pud_t pud)
8338 return native_pgd_val(pud.pgd);
8339 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8343 -#include <asm-generic/pgtable-nopmd.h>
8345 static inline pmdval_t native_pmd_val(pmd_t pmd)
8347 return native_pgd_val(pmd.pud.pgd);
8348 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8350 extern pteval_t __supported_pte_mask;
8351 extern void set_nx(void);
8352 -extern int nx_enabled;
8354 #define pgprot_writecombine pgprot_writecombine
8355 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8356 diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8357 --- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8358 +++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8359 @@ -266,7 +266,7 @@ struct tss_struct {
8361 } ____cacheline_aligned;
8363 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8364 +extern struct tss_struct init_tss[NR_CPUS];
8367 * Save the original ist values for checking stack pointers during debugging
8368 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8370 #define TASK_SIZE PAGE_OFFSET
8371 #define TASK_SIZE_MAX TASK_SIZE
8373 +#ifdef CONFIG_PAX_SEGMEXEC
8374 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8375 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8377 #define STACK_TOP TASK_SIZE
8378 -#define STACK_TOP_MAX STACK_TOP
8381 +#define STACK_TOP_MAX TASK_SIZE
8383 #define INIT_THREAD { \
8384 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8385 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8386 .vm86_info = NULL, \
8387 .sysenter_cs = __KERNEL_CS, \
8388 .io_bitmap_ptr = NULL, \
8389 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8391 #define INIT_TSS { \
8393 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8394 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8395 .ss0 = __KERNEL_DS, \
8396 .ss1 = __KERNEL_CS, \
8397 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8398 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8399 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8401 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8402 -#define KSTK_TOP(info) \
8404 - unsigned long *__ptr = (unsigned long *)(info); \
8405 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8407 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8410 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8411 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8412 #define task_pt_regs(task) \
8414 struct pt_regs *__regs__; \
8415 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8416 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8420 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8422 * User space process size. 47bits minus one guard page.
8424 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8425 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8427 /* This decides where the kernel will search for a free chunk of vm
8428 * space during mmap's.
8430 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8431 - 0xc0000000 : 0xFFFFe000)
8432 + 0xc0000000 : 0xFFFFf000)
8434 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8435 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8436 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8437 #define STACK_TOP_MAX TASK_SIZE_MAX
8439 #define INIT_THREAD { \
8440 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8441 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8444 #define INIT_TSS { \
8445 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8446 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8450 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8452 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8454 +#ifdef CONFIG_PAX_SEGMEXEC
8455 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8458 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8460 /* Get/set a process' ability to use the timestamp counter instruction */
8461 diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8462 --- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8463 +++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8464 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8468 - * user_mode_vm(regs) determines whether a register set came from user mode.
8469 + * user_mode(regs) determines whether a register set came from user mode.
8470 * This is true if V8086 mode was enabled OR if the register set was from
8471 * protected mode with RPL-3 CS value. This tricky test checks that with
8472 * one comparison. Many places in the kernel can bypass this full check
8473 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8474 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8477 -static inline int user_mode(struct pt_regs *regs)
8478 +static inline int user_mode_novm(struct pt_regs *regs)
8480 #ifdef CONFIG_X86_32
8481 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8483 - return !!(regs->cs & 3);
8484 + return !!(regs->cs & SEGMENT_RPL_MASK);
8488 -static inline int user_mode_vm(struct pt_regs *regs)
8489 +static inline int user_mode(struct pt_regs *regs)
8491 #ifdef CONFIG_X86_32
8492 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8495 - return user_mode(regs);
8496 + return user_mode_novm(regs);
8500 diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8501 --- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8502 +++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8506 struct machine_ops {
8507 - void (*restart)(char *cmd);
8508 - void (*halt)(void);
8509 - void (*power_off)(void);
8510 + void (* __noreturn restart)(char *cmd);
8511 + void (* __noreturn halt)(void);
8512 + void (* __noreturn power_off)(void);
8513 void (*shutdown)(void);
8514 void (*crash_shutdown)(struct pt_regs *);
8515 - void (*emergency_restart)(void);
8517 + void (* __noreturn emergency_restart)(void);
8520 extern struct machine_ops machine_ops;
8522 void native_machine_crash_shutdown(struct pt_regs *regs);
8523 void native_machine_shutdown(void);
8524 -void machine_real_restart(unsigned int type);
8525 +void machine_real_restart(unsigned int type) __noreturn;
8526 /* These must match dispatch_table in reboot_32.S */
8529 diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8530 --- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8531 +++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8532 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8534 asm volatile("# beginning down_read\n\t"
8535 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8537 +#ifdef CONFIG_PAX_REFCOUNT
8539 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8541 + _ASM_EXTABLE(0b, 0b)
8544 /* adds 0x00000001 */
8546 " call call_rwsem_down_read_failed\n"
8547 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8552 +#ifdef CONFIG_PAX_REFCOUNT
8556 + _ASM_EXTABLE(0b, 0b)
8560 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8562 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8564 asm volatile("# beginning down_write\n\t"
8565 LOCK_PREFIX " xadd %1,(%2)\n\t"
8567 +#ifdef CONFIG_PAX_REFCOUNT
8571 + _ASM_EXTABLE(0b, 0b)
8574 /* adds 0xffff0001, returns the old value */
8576 /* was the count 0 before? */
8577 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8579 asm volatile("# beginning __up_read\n\t"
8580 LOCK_PREFIX " xadd %1,(%2)\n\t"
8582 +#ifdef CONFIG_PAX_REFCOUNT
8586 + _ASM_EXTABLE(0b, 0b)
8589 /* subtracts 1, returns the old value */
8591 " call call_rwsem_wake\n" /* expects old value in %edx */
8592 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8594 asm volatile("# beginning __up_write\n\t"
8595 LOCK_PREFIX " xadd %1,(%2)\n\t"
8597 +#ifdef CONFIG_PAX_REFCOUNT
8601 + _ASM_EXTABLE(0b, 0b)
8604 /* subtracts 0xffff0001, returns the old value */
8606 " call call_rwsem_wake\n" /* expects old value in %edx */
8607 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8609 asm volatile("# beginning __downgrade_write\n\t"
8610 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8612 +#ifdef CONFIG_PAX_REFCOUNT
8614 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8616 + _ASM_EXTABLE(0b, 0b)
8620 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8621 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8622 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8624 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8626 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8627 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8629 +#ifdef CONFIG_PAX_REFCOUNT
8631 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8633 + _ASM_EXTABLE(0b, 0b)
8639 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8643 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8644 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8646 +#ifdef CONFIG_PAX_REFCOUNT
8650 + _ASM_EXTABLE(0b, 0b)
8653 : "+r" (tmp), "+m" (sem->count)
8656 diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8657 --- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8658 +++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8660 * 26 - ESPFIX small SS
8661 * 27 - per-cpu [ offset to per-cpu data area ]
8662 * 28 - stack_canary-20 [ for stack protector ]
8665 + * 29 - PCI BIOS CS
8666 + * 30 - PCI BIOS DS
8667 * 31 - TSS for double fault handler
8669 #define GDT_ENTRY_TLS_MIN 6
8672 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8674 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8676 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8678 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8679 @@ -104,6 +106,12 @@
8680 #define __KERNEL_STACK_CANARY 0
8683 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8684 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8686 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8687 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8689 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8695 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8696 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8697 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8702 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8703 #define __USER32_DS __USER_DS
8705 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8707 #define GDT_ENTRY_TSS 8 /* needs two entries */
8708 #define GDT_ENTRY_LDT 10 /* needs two entries */
8709 #define GDT_ENTRY_TLS_MIN 12
8713 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8714 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8715 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8716 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8717 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8718 diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8719 --- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8720 +++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8721 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8722 /* cpus sharing the last level cache: */
8723 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8724 DECLARE_PER_CPU(u16, cpu_llc_id);
8725 -DECLARE_PER_CPU(int, cpu_number);
8726 +DECLARE_PER_CPU(unsigned int, cpu_number);
8728 static inline struct cpumask *cpu_sibling_mask(int cpu)
8730 @@ -77,7 +77,7 @@ struct smp_ops {
8732 void (*send_call_func_ipi)(const struct cpumask *mask);
8733 void (*send_call_func_single_ipi)(int cpu);
8737 /* Globals due to paravirt */
8738 extern void set_cpu_sibling_map(int cpu);
8739 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8740 extern int safe_smp_processor_id(void);
8742 #elif defined(CONFIG_X86_64_SMP)
8743 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8745 -#define stack_smp_processor_id() \
8747 - struct thread_info *ti; \
8748 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8751 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8752 +#define stack_smp_processor_id() raw_smp_processor_id()
8753 #define safe_smp_processor_id() smp_processor_id()
8756 diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8757 --- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8758 +++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8759 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8760 static inline void arch_read_lock(arch_rwlock_t *rw)
8762 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8764 +#ifdef CONFIG_PAX_REFCOUNT
8766 + LOCK_PREFIX " addl $1,(%0)\n"
8768 + _ASM_EXTABLE(0b, 0b)
8772 "call __read_lock_failed\n\t"
8774 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8775 static inline void arch_write_lock(arch_rwlock_t *rw)
8777 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8779 +#ifdef CONFIG_PAX_REFCOUNT
8781 + LOCK_PREFIX " addl %1,(%0)\n"
8783 + _ASM_EXTABLE(0b, 0b)
8787 "call __write_lock_failed\n\t"
8789 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8791 static inline void arch_read_unlock(arch_rwlock_t *rw)
8793 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8794 + asm volatile(LOCK_PREFIX "incl %0\n"
8796 +#ifdef CONFIG_PAX_REFCOUNT
8798 + LOCK_PREFIX "decl %0\n"
8800 + _ASM_EXTABLE(0b, 0b)
8803 + :"+m" (rw->lock) : : "memory");
8806 static inline void arch_write_unlock(arch_rwlock_t *rw)
8808 - asm volatile(LOCK_PREFIX "addl %1, %0"
8809 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8811 +#ifdef CONFIG_PAX_REFCOUNT
8813 + LOCK_PREFIX "subl %1, %0\n"
8815 + _ASM_EXTABLE(0b, 0b)
8818 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8821 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8822 --- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8823 +++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8825 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8827 #define GDT_STACK_CANARY_INIT \
8828 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8829 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8832 * Initialize the stackprotector canary value.
8833 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8835 static inline void load_stack_canary_segment(void)
8837 -#ifdef CONFIG_X86_32
8838 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8839 asm volatile ("mov %0, %%gs" : : "r" (0));
8842 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8843 --- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8844 +++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8847 extern int kstack_depth_to_print;
8849 -struct thread_info;
8850 +struct task_struct;
8851 struct stacktrace_ops;
8853 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8854 - unsigned long *stack,
8856 - const struct stacktrace_ops *ops,
8858 - unsigned long *end,
8861 -extern unsigned long
8862 -print_context_stack(struct thread_info *tinfo,
8863 - unsigned long *stack, unsigned long bp,
8864 - const struct stacktrace_ops *ops, void *data,
8865 - unsigned long *end, int *graph);
8867 -extern unsigned long
8868 -print_context_stack_bp(struct thread_info *tinfo,
8869 - unsigned long *stack, unsigned long bp,
8870 - const struct stacktrace_ops *ops, void *data,
8871 - unsigned long *end, int *graph);
8872 +typedef unsigned long walk_stack_t(struct task_struct *task,
8873 + void *stack_start,
8874 + unsigned long *stack,
8876 + const struct stacktrace_ops *ops,
8878 + unsigned long *end,
8881 +extern walk_stack_t print_context_stack;
8882 +extern walk_stack_t print_context_stack_bp;
8884 /* Generic stack tracer with callbacks */
8886 @@ -43,7 +35,7 @@ struct stacktrace_ops {
8887 void (*address)(void *data, unsigned long address, int reliable);
8888 /* On negative return stop dumping */
8889 int (*stack)(void *data, char *name);
8890 - walk_stack_t walk_stack;
8891 + walk_stack_t *walk_stack;
8894 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8895 diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8896 --- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8897 +++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8898 @@ -129,7 +129,7 @@ do { \
8899 "call __switch_to\n\t" \
8900 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8902 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8903 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8904 "movq %%rax,%%rdi\n\t" \
8905 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8906 "jnz ret_from_fork\n\t" \
8907 @@ -140,7 +140,7 @@ do { \
8908 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8909 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8910 [_tif_fork] "i" (_TIF_FORK), \
8911 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8912 + [thread_info] "m" (current_tinfo), \
8913 [current_task] "m" (current_task) \
8914 __switch_canary_iparam \
8915 : "memory", "cc" __EXTRA_CLOBBER)
8916 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8918 unsigned long __limit;
8919 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8920 - return __limit + 1;
8924 static inline void native_clts(void)
8925 @@ -340,12 +340,12 @@ void enable_hlt(void);
8927 void cpu_idle_wait(void);
8929 -extern unsigned long arch_align_stack(unsigned long sp);
8930 +#define arch_align_stack(x) ((x) & ~0xfUL)
8931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8933 void default_idle(void);
8935 -void stop_this_cpu(void *dummy);
8936 +void stop_this_cpu(void *dummy) __noreturn;
8939 * Force strict CPU ordering.
8940 diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8941 --- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8942 +++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8944 #include <linux/compiler.h>
8945 #include <asm/page.h>
8946 #include <asm/types.h>
8947 +#include <asm/percpu.h>
8950 * low level task data that entry.S needs immediate access to
8951 @@ -24,7 +25,6 @@ struct exec_domain;
8952 #include <asm/atomic.h>
8954 struct thread_info {
8955 - struct task_struct *task; /* main task structure */
8956 struct exec_domain *exec_domain; /* execution domain */
8957 __u32 flags; /* low level flags */
8958 __u32 status; /* thread synchronous flags */
8959 @@ -34,18 +34,12 @@ struct thread_info {
8960 mm_segment_t addr_limit;
8961 struct restart_block restart_block;
8962 void __user *sysenter_return;
8963 -#ifdef CONFIG_X86_32
8964 - unsigned long previous_esp; /* ESP of the previous stack in
8965 - case of nested (IRQ) stacks
8967 - __u8 supervisor_stack[0];
8969 + unsigned long lowest_stack;
8973 -#define INIT_THREAD_INFO(tsk) \
8974 +#define INIT_THREAD_INFO \
8977 .exec_domain = &default_exec_domain, \
8980 @@ -56,7 +50,7 @@ struct thread_info {
8984 -#define init_thread_info (init_thread_union.thread_info)
8985 +#define init_thread_info (init_thread_union.stack)
8986 #define init_stack (init_thread_union.stack)
8988 #else /* !__ASSEMBLY__ */
8989 @@ -170,6 +164,23 @@ struct thread_info {
8993 +#ifdef __ASSEMBLY__
8994 +/* how to get the thread information struct from ASM */
8995 +#define GET_THREAD_INFO(reg) \
8996 + mov PER_CPU_VAR(current_tinfo), reg
8998 +/* use this one if reg already contains %esp */
8999 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9001 +/* how to get the thread information struct from C */
9002 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9004 +static __always_inline struct thread_info *current_thread_info(void)
9006 + return percpu_read_stable(current_tinfo);
9010 #ifdef CONFIG_X86_32
9012 #define STACK_WARN (THREAD_SIZE/8)
9013 @@ -180,35 +191,13 @@ struct thread_info {
9015 #ifndef __ASSEMBLY__
9018 /* how to get the current stack pointer from C */
9019 register unsigned long current_stack_pointer asm("esp") __used;
9021 -/* how to get the thread information struct from C */
9022 -static inline struct thread_info *current_thread_info(void)
9024 - return (struct thread_info *)
9025 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9028 -#else /* !__ASSEMBLY__ */
9030 -/* how to get the thread information struct from ASM */
9031 -#define GET_THREAD_INFO(reg) \
9032 - movl $-THREAD_SIZE, reg; \
9035 -/* use this one if reg already contains %esp */
9036 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9037 - andl $-THREAD_SIZE, reg
9043 -#include <asm/percpu.h>
9044 -#define KERNEL_STACK_OFFSET (5*8)
9047 * macros/functions for gaining access to the thread information structure
9048 * preempt_count needs to be 1 initially, until the scheduler is functional.
9049 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9050 #ifndef __ASSEMBLY__
9051 DECLARE_PER_CPU(unsigned long, kernel_stack);
9053 -static inline struct thread_info *current_thread_info(void)
9055 - struct thread_info *ti;
9056 - ti = (void *)(percpu_read_stable(kernel_stack) +
9057 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9061 -#else /* !__ASSEMBLY__ */
9063 -/* how to get the thread information struct from ASM */
9064 -#define GET_THREAD_INFO(reg) \
9065 - movq PER_CPU_VAR(kernel_stack),reg ; \
9066 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9068 +/* how to get the current stack pointer from C */
9069 +register unsigned long current_stack_pointer asm("rsp") __used;
9072 #endif /* !X86_32 */
9073 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9074 extern void free_thread_info(struct thread_info *ti);
9075 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9076 #define arch_task_cache_init arch_task_cache_init
9078 +#define __HAVE_THREAD_FUNCTIONS
9079 +#define task_thread_info(task) (&(task)->tinfo)
9080 +#define task_stack_page(task) ((task)->stack)
9081 +#define setup_thread_stack(p, org) do {} while (0)
9082 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9084 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9085 +extern struct task_struct *alloc_task_struct_node(int node);
9086 +extern void free_task_struct(struct task_struct *);
9089 #endif /* _ASM_X86_THREAD_INFO_H */
9090 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9091 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9092 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9093 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9094 static __always_inline unsigned long __must_check
9095 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9097 + pax_track_stack();
9102 if (__builtin_constant_p(n)) {
9105 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9109 + if (!__builtin_constant_p(n))
9110 + check_object_size(from, n, true);
9111 return __copy_to_user_ll(to, from, n);
9114 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9115 __copy_to_user(void __user *to, const void *from, unsigned long n)
9119 return __copy_to_user_inatomic(to, from, n);
9122 static __always_inline unsigned long
9123 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9128 /* Avoid zeroing the tail if the copy fails..
9129 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9130 * but as the zeroing behaviour is only significant when n is not
9131 @@ -138,6 +149,12 @@ static __always_inline unsigned long
9132 __copy_from_user(void *to, const void __user *from, unsigned long n)
9136 + pax_track_stack();
9141 if (__builtin_constant_p(n)) {
9144 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9148 + if (!__builtin_constant_p(n))
9149 + check_object_size(to, n, false);
9150 return __copy_from_user_ll(to, from, n);
9153 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9154 const void __user *from, unsigned long n)
9161 if (__builtin_constant_p(n)) {
9164 @@ -182,15 +205,19 @@ static __always_inline unsigned long
9165 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9168 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9173 -unsigned long __must_check copy_to_user(void __user *to,
9174 - const void *from, unsigned long n);
9175 -unsigned long __must_check _copy_from_user(void *to,
9176 - const void __user *from,
9178 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9181 +extern void copy_to_user_overflow(void)
9182 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9183 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9185 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9189 extern void copy_from_user_overflow(void)
9190 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9191 @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9195 -static inline unsigned long __must_check copy_from_user(void *to,
9196 - const void __user *from,
9199 + * copy_to_user: - Copy a block of data into user space.
9200 + * @to: Destination address, in user space.
9201 + * @from: Source address, in kernel space.
9202 + * @n: Number of bytes to copy.
9204 + * Context: User context only. This function may sleep.
9206 + * Copy data from kernel space to user space.
9208 + * Returns number of bytes that could not be copied.
9209 + * On success, this will be zero.
9211 +static inline unsigned long __must_check
9212 +copy_to_user(void __user *to, const void *from, unsigned long n)
9214 + int sz = __compiletime_object_size(from);
9216 + if (unlikely(sz != -1 && sz < n))
9217 + copy_to_user_overflow();
9218 + else if (access_ok(VERIFY_WRITE, to, n))
9219 + n = __copy_to_user(to, from, n);
9224 + * copy_from_user: - Copy a block of data from user space.
9225 + * @to: Destination address, in kernel space.
9226 + * @from: Source address, in user space.
9227 + * @n: Number of bytes to copy.
9229 + * Context: User context only. This function may sleep.
9231 + * Copy data from user space to kernel space.
9233 + * Returns number of bytes that could not be copied.
9234 + * On success, this will be zero.
9236 + * If some data could not be copied, this function will pad the copied
9237 + * data to the requested size using zero bytes.
9239 +static inline unsigned long __must_check
9240 +copy_from_user(void *to, const void __user *from, unsigned long n)
9242 int sz = __compiletime_object_size(to);
9244 - if (likely(sz == -1 || sz >= n))
9245 - n = _copy_from_user(to, from, n);
9247 + if (unlikely(sz != -1 && sz < n))
9248 copy_from_user_overflow();
9250 + else if (access_ok(VERIFY_READ, from, n))
9251 + n = __copy_from_user(to, from, n);
9252 + else if ((long)n > 0) {
9253 + if (!__builtin_constant_p(n))
9254 + check_object_size(to, n, false);
9260 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9261 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9262 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9264 #include <asm/alternative.h>
9265 #include <asm/cpufeature.h>
9266 #include <asm/page.h>
9267 +#include <asm/pgtable.h>
9269 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9272 * Copy To/From Userspace
9273 @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9277 -__must_check unsigned long
9278 -_copy_to_user(void __user *to, const void *from, unsigned len);
9279 -__must_check unsigned long
9280 -_copy_from_user(void *to, const void __user *from, unsigned len);
9281 +static __always_inline __must_check unsigned long
9282 +__copy_to_user(void __user *to, const void *from, unsigned len);
9283 +static __always_inline __must_check unsigned long
9284 +__copy_from_user(void *to, const void __user *from, unsigned len);
9285 __must_check unsigned long
9286 copy_in_user(void __user *to, const void __user *from, unsigned len);
9288 static inline unsigned long __must_check copy_from_user(void *to,
9289 const void __user *from,
9293 - int sz = __compiletime_object_size(to);
9296 - if (likely(sz == -1 || sz >= n))
9297 - n = _copy_from_user(to, from, n);
9298 -#ifdef CONFIG_DEBUG_VM
9300 - WARN(1, "Buffer overflow detected!\n");
9303 + if (access_ok(VERIFY_READ, from, n))
9304 + n = __copy_from_user(to, from, n);
9305 + else if ((int)n > 0) {
9306 + if (!__builtin_constant_p(n))
9307 + check_object_size(to, n, false);
9313 @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9317 - return _copy_to_user(dst, src, size);
9318 + if (access_ok(VERIFY_WRITE, dst, size))
9319 + size = __copy_to_user(dst, src, size);
9323 static __always_inline __must_check
9324 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9325 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9328 + int sz = __compiletime_object_size(dst);
9332 - if (!__builtin_constant_p(size))
9333 - return copy_user_generic(dst, (__force void *)src, size);
9335 + pax_track_stack();
9337 + if ((int)size < 0)
9340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9341 + if (!__access_ok(VERIFY_READ, src, size))
9345 + if (unlikely(sz != -1 && sz < size)) {
9346 +#ifdef CONFIG_DEBUG_VM
9347 + WARN(1, "Buffer overflow detected!\n");
9352 + if (!__builtin_constant_p(size)) {
9353 + check_object_size(dst, size, false);
9355 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9356 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9357 + src += PAX_USER_SHADOW_BASE;
9360 + return copy_user_generic(dst, (__force const void *)src, size);
9363 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9364 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9365 ret, "b", "b", "=q", 1);
9367 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9368 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9369 ret, "w", "w", "=r", 2);
9371 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9372 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9373 ret, "l", "k", "=r", 4);
9375 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9376 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9377 ret, "q", "", "=r", 8);
9380 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9381 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9382 ret, "q", "", "=r", 10);
9385 __get_user_asm(*(u16 *)(8 + (char *)dst),
9386 - (u16 __user *)(8 + (char __user *)src),
9387 + (const u16 __user *)(8 + (const char __user *)src),
9388 ret, "w", "w", "=r", 2);
9391 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9392 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9393 ret, "q", "", "=r", 16);
9396 __get_user_asm(*(u64 *)(8 + (char *)dst),
9397 - (u64 __user *)(8 + (char __user *)src),
9398 + (const u64 __user *)(8 + (const char __user *)src),
9399 ret, "q", "", "=r", 8);
9402 - return copy_user_generic(dst, (__force void *)src, size);
9404 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9405 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9406 + src += PAX_USER_SHADOW_BASE;
9409 + return copy_user_generic(dst, (__force const void *)src, size);
9413 static __always_inline __must_check
9414 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9415 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9418 + int sz = __compiletime_object_size(src);
9422 - if (!__builtin_constant_p(size))
9424 + pax_track_stack();
9426 + if ((int)size < 0)
9429 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9430 + if (!__access_ok(VERIFY_WRITE, dst, size))
9434 + if (unlikely(sz != -1 && sz < size)) {
9435 +#ifdef CONFIG_DEBUG_VM
9436 + WARN(1, "Buffer overflow detected!\n");
9441 + if (!__builtin_constant_p(size)) {
9442 + check_object_size(src, size, true);
9444 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9445 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9446 + dst += PAX_USER_SHADOW_BASE;
9449 return copy_user_generic((__force void *)dst, src, size);
9452 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9453 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9454 ret, "b", "b", "iq", 1);
9456 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9457 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9458 ret, "w", "w", "ir", 2);
9460 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9461 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9462 ret, "l", "k", "ir", 4);
9464 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9465 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9466 ret, "q", "", "er", 8);
9469 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9470 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9471 ret, "q", "", "er", 10);
9475 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9476 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9477 ret, "w", "w", "ir", 2);
9480 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9481 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9482 ret, "q", "", "er", 16);
9486 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9487 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9492 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9493 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9494 + dst += PAX_USER_SHADOW_BASE;
9497 return copy_user_generic((__force void *)dst, src, size);
9501 static __always_inline __must_check
9502 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9503 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9509 - if (!__builtin_constant_p(size))
9511 + if ((int)size < 0)
9514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9515 + if (!__access_ok(VERIFY_READ, src, size))
9517 + if (!__access_ok(VERIFY_WRITE, dst, size))
9521 + if (!__builtin_constant_p(size)) {
9523 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9524 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9525 + src += PAX_USER_SHADOW_BASE;
9526 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9527 + dst += PAX_USER_SHADOW_BASE;
9530 return copy_user_generic((__force void *)dst,
9531 - (__force void *)src, size);
9532 + (__force const void *)src, size);
9537 - __get_user_asm(tmp, (u8 __user *)src,
9538 + __get_user_asm(tmp, (const u8 __user *)src,
9539 ret, "b", "b", "=q", 1);
9541 __put_user_asm(tmp, (u8 __user *)dst,
9542 @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9546 - __get_user_asm(tmp, (u16 __user *)src,
9547 + __get_user_asm(tmp, (const u16 __user *)src,
9548 ret, "w", "w", "=r", 2);
9550 __put_user_asm(tmp, (u16 __user *)dst,
9551 @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9555 - __get_user_asm(tmp, (u32 __user *)src,
9556 + __get_user_asm(tmp, (const u32 __user *)src,
9557 ret, "l", "k", "=r", 4);
9559 __put_user_asm(tmp, (u32 __user *)dst,
9560 @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9564 - __get_user_asm(tmp, (u64 __user *)src,
9565 + __get_user_asm(tmp, (const u64 __user *)src,
9566 ret, "q", "", "=r", 8);
9568 __put_user_asm(tmp, (u64 __user *)dst,
9569 @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9575 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9576 + src += PAX_USER_SHADOW_BASE;
9577 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9578 + dst += PAX_USER_SHADOW_BASE;
9581 return copy_user_generic((__force void *)dst,
9582 - (__force void *)src, size);
9583 + (__force const void *)src, size);
9587 @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9588 static __must_check __always_inline int
9589 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9591 + pax_track_stack();
9593 + if ((int)size < 0)
9596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9597 + if (!__access_ok(VERIFY_READ, src, size))
9600 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9601 + src += PAX_USER_SHADOW_BASE;
9604 return copy_user_generic(dst, (__force const void *)src, size);
9607 -static __must_check __always_inline int
9608 +static __must_check __always_inline unsigned long
9609 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9611 + if ((int)size < 0)
9614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9615 + if (!__access_ok(VERIFY_WRITE, dst, size))
9618 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9619 + dst += PAX_USER_SHADOW_BASE;
9622 return copy_user_generic((__force void *)dst, src, size);
9625 -extern long __copy_user_nocache(void *dst, const void __user *src,
9626 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9627 unsigned size, int zerorest);
9630 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9631 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9635 + if ((int)size < 0)
9638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9639 + if (!__access_ok(VERIFY_READ, src, size))
9643 return __copy_user_nocache(dst, src, size, 1);
9647 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9648 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9651 + if ((int)size < 0)
9654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9655 + if (!__access_ok(VERIFY_READ, src, size))
9659 return __copy_user_nocache(dst, src, size, 0);
9663 +extern unsigned long
9664 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9666 #endif /* _ASM_X86_UACCESS_64_H */
9667 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9668 --- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9669 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9671 #include <linux/thread_info.h>
9672 #include <linux/prefetch.h>
9673 #include <linux/string.h>
9674 +#include <linux/sched.h>
9675 #include <asm/asm.h>
9676 #include <asm/page.h>
9678 #define VERIFY_READ 0
9679 #define VERIFY_WRITE 1
9681 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9684 * The fs value determines whether argument validity checking should be
9685 * performed or not. If get_fs() == USER_DS, checking is performed, with
9688 #define get_ds() (KERNEL_DS)
9689 #define get_fs() (current_thread_info()->addr_limit)
9690 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9691 +void __set_fs(mm_segment_t x);
9692 +void set_fs(mm_segment_t x);
9694 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9697 #define segment_eq(a, b) ((a).seg == (b).seg)
9700 * checks that the pointer is in the user space range - after calling
9701 * this function, memory access functions may still return -EFAULT.
9703 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9704 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9705 +#define access_ok(type, addr, size) \
9707 + long __size = size; \
9708 + unsigned long __addr = (unsigned long)addr; \
9709 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9710 + unsigned long __end_ao = __addr + __size - 1; \
9711 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9712 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9713 + while(__addr_ao <= __end_ao) { \
9715 + __addr_ao += PAGE_SIZE; \
9716 + if (__size > PAGE_SIZE) \
9718 + if (__get_user(__c_ao, (char __user *)__addr)) \
9720 + if (type != VERIFY_WRITE) { \
9721 + __addr = __addr_ao; \
9724 + if (__put_user(__c_ao, (char __user *)__addr)) \
9726 + __addr = __addr_ao; \
9733 * The exception table consists of pairs of addresses: the first is the
9734 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9735 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9736 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9739 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9740 +#define __copyuser_seg "gs;"
9741 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9742 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9744 +#define __copyuser_seg
9745 +#define __COPYUSER_SET_ES
9746 +#define __COPYUSER_RESTORE_ES
9749 #ifdef CONFIG_X86_32
9750 #define __put_user_asm_u64(x, addr, err, errret) \
9751 - asm volatile("1: movl %%eax,0(%2)\n" \
9752 - "2: movl %%edx,4(%2)\n" \
9753 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9754 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9756 ".section .fixup,\"ax\"\n" \
9758 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9759 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9761 #define __put_user_asm_ex_u64(x, addr) \
9762 - asm volatile("1: movl %%eax,0(%1)\n" \
9763 - "2: movl %%edx,4(%1)\n" \
9764 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9765 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9767 _ASM_EXTABLE(1b, 2b - 1b) \
9768 _ASM_EXTABLE(2b, 3b - 2b) \
9769 @@ -374,7 +416,7 @@ do { \
9772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9773 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9774 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9776 ".section .fixup,\"ax\"\n" \
9778 @@ -382,7 +424,7 @@ do { \
9781 _ASM_EXTABLE(1b, 3b) \
9782 - : "=r" (err), ltype(x) \
9783 + : "=r" (err), ltype (x) \
9784 : "m" (__m(addr)), "i" (errret), "0" (err))
9786 #define __get_user_size_ex(x, ptr, size) \
9787 @@ -407,7 +449,7 @@ do { \
9790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9791 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9792 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9794 _ASM_EXTABLE(1b, 2b - 1b) \
9795 : ltype(x) : "m" (__m(addr)))
9796 @@ -424,13 +466,24 @@ do { \
9798 unsigned long __gu_val; \
9799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9800 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9801 + (x) = (__typeof__(*(ptr)))__gu_val; \
9805 /* FIXME: this hack is definitely wrong -AK */
9806 struct __large_struct { unsigned long buf[100]; };
9807 -#define __m(x) (*(struct __large_struct __user *)(x))
9808 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9811 + unsigned long ____x = (unsigned long)(x); \
9812 + if (____x < PAX_USER_SHADOW_BASE) \
9813 + ____x += PAX_USER_SHADOW_BASE; \
9814 + (void __user *)____x; \
9817 +#define ____m(x) (x)
9819 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9822 * Tell gcc we read from memory instead of writing: this is because
9823 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9827 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9828 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9830 ".section .fixup,\"ax\"\n" \
9832 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9834 _ASM_EXTABLE(1b, 3b) \
9836 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9837 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9840 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9841 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9843 _ASM_EXTABLE(1b, 2b - 1b) \
9844 : : ltype(x), "m" (__m(addr)))
9845 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9846 * On error, the variable @x is set to zero.
9849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9850 +#define __get_user(x, ptr) get_user((x), (ptr))
9852 #define __get_user(x, ptr) \
9853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9857 * __put_user: - Write a simple value into user space, with less checking.
9858 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9859 * Returns zero on success, or -EFAULT on error.
9862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9863 +#define __put_user(x, ptr) put_user((x), (ptr))
9865 #define __put_user(x, ptr) \
9866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9869 #define __get_user_unaligned __get_user
9870 #define __put_user_unaligned __put_user
9871 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9872 #define get_user_ex(x, ptr) do { \
9873 unsigned long __gue_val; \
9874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9875 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9876 + (x) = (__typeof__(*(ptr)))__gue_val; \
9879 #ifdef CONFIG_X86_WP_WORKS_OK
9880 @@ -567,6 +628,7 @@ extern struct movsl_mask {
9882 #define ARCH_HAS_NOCACHE_UACCESS 1
9884 +#define ARCH_HAS_SORT_EXTABLE
9885 #ifdef CONFIG_X86_32
9886 # include "uaccess_32.h"
9888 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9889 --- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9890 +++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9891 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9893 struct timezone sys_tz;
9894 struct { /* extract of a clocksource struct */
9896 cycle_t (*vread)(void);
9899 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9900 --- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9901 +++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9902 @@ -15,9 +15,10 @@ enum vsyscall_num {
9905 #include <linux/seqlock.h>
9906 +#include <linux/getcpu.h>
9907 +#include <linux/time.h>
9909 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9910 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9912 /* Definitions for CONFIG_GENERIC_TIME definitions */
9913 #define __section_vsyscall_gtod_data __attribute__ \
9914 @@ -31,7 +32,6 @@ enum vsyscall_num {
9915 #define VGETCPU_LSL 2
9917 extern int __vgetcpu_mode;
9918 -extern volatile unsigned long __jiffies;
9920 /* kernel space (writeable) */
9921 extern int vgetcpu_mode;
9922 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9924 extern void map_vsyscall(void);
9926 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9927 +extern time_t vtime(time_t *t);
9928 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9929 #endif /* __KERNEL__ */
9931 #endif /* _ASM_X86_VSYSCALL_H */
9932 diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9933 --- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9934 +++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9935 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9936 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9937 void (*find_smp_config)(void);
9938 void (*get_smp_config)(unsigned int early);
9943 * struct x86_init_resources - platform specific resource related ops
9944 @@ -42,7 +42,7 @@ struct x86_init_resources {
9945 void (*probe_roms)(void);
9946 void (*reserve_resources)(void);
9947 char *(*memory_setup)(void);
9952 * struct x86_init_irqs - platform specific interrupt setup
9953 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9954 void (*pre_vector_init)(void);
9955 void (*intr_init)(void);
9956 void (*trap_init)(void);
9961 * struct x86_init_oem - oem platform specific customizing functions
9962 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9963 struct x86_init_oem {
9964 void (*arch_setup)(void);
9965 void (*banner)(void);
9970 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9971 @@ -76,7 +76,7 @@ struct x86_init_oem {
9973 struct x86_init_mapping {
9974 void (*pagetable_reserve)(u64 start, u64 end);
9979 * struct x86_init_paging - platform specific paging functions
9980 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9981 struct x86_init_paging {
9982 void (*pagetable_setup_start)(pgd_t *base);
9983 void (*pagetable_setup_done)(pgd_t *base);
9988 * struct x86_init_timers - platform specific timer setup
9989 @@ -101,7 +101,7 @@ struct x86_init_timers {
9990 void (*tsc_pre_init)(void);
9991 void (*timer_init)(void);
9992 void (*wallclock_init)(void);
9997 * struct x86_init_iommu - platform specific iommu setup
9998 @@ -109,7 +109,7 @@ struct x86_init_timers {
10000 struct x86_init_iommu {
10001 int (*iommu_init)(void);
10006 * struct x86_init_pci - platform specific pci init functions
10007 @@ -123,7 +123,7 @@ struct x86_init_pci {
10009 void (*init_irq)(void);
10010 void (*fixup_irqs)(void);
10015 * struct x86_init_ops - functions for platform specific setup
10016 @@ -139,7 +139,7 @@ struct x86_init_ops {
10017 struct x86_init_timers timers;
10018 struct x86_init_iommu iommu;
10019 struct x86_init_pci pci;
10024 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10025 @@ -147,7 +147,7 @@ struct x86_init_ops {
10027 struct x86_cpuinit_ops {
10028 void (*setup_percpu_clockev)(void);
10033 * struct x86_platform_ops - platform specific runtime functions
10034 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10035 bool (*is_untracked_pat_range)(u64 start, u64 end);
10036 void (*nmi_init)(void);
10037 int (*i8042_detect)(void);
10043 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10044 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10045 void (*teardown_msi_irq)(unsigned int irq);
10046 void (*teardown_msi_irqs)(struct pci_dev *dev);
10050 extern struct x86_init_ops x86_init;
10051 extern struct x86_cpuinit_ops x86_cpuinit;
10052 diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10053 --- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10054 +++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10055 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10059 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10060 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10061 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10065 * Clear the xsave header first, so that reserved fields are
10066 * initialized to zero.
10067 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10069 u32 hmask = mask >> 32;
10071 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10072 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10073 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10076 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10078 ".section .fixup,\"ax\"\n"
10079 diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10080 --- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10081 +++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10082 @@ -224,7 +224,7 @@ config X86_HT
10084 config X86_32_LAZY_GS
10086 - depends on X86_32 && !CC_STACKPROTECTOR
10087 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10089 config ARCH_HWEIGHT_CFLAGS
10091 @@ -1022,7 +1022,7 @@ choice
10095 - depends on !X86_NUMAQ
10096 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10098 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10099 However, the address space of 32-bit x86 processors is only 4
10100 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
10104 - depends on !X86_NUMAQ
10105 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10107 Select this if you have a 32-bit processor and between 1 and 4
10108 gigabytes of physical RAM.
10109 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10111 default 0xB0000000 if VMSPLIT_3G_OPT
10112 default 0x80000000 if VMSPLIT_2G
10113 - default 0x78000000 if VMSPLIT_2G_OPT
10114 + default 0x70000000 if VMSPLIT_2G_OPT
10115 default 0x40000000 if VMSPLIT_1G
10118 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10121 bool "EFI runtime service support"
10123 + depends on ACPI && !PAX_KERNEXEC
10125 This enables the kernel to use EFI runtime services that are
10126 available (such as the EFI variable services).
10127 @@ -1487,6 +1487,7 @@ config SECCOMP
10129 config CC_STACKPROTECTOR
10130 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10131 + depends on X86_64 || !PAX_MEMORY_UDEREF
10133 This option turns on the -fstack-protector GCC feature. This
10134 feature puts, at the beginning of functions, a canary value on
10135 @@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10136 config PHYSICAL_START
10137 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10138 default "0x1000000"
10139 + range 0x400000 0x40000000
10141 This gives the physical address where the kernel is loaded.
10143 @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10144 config PHYSICAL_ALIGN
10145 hex "Alignment value to which kernel should be aligned" if X86_32
10146 default "0x1000000"
10147 + range 0x400000 0x1000000 if PAX_KERNEXEC
10148 range 0x2000 0x1000000
10150 This value puts the alignment restrictions on physical address
10151 @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10152 Say N if you want to disable CPU hotplug.
10157 prompt "Compat VDSO support"
10158 depends on X86_32 || IA32_EMULATION
10159 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10161 Map the 32-bit VDSO to the predictable old-style address too.
10163 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10164 --- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10165 +++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10166 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10168 config X86_F00F_BUG
10170 - depends on M586MMX || M586TSC || M586 || M486 || M386
10171 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10173 config X86_INVD_BUG
10175 @@ -358,7 +358,7 @@ config X86_POPAD_OK
10177 config X86_ALIGNMENT_16
10179 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10180 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10182 config X86_INTEL_USERCOPY
10184 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
10188 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10189 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10191 config X86_MINIMUM_CPU_FAMILY
10193 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10194 --- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10195 +++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10196 @@ -101,7 +101,7 @@ config X86_PTDUMP
10197 config DEBUG_RODATA
10198 bool "Write protect kernel read-only data structures"
10200 - depends on DEBUG_KERNEL
10201 + depends on DEBUG_KERNEL && BROKEN
10203 Mark the kernel read-only data as write-protected in the pagetables,
10204 in order to catch accidental (and incorrect) writes to such const
10205 @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10207 config DEBUG_SET_MODULE_RONX
10208 bool "Set loadable kernel module data as NX and text as RO"
10209 - depends on MODULES
10210 + depends on MODULES && BROKEN
10212 This option helps catch unintended modifications to loadable
10213 kernel module's text and read-only data. It also prevents execution
10214 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10215 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10216 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10217 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10218 $(call cc-option, -fno-stack-protector) \
10219 $(call cc-option, -mpreferred-stack-boundary=2)
10220 KBUILD_CFLAGS += $(call cc-option, -m32)
10221 +ifdef CONSTIFY_PLUGIN
10222 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10224 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10227 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10228 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10229 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10230 @@ -108,6 +108,9 @@ wakeup_code:
10231 /* Do any other stuff... */
10233 #ifndef CONFIG_64BIT
10234 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10237 /* This could also be done in C code... */
10238 movl pmode_cr3, %eax
10240 @@ -131,6 +134,7 @@ wakeup_code:
10241 movl pmode_cr0, %eax
10244 +# include "../../verify_cpu.S"
10247 pushw trampoline_segment
10248 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10249 --- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10250 +++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10251 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10252 header->trampoline_segment = trampoline_address() >> 4;
10254 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10256 + pax_open_kernel();
10257 early_gdt_descr.address =
10258 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10259 + pax_close_kernel();
10261 initial_gs = per_cpu_offset(smp_processor_id());
10263 initial_code = (unsigned long)wakeup_long64;
10264 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10265 --- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10266 +++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10267 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10268 # and restore the stack ... but you need gdt for this to work
10269 movl saved_context_esp, %esp
10271 - movl %cs:saved_magic, %eax
10272 - cmpl $0x12345678, %eax
10273 + cmpl $0x12345678, saved_magic
10276 # jump to place where we left off
10277 - movl saved_eip, %eax
10283 diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10284 --- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10285 +++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10286 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10287 if (!*poff || ptr < text || ptr >= text_end)
10289 /* turn DS segment override prefix into lock prefix */
10290 - if (*ptr == 0x3e)
10291 + if (*ktla_ktva(ptr) == 0x3e)
10292 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10294 mutex_unlock(&text_mutex);
10295 @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10296 if (!*poff || ptr < text || ptr >= text_end)
10298 /* turn lock prefix into DS segment override prefix */
10299 - if (*ptr == 0xf0)
10300 + if (*ktla_ktva(ptr) == 0xf0)
10301 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10303 mutex_unlock(&text_mutex);
10304 @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10306 BUG_ON(p->len > MAX_PATCH_LEN);
10307 /* prep the buffer with the original instructions */
10308 - memcpy(insnbuf, p->instr, p->len);
10309 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10310 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10311 (unsigned long)p->instr, p->len);
10313 @@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10315 free_init_pages("SMP alternatives",
10316 (unsigned long)__smp_locks,
10317 - (unsigned long)__smp_locks_end);
10318 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10322 @@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10323 * instructions. And on the local CPU you need to be protected again NMI or MCE
10324 * handlers seeing an inconsistent instruction while you patch.
10326 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10327 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10330 unsigned long flags;
10331 local_irq_save(flags);
10332 - memcpy(addr, opcode, len);
10334 + pax_open_kernel();
10335 + memcpy(ktla_ktva(addr), opcode, len);
10337 + pax_close_kernel();
10339 local_irq_restore(flags);
10340 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10341 that causes hangs on some VIA CPUs. */
10342 @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10344 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10346 - unsigned long flags;
10348 + unsigned char *vaddr = ktla_ktva(addr);
10349 struct page *pages[2];
10353 if (!core_kernel_text((unsigned long)addr)) {
10354 - pages[0] = vmalloc_to_page(addr);
10355 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10356 + pages[0] = vmalloc_to_page(vaddr);
10357 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10359 - pages[0] = virt_to_page(addr);
10360 + pages[0] = virt_to_page(vaddr);
10361 WARN_ON(!PageReserved(pages[0]));
10362 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10363 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10366 - local_irq_save(flags);
10367 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10369 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10370 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10371 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10372 - clear_fixmap(FIX_TEXT_POKE0);
10374 - clear_fixmap(FIX_TEXT_POKE1);
10375 - local_flush_tlb();
10377 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10378 - that causes hangs on some VIA CPUs. */
10379 + text_poke_early(addr, opcode, len);
10380 for (i = 0; i < len; i++)
10381 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10382 - local_irq_restore(flags);
10383 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10387 @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10388 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10390 #ifdef CONFIG_X86_64
10391 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10392 +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10394 -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10395 +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10398 void __init arch_init_ideal_nop5(void)
10399 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10400 --- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10401 +++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-05 19:44:33.000000000 -0400
10402 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10403 apic_write(APIC_ESR, 0);
10404 v1 = apic_read(APIC_ESR);
10406 - atomic_inc(&irq_err_count);
10407 + atomic_inc_unchecked(&irq_err_count);
10410 * Here is what the APIC error bits mean:
10411 @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10412 u16 *bios_cpu_apicid;
10413 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10415 + pax_track_stack();
10417 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10418 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10420 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10421 --- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10422 +++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10423 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10424 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10426 if (!ioapic_entries)
10430 for (apic = 0; apic < nr_ioapics; apic++) {
10431 ioapic_entries[apic] =
10432 @@ -640,7 +640,7 @@ nomem:
10433 kfree(ioapic_entries[apic]);
10434 kfree(ioapic_entries);
10441 @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10443 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10445 -void lock_vector_lock(void)
10446 +void lock_vector_lock(void) __acquires(vector_lock)
10448 /* Used to the online set of cpus does not change
10449 * during assign_irq_vector.
10450 @@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10451 raw_spin_lock(&vector_lock);
10454 -void unlock_vector_lock(void)
10455 +void unlock_vector_lock(void) __releases(vector_lock)
10457 raw_spin_unlock(&vector_lock);
10459 @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10463 -atomic_t irq_mis_count;
10464 +atomic_unchecked_t irq_mis_count;
10467 * IO-APIC versions below 0x20 don't support EOI register.
10468 @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10471 if (!(v & (1 << (i & 0x1f)))) {
10472 - atomic_inc(&irq_mis_count);
10473 + atomic_inc_unchecked(&irq_mis_count);
10475 eoi_ioapic_irq(irq, cfg);
10477 diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10478 --- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10479 +++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10480 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10481 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10482 * even though they are called in protected mode.
10484 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10485 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10486 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10488 static const char driver_version[] = "1.16ac"; /* no spaces */
10489 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10491 gdt = get_cpu_gdt_table(cpu);
10492 save_desc_40 = gdt[0x40 / 8];
10494 + pax_open_kernel();
10495 gdt[0x40 / 8] = bad_bios_desc;
10496 + pax_close_kernel();
10498 apm_irq_save(flags);
10500 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10502 APM_DO_RESTORE_SEGS;
10503 apm_irq_restore(flags);
10505 + pax_open_kernel();
10506 gdt[0x40 / 8] = save_desc_40;
10507 + pax_close_kernel();
10511 return call->eax & 0xff;
10512 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10514 gdt = get_cpu_gdt_table(cpu);
10515 save_desc_40 = gdt[0x40 / 8];
10517 + pax_open_kernel();
10518 gdt[0x40 / 8] = bad_bios_desc;
10519 + pax_close_kernel();
10521 apm_irq_save(flags);
10523 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10525 APM_DO_RESTORE_SEGS;
10526 apm_irq_restore(flags);
10528 + pax_open_kernel();
10529 gdt[0x40 / 8] = save_desc_40;
10530 + pax_close_kernel();
10535 @@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10536 * code to that CPU.
10538 gdt = get_cpu_gdt_table(0);
10540 + pax_open_kernel();
10541 set_desc_base(&gdt[APM_CS >> 3],
10542 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10543 set_desc_base(&gdt[APM_CS_16 >> 3],
10544 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10545 set_desc_base(&gdt[APM_DS >> 3],
10546 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10547 + pax_close_kernel();
10549 proc_create("apm", 0, NULL, &apm_file_ops);
10551 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10552 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10553 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10554 @@ -69,6 +69,7 @@ int main(void)
10558 + DEFINE(TSS_size, sizeof(struct tss_struct));
10559 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10562 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10563 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10564 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10565 @@ -33,6 +33,8 @@ void common(void) {
10566 OFFSET(TI_status, thread_info, status);
10567 OFFSET(TI_addr_limit, thread_info, addr_limit);
10568 OFFSET(TI_preempt_count, thread_info, preempt_count);
10569 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10570 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10573 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10574 @@ -53,8 +55,26 @@ void common(void) {
10575 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10576 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10577 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10579 +#ifdef CONFIG_PAX_KERNEXEC
10580 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10584 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10585 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10586 +#ifdef CONFIG_X86_64
10587 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10594 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10595 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10596 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10600 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10601 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10602 --- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10603 +++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10604 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10607 /* AMD errata T13 (order #21922) */
10608 - if ((c->x86 == 6)) {
10609 + if (c->x86 == 6) {
10611 if (c->x86_model == 3 && c->x86_mask == 0)
10613 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10614 --- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10615 +++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10616 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10618 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10620 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10621 -#ifdef CONFIG_X86_64
10623 - * We need valid kernel segments for data and code in long mode too
10624 - * IRET will check the segment types kkeil 2000/10/28
10625 - * Also sysret mandates a special GDT layout
10627 - * TLS descriptors are currently at a different place compared to i386.
10628 - * Hopefully nobody expects them at a fixed place (Wine?)
10630 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10631 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10632 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10633 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10634 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10635 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10637 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10638 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10639 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10640 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10642 - * Segments used for calling PnP BIOS have byte granularity.
10643 - * They code segments and data segments have fixed 64k limits,
10644 - * the transfer segment sizes are set at run time.
10646 - /* 32-bit code */
10647 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10648 - /* 16-bit code */
10649 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10650 - /* 16-bit data */
10651 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10652 - /* 16-bit data */
10653 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10654 - /* 16-bit data */
10655 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10657 - * The APM segments have byte granularity and their bases
10658 - * are set at run time. All have 64k limits.
10660 - /* 32-bit code */
10661 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662 - /* 16-bit code */
10663 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10665 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10667 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10668 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10669 - GDT_STACK_CANARY_INIT
10672 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10674 static int __init x86_xsave_setup(char *s)
10676 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10677 @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10679 struct desc_ptr gdt_descr;
10681 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10682 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10683 gdt_descr.size = GDT_SIZE - 1;
10684 load_gdt(&gdt_descr);
10685 /* Reload the per-cpu base */
10686 @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10687 /* Filter out anything that depends on CPUID levels we don't have */
10688 filter_cpuid_features(c, true);
10690 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10691 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10694 /* If the model name is still unset, do table lookup. */
10695 if (!c->x86_model_id[0]) {
10697 @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10699 __setup("clearcpuid=", setup_disablecpuid);
10701 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10702 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10704 #ifdef CONFIG_X86_64
10705 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10707 @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10708 EXPORT_PER_CPU_SYMBOL(current_task);
10710 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10711 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10712 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10713 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10715 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10716 @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10718 memset(regs, 0, sizeof(struct pt_regs));
10719 regs->fs = __KERNEL_PERCPU;
10720 - regs->gs = __KERNEL_STACK_CANARY;
10721 + savesegment(gs, regs->gs);
10725 @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10728 cpu = stack_smp_processor_id();
10729 - t = &per_cpu(init_tss, cpu);
10730 + t = init_tss + cpu;
10731 oist = &per_cpu(orig_ist, cpu);
10734 @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10735 switch_to_new_gdt(cpu);
10736 loadsegment(fs, 0);
10738 - load_idt((const struct desc_ptr *)&idt_descr);
10739 + load_idt(&idt_descr);
10741 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10743 @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10744 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10747 - x86_configure_nx();
10751 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10753 int cpu = smp_processor_id();
10754 struct task_struct *curr = current;
10755 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10756 + struct tss_struct *t = init_tss + cpu;
10757 struct thread_struct *thread = &curr->thread;
10759 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10760 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10761 --- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10762 +++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10763 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10764 * Update the IDT descriptor and reload the IDT so that
10765 * it uses the read-only mapped virtual address.
10767 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10768 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10769 load_idt(&idt_descr);
10772 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10773 --- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10774 +++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10775 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10776 CFLAGS_REMOVE_perf_event.o = -pg
10779 -# Make sure load_percpu_segment has no stackprotector
10780 -nostackp := $(call cc-option, -fno-stack-protector)
10781 -CFLAGS_common.o := $(nostackp)
10783 obj-y := intel_cacheinfo.o scattered.o topology.o
10784 obj-y += proc.o capflags.o powerflags.o common.o
10785 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10786 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10787 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10788 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10790 #include <asm/ipi.h>
10791 #include <asm/mce.h>
10792 #include <asm/msr.h>
10793 +#include <asm/local.h>
10795 #include "mce-internal.h"
10797 @@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10798 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10801 - if (m->cs == __KERNEL_CS)
10802 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10803 print_symbol("{%s}", m->ip);
10806 @@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10808 #define PANIC_TIMEOUT 5 /* 5 seconds */
10810 -static atomic_t mce_paniced;
10811 +static atomic_unchecked_t mce_paniced;
10813 static int fake_panic;
10814 -static atomic_t mce_fake_paniced;
10815 +static atomic_unchecked_t mce_fake_paniced;
10817 /* Panic in progress. Enable interrupts and wait for final IPI */
10818 static void wait_for_panic(void)
10819 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10821 * Make sure only one CPU runs in machine check panic
10823 - if (atomic_inc_return(&mce_paniced) > 1)
10824 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10828 @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10831 /* Don't log too much for fake panic */
10832 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10833 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10836 /* First print corrected ones that are still unlogged */
10837 @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10838 * might have been modified by someone else.
10841 - if (atomic_read(&mce_paniced))
10842 + if (atomic_read_unchecked(&mce_paniced))
10844 if (!monarch_timeout)
10846 @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10849 static DEFINE_SPINLOCK(mce_state_lock);
10850 -static int open_count; /* #times opened */
10851 +static local_t open_count; /* #times opened */
10852 static int open_exclu; /* already open exclusive? */
10854 static int mce_open(struct inode *inode, struct file *file)
10856 spin_lock(&mce_state_lock);
10858 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10859 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10860 spin_unlock(&mce_state_lock);
10863 @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10865 if (file->f_flags & O_EXCL)
10868 + local_inc(&open_count);
10870 spin_unlock(&mce_state_lock);
10872 @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10874 spin_lock(&mce_state_lock);
10877 + local_dec(&open_count);
10880 spin_unlock(&mce_state_lock);
10881 @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10882 static void mce_reset(void)
10885 - atomic_set(&mce_fake_paniced, 0);
10886 + atomic_set_unchecked(&mce_fake_paniced, 0);
10887 atomic_set(&mce_executing, 0);
10888 atomic_set(&mce_callin, 0);
10889 atomic_set(&global_nwo, 0);
10890 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10891 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10892 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10893 @@ -215,7 +215,9 @@ static int inject_init(void)
10894 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10896 printk(KERN_INFO "Machine check injector initialized\n");
10897 - mce_chrdev_ops.write = mce_write;
10898 + pax_open_kernel();
10899 + *(void **)&mce_chrdev_ops.write = mce_write;
10900 + pax_close_kernel();
10901 register_die_notifier(&mce_raise_nb);
10904 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10905 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10906 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10907 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10908 u64 size_or_mask, size_and_mask;
10909 static bool mtrr_aps_delayed_init;
10911 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10912 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10914 const struct mtrr_ops *mtrr_if;
10916 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10917 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10918 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10920 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10924 - u32 use_intel_if;
10925 + const u32 vendor;
10926 + const u32 use_intel_if;
10927 void (*set)(unsigned int reg, unsigned long base,
10928 unsigned long size, mtrr_type type);
10929 void (*set_all)(void);
10930 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10931 --- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10932 +++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10933 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10934 int i, j, w, wmax, num = 0;
10935 struct hw_perf_event *hwc;
10937 + pax_track_stack();
10939 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10941 for (i = 0; i < n; i++) {
10942 @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10945 perf_callchain_store(entry, frame.return_address);
10946 - fp = frame.next_frame;
10947 + fp = (__force const void __user *)frame.next_frame;
10951 diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10952 --- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10953 +++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10954 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10957 #ifdef CONFIG_X86_32
10958 - if (!user_mode_vm(regs)) {
10959 + if (!user_mode(regs)) {
10960 crash_fixup_ss_esp(&fixed_regs, regs);
10961 regs = &fixed_regs;
10963 diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
10964 --- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
10965 +++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
10968 #define DOUBLEFAULT_STACKSIZE (1024)
10969 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10970 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10971 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10973 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10975 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10976 unsigned long gdt, tss;
10978 store_gdt(&gdt_desc);
10979 - gdt = gdt_desc.address;
10980 + gdt = (unsigned long)gdt_desc.address;
10982 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10984 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10985 /* 0x2 bit is always set */
10986 .flags = X86_EFLAGS_SF | 0x2,
10989 + .es = __KERNEL_DS,
10993 + .ds = __KERNEL_DS,
10994 .fs = __KERNEL_PERCPU,
10996 .__cr3 = __pa_nodebug(swapper_pg_dir),
10997 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
10998 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
10999 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11000 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11001 bp = stack_frame(task, regs);
11004 - struct thread_info *context;
11005 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11007 - context = (struct thread_info *)
11008 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11009 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11010 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11012 - stack = (unsigned long *)context->previous_esp;
11014 + if (stack_start == task_stack_page(task))
11016 + stack = *(unsigned long **)stack_start;
11017 if (ops->stack(data, "IRQ") < 0)
11019 touch_nmi_watchdog();
11020 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11021 * When in-kernel, we also print out the stack and code at the
11022 * time of the fault..
11024 - if (!user_mode_vm(regs)) {
11025 + if (!user_mode(regs)) {
11026 unsigned int code_prologue = code_bytes * 43 / 64;
11027 unsigned int code_len = code_bytes;
11030 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11032 printk(KERN_EMERG "Stack:\n");
11033 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11035 printk(KERN_EMERG "Code: ");
11037 - ip = (u8 *)regs->ip - code_prologue;
11038 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11039 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11040 /* try starting at IP */
11041 - ip = (u8 *)regs->ip;
11042 + ip = (u8 *)regs->ip + cs_base;
11043 code_len = code_len - code_prologue + 1;
11045 for (i = 0; i < code_len; i++, ip++) {
11046 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11047 printk(" Bad EIP value.");
11050 - if (ip == (u8 *)regs->ip)
11051 + if (ip == (u8 *)regs->ip + cs_base)
11052 printk("<%02x> ", c);
11054 printk("%02x ", c);
11055 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11057 unsigned short ud2;
11059 + ip = ktla_ktva(ip);
11060 if (ip < PAGE_OFFSET)
11062 if (probe_kernel_address((unsigned short *)ip, ud2))
11063 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11064 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11065 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11066 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11067 unsigned long *irq_stack_end =
11068 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11070 - struct thread_info *tinfo;
11072 unsigned long dummy;
11073 + void *stack_start;
11077 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11078 * current stack address. If the stacks consist of nested
11081 - tinfo = task_thread_info(task);
11084 unsigned long *estack_end;
11086 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11089 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11090 if (ops->stack(data, id) < 0)
11093 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11094 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11095 data, estack_end, &graph);
11096 ops->stack(data, "<EOE>");
11098 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11099 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11100 if (ops->stack(data, "IRQ") < 0)
11102 - bp = ops->walk_stack(tinfo, stack, bp,
11103 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11104 ops, data, irq_stack_end, &graph);
11106 * We link to the next stack (which would be
11107 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11109 * This handles the process stack:
11111 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11112 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11113 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11116 EXPORT_SYMBOL(dump_trace);
11117 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11118 --- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11119 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11121 * Copyright (C) 1991, 1992 Linus Torvalds
11122 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11124 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11125 +#define __INCLUDED_BY_HIDESYM 1
11127 #include <linux/kallsyms.h>
11128 #include <linux/kprobes.h>
11129 #include <linux/uaccess.h>
11130 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11132 print_ftrace_graph_addr(unsigned long addr, void *data,
11133 const struct stacktrace_ops *ops,
11134 - struct thread_info *tinfo, int *graph)
11135 + struct task_struct *task, int *graph)
11137 - struct task_struct *task = tinfo->task;
11138 unsigned long ret_addr;
11139 int index = task->curr_ret_stack;
11141 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11143 print_ftrace_graph_addr(unsigned long addr, void *data,
11144 const struct stacktrace_ops *ops,
11145 - struct thread_info *tinfo, int *graph)
11146 + struct task_struct *task, int *graph)
11150 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11151 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11154 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11155 - void *p, unsigned int size, void *end)
11156 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11160 if (p < end && p >= (end-THREAD_SIZE))
11162 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11166 -print_context_stack(struct thread_info *tinfo,
11167 +print_context_stack(struct task_struct *task, void *stack_start,
11168 unsigned long *stack, unsigned long bp,
11169 const struct stacktrace_ops *ops, void *data,
11170 unsigned long *end, int *graph)
11172 struct stack_frame *frame = (struct stack_frame *)bp;
11174 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11175 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11176 unsigned long addr;
11179 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11181 ops->address(data, addr, 0);
11183 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11184 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11188 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11189 EXPORT_SYMBOL_GPL(print_context_stack);
11192 -print_context_stack_bp(struct thread_info *tinfo,
11193 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11194 unsigned long *stack, unsigned long bp,
11195 const struct stacktrace_ops *ops, void *data,
11196 unsigned long *end, int *graph)
11197 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11198 struct stack_frame *frame = (struct stack_frame *)bp;
11199 unsigned long *ret_addr = &frame->return_address;
11201 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11202 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11203 unsigned long addr = *ret_addr;
11205 if (!__kernel_text_address(addr))
11206 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11207 ops->address(data, addr, 1);
11208 frame = frame->next_frame;
11209 ret_addr = &frame->return_address;
11210 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11211 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11214 return (unsigned long)frame;
11215 @@ -202,7 +202,7 @@ void dump_stack(void)
11217 bp = stack_frame(current, NULL);
11218 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11219 - current->pid, current->comm, print_tainted(),
11220 + task_pid_nr(current), current->comm, print_tainted(),
11221 init_utsname()->release,
11222 (int)strcspn(init_utsname()->version, " "),
11223 init_utsname()->version);
11224 @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11226 EXPORT_SYMBOL_GPL(oops_begin);
11228 +extern void gr_handle_kernel_exploit(void);
11230 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11232 if (regs && kexec_should_crash(current))
11233 @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11234 panic("Fatal exception in interrupt");
11236 panic("Fatal exception");
11239 + gr_handle_kernel_exploit();
11241 + do_group_exit(signr);
11244 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11245 @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11247 show_registers(regs);
11248 #ifdef CONFIG_X86_32
11249 - if (user_mode_vm(regs)) {
11250 + if (user_mode(regs)) {
11252 ss = regs->ss & 0xffff;
11254 @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11255 unsigned long flags = oops_begin();
11258 - if (!user_mode_vm(regs))
11259 + if (!user_mode(regs))
11260 report_bug(regs->ip, regs);
11262 if (__die(str, regs, err))
11263 diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11264 --- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11265 +++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11267 #include <linux/pci_regs.h>
11268 #include <linux/pci_ids.h>
11269 #include <linux/errno.h>
11270 +#include <linux/sched.h>
11271 #include <asm/io.h>
11272 #include <asm/processor.h>
11273 #include <asm/fcntl.h>
11274 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11278 + pax_track_stack();
11281 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11282 early_console->write(early_console, buf, n);
11283 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11284 --- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11285 +++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11286 @@ -185,13 +185,146 @@
11287 /*CFI_REL_OFFSET gs, PT_GS*/
11289 .macro SET_KERNEL_GS reg
11291 +#ifdef CONFIG_CC_STACKPROTECTOR
11292 movl $(__KERNEL_STACK_CANARY), \reg
11293 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11294 + movl $(__USER_DS), \reg
11302 #endif /* CONFIG_X86_32_LAZY_GS */
11305 +.macro pax_enter_kernel
11306 +#ifdef CONFIG_PAX_KERNEXEC
11307 + call pax_enter_kernel
11311 +.macro pax_exit_kernel
11312 +#ifdef CONFIG_PAX_KERNEXEC
11313 + call pax_exit_kernel
11317 +#ifdef CONFIG_PAX_KERNEXEC
11318 +ENTRY(pax_enter_kernel)
11319 +#ifdef CONFIG_PARAVIRT
11322 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11330 + cmp $__KERNEL_CS, %esi
11332 + ljmp $__KERNEL_CS, $3f
11333 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11335 +#ifdef CONFIG_PARAVIRT
11337 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11342 +#ifdef CONFIG_PARAVIRT
11347 +ENDPROC(pax_enter_kernel)
11349 +ENTRY(pax_exit_kernel)
11350 +#ifdef CONFIG_PARAVIRT
11355 + cmp $__KERNEXEC_KERNEL_CS, %esi
11357 +#ifdef CONFIG_PARAVIRT
11358 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11364 + ljmp $__KERNEL_CS, $1f
11366 +#ifdef CONFIG_PARAVIRT
11368 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11373 +#ifdef CONFIG_PARAVIRT
11378 +ENDPROC(pax_exit_kernel)
11381 +.macro pax_erase_kstack
11382 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11383 + call pax_erase_kstack
11387 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11389 + * ebp: thread_info
11390 + * ecx, edx: can be clobbered
11392 +ENTRY(pax_erase_kstack)
11396 + mov TI_lowest_stack(%ebp), %edi
11397 + mov $-0xBEEF, %eax
11401 + and $THREAD_SIZE_asm - 1, %ecx
11420 + mov TI_task_thread_sp0(%ebp), %edi
11422 + mov %edi, TI_lowest_stack(%ebp)
11427 +ENDPROC(pax_erase_kstack)
11430 +.macro __SAVE_ALL _DS
11434 @@ -214,7 +347,7 @@
11435 CFI_REL_OFFSET ecx, 0
11437 CFI_REL_OFFSET ebx, 0
11438 - movl $(__USER_DS), %edx
11442 movl $(__KERNEL_PERCPU), %edx
11443 @@ -222,6 +355,15 @@
11448 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11449 + __SAVE_ALL __KERNEL_DS
11452 + __SAVE_ALL __USER_DS
11456 .macro RESTORE_INT_REGS
11459 @@ -332,7 +474,15 @@ check_userspace:
11460 movb PT_CS(%esp), %al
11461 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11462 cmpl $USER_RPL, %eax
11464 +#ifdef CONFIG_PAX_KERNEXEC
11465 + jae resume_userspace
11468 + jmp resume_kernel
11470 jb resume_kernel # not returning to v8086 or userspace
11473 ENTRY(resume_userspace)
11475 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11476 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11477 # int/exception return?
11480 + jmp restore_all_pax
11481 END(ret_from_exception)
11483 #ifdef CONFIG_PREEMPT
11484 @@ -394,23 +544,34 @@ sysenter_past_esp:
11485 /*CFI_REL_OFFSET cs, 0*/
11487 * Push current_thread_info()->sysenter_return to the stack.
11488 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11489 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11491 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11493 CFI_REL_OFFSET eip, 0
11497 + GET_THREAD_INFO(%ebp)
11498 + movl TI_sysenter_return(%ebp),%ebp
11499 + movl %ebp,PT_EIP(%esp)
11500 ENABLE_INTERRUPTS(CLBR_NONE)
11503 * Load the potential sixth argument from user stack.
11504 * Careful about security.
11506 + movl PT_OLDESP(%esp),%ebp
11508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11509 + mov PT_OLDSS(%esp),%ds
11510 +1: movl %ds:(%ebp),%ebp
11514 cmpl $__PAGE_OFFSET-3,%ebp
11516 1: movl (%ebp),%ebp
11519 movl %ebp,PT_EBP(%esp)
11520 .section __ex_table,"a"
11522 @@ -433,12 +594,23 @@ sysenter_do_call:
11523 testl $_TIF_ALLWORK_MASK, %ecx
11527 +#ifdef CONFIG_PAX_RANDKSTACK
11529 + call pax_randomize_kstack
11535 /* if something modifies registers it must also disable sysexit */
11536 movl PT_EIP(%esp), %edx
11537 movl PT_OLDESP(%esp), %ecx
11540 1: mov PT_FS(%esp), %fs
11541 +2: mov PT_DS(%esp), %ds
11542 +3: mov PT_ES(%esp), %es
11544 ENABLE_INTERRUPTS_SYSEXIT
11546 @@ -455,6 +627,9 @@ sysenter_audit:
11547 movl %eax,%edx /* 2nd arg: syscall number */
11548 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11549 call audit_syscall_entry
11554 movl PT_EAX(%esp),%eax /* reload syscall number */
11555 jmp sysenter_do_call
11556 @@ -481,11 +656,17 @@ sysexit_audit:
11559 .pushsection .fixup,"ax"
11560 -2: movl $0,PT_FS(%esp)
11561 +4: movl $0,PT_FS(%esp)
11563 +5: movl $0,PT_DS(%esp)
11565 +6: movl $0,PT_ES(%esp)
11567 .section __ex_table,"a"
11575 ENDPROC(ia32_sysenter_target)
11576 @@ -518,6 +699,14 @@ syscall_exit:
11577 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11578 jne syscall_exit_work
11582 +#ifdef CONFIG_PAX_RANDKSTACK
11583 + call pax_randomize_kstack
11590 restore_all_notrace:
11591 @@ -577,14 +766,21 @@ ldt_ss:
11592 * compensating for the offset by changing to the ESPFIX segment with
11593 * a base address that matches for the difference.
11595 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11596 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11597 mov %esp, %edx /* load kernel esp */
11598 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11599 mov %dx, %ax /* eax: new kernel esp */
11600 sub %eax, %edx /* offset (low word is 0) */
11602 + movl PER_CPU_VAR(cpu_number), %ebx
11603 + shll $PAGE_SHIFT_asm, %ebx
11604 + addl $cpu_gdt_table, %ebx
11606 + movl $cpu_gdt_table, %ebx
11609 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11610 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11611 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11612 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11613 pushl_cfi $__ESPFIX_SS
11614 pushl_cfi %eax /* new kernel esp */
11615 /* Disable interrupts, but do not irqtrace this section: we
11616 @@ -613,29 +809,23 @@ work_resched:
11617 movl TI_flags(%ebp), %ecx
11618 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11619 # than syscall tracing?
11621 + jz restore_all_pax
11622 testb $_TIF_NEED_RESCHED, %cl
11625 work_notifysig: # deal with pending signals and
11626 # notify-resume requests
11629 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11631 - jne work_notifysig_v86 # returning to kernel-space or
11632 + jz 1f # returning to kernel-space or
11635 - call do_notify_resume
11636 - jmp resume_userspace_sig
11639 -work_notifysig_v86:
11640 pushl_cfi %ecx # save ti_flags for do_notify_resume
11641 call save_v86_state # %eax contains pt_regs pointer
11649 call do_notify_resume
11650 @@ -648,6 +838,9 @@ syscall_trace_entry:
11651 movl $-ENOSYS,PT_EAX(%esp)
11653 call syscall_trace_enter
11657 /* What it returned is what we'll actually use. */
11658 cmpl $(nr_syscalls), %eax
11660 @@ -670,6 +863,10 @@ END(syscall_exit_work)
11662 RING0_INT_FRAME # can't unwind into user space anyway
11664 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11668 GET_THREAD_INFO(%ebp)
11669 movl $-EFAULT,PT_EAX(%esp)
11670 jmp resume_userspace
11671 @@ -752,6 +949,36 @@ ptregs_clone:
11673 ENDPROC(ptregs_clone)
11676 +ENTRY(kernel_execve)
11679 + sub $PT_OLDSS+4,%esp
11683 + lea 3*4(%esp),%edi
11684 + mov $PT_OLDSS/4+1,%ecx
11690 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11694 + CFI_ADJUST_CFA_OFFSET -4
11695 + GET_THREAD_INFO(%ebp)
11698 + add $PT_OLDSS+4,%esp
11699 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11703 +ENDPROC(kernel_execve)
11705 .macro FIXUP_ESPFIX_STACK
11707 * Switch back for ESPFIX stack to the normal zerobased stack
11708 @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11709 * normal stack and adjusts ESP with the matching offset.
11711 /* fixup the stack */
11712 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11713 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11715 + movl PER_CPU_VAR(cpu_number), %ebx
11716 + shll $PAGE_SHIFT_asm, %ebx
11717 + addl $cpu_gdt_table, %ebx
11719 + movl $cpu_gdt_table, %ebx
11721 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11722 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11724 addl %esp, %eax /* the adjusted stack pointer */
11725 pushl_cfi $__KERNEL_DS
11726 @@ -1213,7 +1447,6 @@ return_to_handler:
11730 -.section .rodata,"a"
11731 #include "syscall_table_32.S"
11733 syscall_table_size=(.-sys_call_table)
11734 @@ -1259,9 +1492,12 @@ error_code:
11735 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11738 - movl $(__USER_DS), %ecx
11739 + movl $(__KERNEL_DS), %ecx
11746 movl %esp,%eax # pt_regs pointer
11748 @@ -1346,6 +1582,9 @@ nmi_stack_correct:
11749 xorl %edx,%edx # zero error code
11750 movl %esp,%eax # pt_regs pointer
11755 jmp restore_all_notrace
11758 @@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11759 FIXUP_ESPFIX_STACK # %eax == %esp
11760 xorl %edx,%edx # zero error code
11766 lss 12+4(%esp), %esp # back to espfix stack
11767 CFI_ADJUST_CFA_OFFSET -24
11768 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11769 --- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11770 +++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11772 #include <asm/paravirt.h>
11773 #include <asm/ftrace.h>
11774 #include <asm/percpu.h>
11775 +#include <asm/pgtable.h>
11777 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11778 #include <linux/elf-em.h>
11779 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11780 ENDPROC(native_usergs_sysret64)
11781 #endif /* CONFIG_PARAVIRT */
11783 + .macro ljmpq sel, off
11784 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11785 + .byte 0x48; ljmp *1234f(%rip)
11786 + .pushsection .rodata
11788 + 1234: .quad \off; .word \sel
11797 + .macro pax_enter_kernel
11798 +#ifdef CONFIG_PAX_KERNEXEC
11799 + call pax_enter_kernel
11803 + .macro pax_exit_kernel
11804 +#ifdef CONFIG_PAX_KERNEXEC
11805 + call pax_exit_kernel
11809 +#ifdef CONFIG_PAX_KERNEXEC
11810 +ENTRY(pax_enter_kernel)
11813 +#ifdef CONFIG_PARAVIRT
11814 + PV_SAVE_REGS(CLBR_RDI)
11821 + cmp $__KERNEL_CS,%edi
11823 + ljmpq __KERNEL_CS,3f
11824 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11825 +2: SET_RDI_INTO_CR0
11828 +#ifdef CONFIG_PARAVIRT
11829 + PV_RESTORE_REGS(CLBR_RDI)
11834 +ENDPROC(pax_enter_kernel)
11836 +ENTRY(pax_exit_kernel)
11839 +#ifdef CONFIG_PARAVIRT
11840 + PV_SAVE_REGS(CLBR_RDI)
11844 + cmp $__KERNEXEC_KERNEL_CS,%edi
11848 + ljmpq __KERNEL_CS,1f
11849 +1: SET_RDI_INTO_CR0
11852 +#ifdef CONFIG_PARAVIRT
11853 + PV_RESTORE_REGS(CLBR_RDI);
11858 +ENDPROC(pax_exit_kernel)
11861 + .macro pax_enter_kernel_user
11862 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11863 + call pax_enter_kernel_user
11867 + .macro pax_exit_kernel_user
11868 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11869 + call pax_exit_kernel_user
11871 +#ifdef CONFIG_PAX_RANDKSTACK
11873 + call pax_randomize_kstack
11876 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11877 + call pax_erase_kstack
11881 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11882 +ENTRY(pax_enter_kernel_user)
11886 +#ifdef CONFIG_PARAVIRT
11887 + PV_SAVE_REGS(CLBR_RDI)
11892 + add $__START_KERNEL_map,%rbx
11893 + sub phys_base(%rip),%rbx
11895 +#ifdef CONFIG_PARAVIRT
11897 + cmpl $0, pv_info+PARAVIRT_enabled
11900 + .rept USER_PGD_PTRS
11901 + mov i*8(%rbx),%rsi
11903 + lea i*8(%rbx),%rdi
11904 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11912 + .rept USER_PGD_PTRS
11913 + movb $0,i*8(%rbx)
11917 +#ifdef CONFIG_PARAVIRT
11922 +#ifdef CONFIG_PAX_KERNEXEC
11928 +#ifdef CONFIG_PARAVIRT
11929 + PV_RESTORE_REGS(CLBR_RDI)
11935 +ENDPROC(pax_enter_kernel_user)
11937 +ENTRY(pax_exit_kernel_user)
11940 +#ifdef CONFIG_PARAVIRT
11942 + PV_SAVE_REGS(CLBR_RDI)
11945 +#ifdef CONFIG_PAX_KERNEXEC
11952 + add $__START_KERNEL_map,%rdi
11953 + sub phys_base(%rip),%rdi
11955 +#ifdef CONFIG_PARAVIRT
11956 + cmpl $0, pv_info+PARAVIRT_enabled
11960 + .rept USER_PGD_PTRS
11961 + mov i*8(%rbx),%rsi
11963 + lea i*8(%rbx),%rdi
11964 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11972 + .rept USER_PGD_PTRS
11973 + movb $0x67,i*8(%rdi)
11977 +#ifdef CONFIG_PARAVIRT
11978 +2: PV_RESTORE_REGS(CLBR_RDI)
11984 +ENDPROC(pax_exit_kernel_user)
11987 + .macro pax_erase_kstack
11988 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11989 + call pax_erase_kstack
11993 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11995 + * r10: thread_info
11996 + * rcx, rdx: can be clobbered
11998 +ENTRY(pax_erase_kstack)
12002 + GET_THREAD_INFO(%r10)
12003 + mov TI_lowest_stack(%r10), %rdi
12004 + mov $-0xBEEF, %rax
12008 + and $THREAD_SIZE_asm - 1, %ecx
12027 + mov TI_task_thread_sp0(%r10), %rdi
12029 + mov %rdi, TI_lowest_stack(%r10)
12034 +ENDPROC(pax_erase_kstack)
12037 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12038 #ifdef CONFIG_TRACE_IRQFLAGS
12039 @@ -318,7 +572,7 @@ ENTRY(save_args)
12040 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12041 movq_cfi rbp, 8 /* push %rbp */
12042 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12043 - testl $3, CS(%rdi)
12044 + testb $3, CS(%rdi)
12048 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12052 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12053 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12054 je int_ret_from_sys_call
12056 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12057 @@ -455,7 +709,7 @@ END(ret_from_fork)
12059 CFI_STARTPROC simple
12061 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12062 + CFI_DEF_CFA rsp,0
12063 CFI_REGISTER rip,rcx
12064 /*CFI_REGISTER rflags,r11*/
12065 SWAPGS_UNSAFE_STACK
12066 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12068 movq %rsp,PER_CPU_VAR(old_rsp)
12069 movq PER_CPU_VAR(kernel_stack),%rsp
12070 + pax_enter_kernel_user
12072 * No need to follow this irqs off/on section - it's straight
12075 ENABLE_INTERRUPTS(CLBR_NONE)
12078 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12079 movq %rcx,RIP-ARGOFFSET(%rsp)
12080 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12081 @@ -502,6 +757,7 @@ sysret_check:
12085 + pax_exit_kernel_user
12087 * sysretq will re-enable interrupts:
12089 @@ -560,6 +816,9 @@ auditsys:
12090 movq %rax,%rsi /* 2nd arg: syscall number */
12091 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12092 call audit_syscall_entry
12096 LOAD_ARGS 0 /* reload call-clobbered registers */
12097 jmp system_call_fastpath
12099 @@ -590,6 +849,9 @@ tracesys:
12100 FIXUP_TOP_OF_STACK %rdi
12102 call syscall_trace_enter
12107 * Reload arg registers from stack in case ptrace changed them.
12108 * We don't reload %rax because syscall_trace_enter() returned
12109 @@ -611,7 +873,7 @@ tracesys:
12110 GLOBAL(int_ret_from_sys_call)
12111 DISABLE_INTERRUPTS(CLBR_NONE)
12113 - testl $3,CS-ARGOFFSET(%rsp)
12114 + testb $3,CS-ARGOFFSET(%rsp)
12115 je retint_restore_args
12116 movl $_TIF_ALLWORK_MASK,%edi
12117 /* edi: mask to check */
12118 @@ -793,6 +1055,16 @@ END(interrupt)
12119 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12123 + testb $3, CS(%rdi)
12127 +1: pax_enter_kernel_user
12135 @@ -825,7 +1097,7 @@ ret_from_intr:
12136 CFI_ADJUST_CFA_OFFSET -8
12138 GET_THREAD_INFO(%rcx)
12139 - testl $3,CS-ARGOFFSET(%rsp)
12140 + testb $3,CS-ARGOFFSET(%rsp)
12143 /* Interrupt came from user space */
12144 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12145 * The iretq could re-enable interrupts:
12147 DISABLE_INTERRUPTS(CLBR_ANY)
12148 + pax_exit_kernel_user
12153 retint_restore_args: /* return to kernel space */
12154 DISABLE_INTERRUPTS(CLBR_ANY)
12157 * The iretq could re-enable interrupts:
12159 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12160 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12163 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12164 + testb $3, CS(%rsp)
12168 +1: pax_enter_kernel_user
12173 movq %rsp,%rdi /* pt_regs pointer */
12174 xorl %esi,%esi /* no error code */
12176 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12177 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12180 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12181 + testb $3, CS(%rsp)
12185 +1: pax_enter_kernel_user
12190 movq %rsp,%rdi /* pt_regs pointer */
12191 xorl %esi,%esi /* no error code */
12193 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12197 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12198 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12199 .macro paranoidzeroentry_ist sym do_sym ist
12202 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12203 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12207 + testb $3, CS(%rsp)
12211 +1: pax_enter_kernel_user
12216 movq %rsp,%rdi /* pt_regs pointer */
12217 xorl %esi,%esi /* no error code */
12219 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12220 + lea init_tss(%r12), %r12
12222 + lea init_tss(%rip), %r12
12224 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12226 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12227 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12228 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12231 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12232 + testb $3, CS(%rsp)
12236 +1: pax_enter_kernel_user
12241 movq %rsp,%rdi /* pt_regs pointer */
12242 movq ORIG_RAX(%rsp),%rsi /* get error code */
12243 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12244 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12248 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12249 + testb $3, CS(%rsp)
12253 +1: pax_enter_kernel_user
12258 movq %rsp,%rdi /* pt_regs pointer */
12259 movq ORIG_RAX(%rsp),%rsi /* get error code */
12260 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12261 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12263 testl %ebx,%ebx /* swapgs needed? */
12264 jnz paranoid_restore
12265 - testl $3,CS(%rsp)
12266 + testb $3,CS(%rsp)
12267 jnz paranoid_userspace
12268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12270 + TRACE_IRQS_IRETQ 0
12271 + SWAPGS_UNSAFE_STACK
12276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12277 + pax_exit_kernel_user
12282 SWAPGS_UNSAFE_STACK
12290 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12291 movq_cfi r14, R14+8
12292 movq_cfi r15, R15+8
12294 - testl $3,CS+8(%rsp)
12295 + testb $3,CS+8(%rsp)
12296 je error_kernelspace
12299 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12300 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12304 + testb $3, CS(%rsp)
12308 +1: pax_enter_kernel_user
12313 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12316 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12317 DISABLE_INTERRUPTS(CLBR_NONE)
12318 testl %ebx,%ebx /* swapgs needed? */
12320 - testl $3,CS(%rsp)
12321 + testb $3,CS(%rsp)
12323 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12325 + SWAPGS_UNSAFE_STACK
12330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12331 + pax_exit_kernel_user
12335 SWAPGS_UNSAFE_STACK
12343 diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12344 --- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12345 +++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12346 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12347 static void *mod_code_newcode; /* holds the text to write to the IP */
12349 static unsigned nmi_wait_count;
12350 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12351 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12353 int ftrace_arch_read_dyn_info(char *buf, int size)
12355 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12357 r = snprintf(buf, size, "%u %u",
12359 - atomic_read(&nmi_update_count));
12360 + atomic_read_unchecked(&nmi_update_count));
12364 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12366 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12368 + pax_open_kernel();
12370 - atomic_inc(&nmi_update_count);
12371 + pax_close_kernel();
12372 + atomic_inc_unchecked(&nmi_update_count);
12374 /* Must have previous changes seen before executions */
12376 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12378 unsigned char replaced[MCOUNT_INSN_SIZE];
12380 + ip = ktla_ktva(ip);
12383 * Note: Due to modules and __init, code can
12384 * disappear and change, we need to protect against faulting
12385 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12386 unsigned char old[MCOUNT_INSN_SIZE], *new;
12389 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12390 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12391 new = ftrace_call_replace(ip, (unsigned long)func);
12392 ret = ftrace_modify_code(ip, old, new);
12394 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12396 unsigned char code[MCOUNT_INSN_SIZE];
12398 + ip = ktla_ktva(ip);
12400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12403 diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12404 --- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12405 +++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12407 #include <asm/io_apic.h>
12408 #include <asm/bios_ebda.h>
12409 #include <asm/tlbflush.h>
12410 +#include <asm/boot.h>
12412 static void __init i386_default_early_setup(void)
12414 @@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12418 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12419 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12421 #ifdef CONFIG_BLK_DEV_INITRD
12422 /* Reserve INITRD */
12423 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12424 --- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12425 +++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12427 /* Physical address */
12428 #define pa(X) ((X) - __PAGE_OFFSET)
12430 +#ifdef CONFIG_PAX_KERNEXEC
12433 +#define ta(X) ((X) - __PAGE_OFFSET)
12437 * References to members of the new_cpu_data structure.
12440 * and small than max_low_pfn, otherwise will waste some page table entries
12443 -#if PTRS_PER_PMD > 1
12444 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12446 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12448 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12450 /* Number of possible pages in the lowmem region */
12451 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12452 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12453 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12456 + * Real beginning of normal "text" segment
12462 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12463 * %esi points to the real-mode code as a 32-bit pointer.
12464 * CS and DS must be 4 GB flat segments, but we don't depend on
12465 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12470 +#ifdef CONFIG_PAX_KERNEXEC
12472 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12473 +.fill PAGE_SIZE-5,1,0xcc
12477 movl pa(stack_start),%ecx
12479 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12481 leal -__PAGE_OFFSET(%ecx),%esp
12484 + movl $pa(cpu_gdt_table),%edi
12485 + movl $__per_cpu_load,%eax
12486 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12488 + movb %al,__KERNEL_PERCPU + 4(%edi)
12489 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12490 + movl $__per_cpu_end - 1,%eax
12491 + subl $__per_cpu_start,%eax
12492 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12496 + movl $NR_CPUS,%ecx
12497 + movl $pa(cpu_gdt_table),%edi
12499 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12500 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12501 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12502 + addl $PAGE_SIZE_asm,%edi
12506 +#ifdef CONFIG_PAX_KERNEXEC
12507 + movl $pa(boot_gdt),%edi
12508 + movl $__LOAD_PHYSICAL_ADDR,%eax
12509 + movw %ax,__BOOT_CS + 2(%edi)
12511 + movb %al,__BOOT_CS + 4(%edi)
12512 + movb %ah,__BOOT_CS + 7(%edi)
12515 + ljmp $(__BOOT_CS),$1f
12518 + movl $NR_CPUS,%ecx
12519 + movl $pa(cpu_gdt_table),%edi
12520 + addl $__PAGE_OFFSET,%eax
12522 + movw %ax,__KERNEL_CS + 2(%edi)
12523 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12525 + movb %al,__KERNEL_CS + 4(%edi)
12526 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12527 + movb %ah,__KERNEL_CS + 7(%edi)
12528 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12530 + addl $PAGE_SIZE_asm,%edi
12535 * Clear BSS first so that there are no surprises...
12537 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12538 movl %eax, pa(max_pfn_mapped)
12540 /* Do early initialization of the fixmap area */
12541 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12543 +#ifdef CONFIG_COMPAT_VDSO
12544 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12546 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12548 #else /* Not PAE */
12550 page_pde_offset = (__PAGE_OFFSET >> 20);
12551 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 movl %eax, pa(max_pfn_mapped)
12554 /* Do early initialization of the fixmap area */
12555 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12556 - movl %eax,pa(initial_page_table+0xffc)
12557 +#ifdef CONFIG_COMPAT_VDSO
12558 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12560 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12564 #ifdef CONFIG_PARAVIRT
12565 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12566 cmpl $num_subarch_entries, %eax
12569 - movl pa(subarch_entries)(,%eax,4), %eax
12570 - subl $__PAGE_OFFSET, %eax
12572 + jmp *pa(subarch_entries)(,%eax,4)
12576 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12580 - .long default_entry /* normal x86/PC */
12581 - .long lguest_entry /* lguest hypervisor */
12582 - .long xen_entry /* Xen hypervisor */
12583 - .long default_entry /* Moorestown MID */
12584 + .long ta(default_entry) /* normal x86/PC */
12585 + .long ta(lguest_entry) /* lguest hypervisor */
12586 + .long ta(xen_entry) /* Xen hypervisor */
12587 + .long ta(default_entry) /* Moorestown MID */
12588 num_subarch_entries = (. - subarch_entries) / 4
12591 @@ -312,6 +382,7 @@ default_entry:
12595 +#ifdef CONFIG_X86_PAE
12596 testb $X86_CR4_PAE, %al # check if PAE is enabled
12599 @@ -340,6 +411,9 @@ default_entry:
12600 /* Make changes effective */
12603 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12609 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12610 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12611 movl %eax,%ss # after changing gdt.
12613 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12614 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12618 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12622 - movl $gdt_page,%eax
12623 + movl $cpu_gdt_table,%eax
12624 movl $stack_canary,%ecx
12626 + addl $__per_cpu_load,%ecx
12628 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12630 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12631 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12634 movl $(__KERNEL_STACK_CANARY),%eax
12635 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12636 + movl $(__USER_DS),%eax
12642 xorl %eax,%eax # Clear LDT
12643 @@ -558,22 +639,22 @@ early_page_fault:
12648 #ifdef CONFIG_PRINTK
12649 + cmpl $1,%ss:early_recursion_flag
12651 + incl %ss:early_recursion_flag
12654 movl $(__KERNEL_DS),%eax
12657 - cmpl $2,early_recursion_flag
12659 - incl early_recursion_flag
12662 pushl %edx /* trapno */
12671 @@ -581,8 +662,11 @@ hlt_loop:
12672 /* This is the default interrupt "handler" :-) */
12676 #ifdef CONFIG_PRINTK
12677 + cmpl $2,%ss:early_recursion_flag
12679 + incl %ss:early_recursion_flag
12684 @@ -591,9 +675,6 @@ ignore_int:
12685 movl $(__KERNEL_DS),%eax
12688 - cmpl $2,early_recursion_flag
12690 - incl early_recursion_flag
12694 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12698 -__PAGE_ALIGNED_BSS
12700 #ifdef CONFIG_X86_PAE
12701 +.section .initial_pg_pmd,"a",@progbits
12703 .fill 1024*KPMDS,4,0
12705 +.section .initial_page_table,"a",@progbits
12706 ENTRY(initial_page_table)
12709 +.section .initial_pg_fixmap,"a",@progbits
12712 +.section .empty_zero_page,"a",@progbits
12713 ENTRY(empty_zero_page)
12715 +.section .swapper_pg_dir,"a",@progbits
12716 ENTRY(swapper_pg_dir)
12717 +#ifdef CONFIG_X86_PAE
12724 + * The IDT has to be page-aligned to simplify the Pentium
12725 + * F0 0F bug workaround.. We have a special link segment
12728 +.section .idt,"a",@progbits
12733 * This starts the data section.
12735 #ifdef CONFIG_X86_PAE
12736 -__PAGE_ALIGNED_DATA
12737 - /* Page-aligned for the benefit of paravirt? */
12739 +.section .initial_page_table,"a",@progbits
12740 ENTRY(initial_page_table)
12741 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12743 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12744 # error "Kernel PMDs should be 1, 2 or 3"
12746 .align PAGE_SIZE /* needs to be page-sized too */
12748 +#ifdef CONFIG_PAX_PER_CPU_PGD
12760 - .long init_thread_union+THREAD_SIZE
12761 + .long init_thread_union+THREAD_SIZE-8
12765 +.section .rodata,"a",@progbits
12766 early_recursion_flag:
12772 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12774 @@ -707,7 +811,7 @@ fault_msg:
12775 .word 0 # 32 bit align gdt_desc.address
12778 - .long boot_gdt - __PAGE_OFFSET
12779 + .long pa(boot_gdt)
12781 .word 0 # 32-bit align idt_desc.address
12783 @@ -718,7 +822,7 @@ idt_descr:
12784 .word 0 # 32 bit align gdt_desc.address
12785 ENTRY(early_gdt_descr)
12786 .word GDT_ENTRIES*8-1
12787 - .long gdt_page /* Overwritten for secondary CPUs */
12788 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12791 * The boot_gdt must mirror the equivalent in setup.S and is
12792 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12793 .align L1_CACHE_BYTES
12795 .fill GDT_ENTRY_BOOT_CS,8,0
12796 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12797 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12798 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12799 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12801 + .align PAGE_SIZE_asm
12802 +ENTRY(cpu_gdt_table)
12804 + .quad 0x0000000000000000 /* NULL descriptor */
12805 + .quad 0x0000000000000000 /* 0x0b reserved */
12806 + .quad 0x0000000000000000 /* 0x13 reserved */
12807 + .quad 0x0000000000000000 /* 0x1b reserved */
12809 +#ifdef CONFIG_PAX_KERNEXEC
12810 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12812 + .quad 0x0000000000000000 /* 0x20 unused */
12815 + .quad 0x0000000000000000 /* 0x28 unused */
12816 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12817 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12818 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12819 + .quad 0x0000000000000000 /* 0x4b reserved */
12820 + .quad 0x0000000000000000 /* 0x53 reserved */
12821 + .quad 0x0000000000000000 /* 0x5b reserved */
12823 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12824 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12825 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12826 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12828 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12829 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12832 + * Segments used for calling PnP BIOS have byte granularity.
12833 + * The code segments and data segments have fixed 64k limits,
12834 + * the transfer segment sizes are set at run time.
12836 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12837 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12838 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12839 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12840 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12843 + * The APM segments have byte granularity and their bases
12844 + * are set at run time. All have 64k limits.
12846 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12847 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12848 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12850 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12851 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12852 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12853 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12854 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12855 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12857 + /* Be sure this is zeroed to avoid false validations in Xen */
12858 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12860 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12861 --- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12862 +++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12864 #include <asm/cache.h>
12865 #include <asm/processor-flags.h>
12866 #include <asm/percpu.h>
12867 +#include <asm/cpufeature.h>
12869 #ifdef CONFIG_PARAVIRT
12870 #include <asm/asm-offsets.h>
12871 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12872 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12873 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12874 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12875 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12876 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12877 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12878 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12882 @@ -85,35 +90,22 @@ startup_64:
12884 addq %rbp, init_level4_pgt + 0(%rip)
12885 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12886 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12887 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12888 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12890 addq %rbp, level3_ident_pgt + 0(%rip)
12891 +#ifndef CONFIG_XEN
12892 + addq %rbp, level3_ident_pgt + 8(%rip)
12895 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12896 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12897 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12899 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12900 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12901 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12903 - /* Add an Identity mapping if I am above 1G */
12904 - leaq _text(%rip), %rdi
12905 - andq $PMD_PAGE_MASK, %rdi
12908 - shrq $PUD_SHIFT, %rax
12909 - andq $(PTRS_PER_PUD - 1), %rax
12910 - jz ident_complete
12912 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12913 - leaq level3_ident_pgt(%rip), %rbx
12914 - movq %rdx, 0(%rbx, %rax, 8)
12917 - shrq $PMD_SHIFT, %rax
12918 - andq $(PTRS_PER_PMD - 1), %rax
12919 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12920 - leaq level2_spare_pgt(%rip), %rbx
12921 - movq %rdx, 0(%rbx, %rax, 8)
12923 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12924 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12927 * Fixup the kernel text+data virtual addresses. Note that
12928 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12929 * after the boot processor executes this code.
12932 - /* Enable PAE mode and PGE */
12933 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12934 + /* Enable PAE mode and PSE/PGE */
12935 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12938 /* Setup early boot stage 4 level pagetables. */
12939 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12940 movl $MSR_EFER, %ecx
12942 btsl $_EFER_SCE, %eax /* Enable System Call */
12943 - btl $20,%edi /* No Execute supported? */
12944 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12946 btsl $_EFER_NX, %eax
12947 + leaq init_level4_pgt(%rip), %rdi
12948 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12949 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12950 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12951 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12952 1: wrmsr /* Make changes effective */
12955 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12959 - .section ".init.text","ax"
12961 #ifdef CONFIG_EARLY_PRINTK
12962 .globl early_idt_handlers
12963 early_idt_handlers:
12964 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12965 #endif /* EARLY_PRINTK */
12970 #ifdef CONFIG_EARLY_PRINTK
12972 early_recursion_flag:
12976 + .section .rodata,"a",@progbits
12978 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12981 -#endif /* CONFIG_EARLY_PRINTK */
12983 +#endif /* CONFIG_EARLY_PRINTK */
12985 + .section .rodata,"a",@progbits
12986 #define NEXT_PAGE(name) \
12987 .balign PAGE_SIZE; \
12989 @@ -338,7 +340,6 @@ ENTRY(name)
12995 * This default setting generates an ident mapping at address 0x100000
12996 * and a mapping for the kernel that precisely maps virtual address
12997 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12998 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12999 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13000 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13001 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13002 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13003 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13004 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13005 .org init_level4_pgt + L4_START_KERNEL*8, 0
13006 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13007 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13009 +#ifdef CONFIG_PAX_PER_CPU_PGD
13010 +NEXT_PAGE(cpu_pgd)
13016 NEXT_PAGE(level3_ident_pgt)
13017 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13021 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13025 +NEXT_PAGE(level3_vmalloc_pgt)
13028 +NEXT_PAGE(level3_vmemmap_pgt)
13029 + .fill L3_VMEMMAP_START,8,0
13030 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13032 NEXT_PAGE(level3_kernel_pgt)
13033 .fill L3_START_KERNEL,8,0
13034 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13035 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13036 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13038 +NEXT_PAGE(level2_vmemmap_pgt)
13041 NEXT_PAGE(level2_fixmap_pgt)
13043 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13044 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13047 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13048 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13051 -NEXT_PAGE(level1_fixmap_pgt)
13052 +NEXT_PAGE(level1_vsyscall_pgt)
13055 -NEXT_PAGE(level2_ident_pgt)
13056 - /* Since I easily can, map the first 1G.
13057 + /* Since I easily can, map the first 2G.
13058 * Don't set NX because code runs from these pages.
13060 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13061 +NEXT_PAGE(level2_ident_pgt)
13062 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13064 NEXT_PAGE(level2_kernel_pgt)
13066 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13067 * If you want to increase this then increase MODULES_VADDR
13070 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13071 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13073 -NEXT_PAGE(level2_spare_pgt)
13075 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13082 +ENTRY(cpu_gdt_table)
13084 + .quad 0x0000000000000000 /* NULL descriptor */
13085 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13086 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13087 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13088 + .quad 0x00cffb000000ffff /* __USER32_CS */
13089 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13090 + .quad 0x00affb000000ffff /* __USER_CS */
13092 +#ifdef CONFIG_PAX_KERNEXEC
13093 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13095 + .quad 0x0 /* unused */
13098 + .quad 0,0 /* TSS */
13099 + .quad 0,0 /* LDT */
13100 + .quad 0,0,0 /* three TLS descriptors */
13101 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13102 + /* asm/segment.h:GDT_ENTRIES must match this */
13104 + /* zero the remaining page */
13105 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13109 .globl early_gdt_descr
13111 .word GDT_ENTRIES*8-1
13112 early_gdt_descr_base:
13113 - .quad INIT_PER_CPU_VAR(gdt_page)
13114 + .quad cpu_gdt_table
13117 /* This must match the first entry in level2_kernel_pgt */
13118 .quad 0x0000000000000000
13120 #include "../../x86/xen/xen-head.S"
13122 - .section .bss, "aw", @nobits
13124 + .section .rodata,"a",@progbits
13125 .align L1_CACHE_BYTES
13127 - .skip IDT_ENTRIES * 16
13132 diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13133 --- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13134 +++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13135 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13136 EXPORT_SYMBOL(cmpxchg8b_emu);
13139 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13141 /* Networking helper routines. */
13142 EXPORT_SYMBOL(csum_partial_copy_generic);
13143 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13144 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13146 EXPORT_SYMBOL(__get_user_1);
13147 EXPORT_SYMBOL(__get_user_2);
13148 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13150 EXPORT_SYMBOL(csum_partial);
13151 EXPORT_SYMBOL(empty_zero_page);
13153 +#ifdef CONFIG_PAX_KERNEXEC
13154 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13156 diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13157 --- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13158 +++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13159 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13160 "spurious 8259A interrupt: IRQ%d.\n", irq);
13161 spurious_irq_mask |= irqmask;
13163 - atomic_inc(&irq_err_count);
13164 + atomic_inc_unchecked(&irq_err_count);
13166 * Theoretically we do not have to handle this IRQ,
13167 * but in Linux this does not cause problems and is
13168 diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13169 --- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13170 +++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13171 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13172 * way process stacks are handled. This is done by having a special
13173 * "init_task" linker map entry..
13175 -union thread_union init_thread_union __init_task_data =
13176 - { INIT_THREAD_INFO(init_task) };
13177 +union thread_union init_thread_union __init_task_data;
13180 * Initial task structure.
13181 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13182 * section. Since TSS's are completely CPU-local, we want them
13183 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13185 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13187 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13188 +EXPORT_SYMBOL(init_tss);
13189 diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13190 --- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13191 +++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13193 #include <linux/sched.h>
13194 #include <linux/kernel.h>
13195 #include <linux/capability.h>
13196 +#include <linux/security.h>
13197 #include <linux/errno.h>
13198 #include <linux/types.h>
13199 #include <linux/ioport.h>
13200 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13202 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13204 +#ifdef CONFIG_GRKERNSEC_IO
13205 + if (turn_on && grsec_disable_privio) {
13206 + gr_handle_ioperm();
13210 if (turn_on && !capable(CAP_SYS_RAWIO))
13213 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13214 * because the ->io_bitmap_max value must match the bitmap
13217 - tss = &per_cpu(init_tss, get_cpu());
13218 + tss = init_tss + get_cpu();
13221 bitmap_clear(t->io_bitmap_ptr, from, num);
13222 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13224 /* Trying to gain more privileges? */
13226 +#ifdef CONFIG_GRKERNSEC_IO
13227 + if (grsec_disable_privio) {
13228 + gr_handle_iopl();
13232 if (!capable(CAP_SYS_RAWIO))
13235 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13236 --- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13237 +++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13238 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13239 __asm__ __volatile__("andl %%esp,%0" :
13240 "=r" (sp) : "0" (THREAD_SIZE - 1));
13242 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13243 + return sp < STACK_WARN;
13246 static void print_stack_overflow(void)
13247 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13248 * per-CPU IRQ handling contexts (thread information and stack)
13251 - struct thread_info tinfo;
13252 - u32 stack[THREAD_SIZE/sizeof(u32)];
13253 + unsigned long previous_esp;
13254 + u32 stack[THREAD_SIZE/sizeof(u32)];
13255 } __attribute__((aligned(THREAD_SIZE)));
13257 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13258 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13260 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13262 - union irq_ctx *curctx, *irqctx;
13263 + union irq_ctx *irqctx;
13264 u32 *isp, arg1, arg2;
13266 - curctx = (union irq_ctx *) current_thread_info();
13267 irqctx = __this_cpu_read(hardirq_ctx);
13270 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13271 * handler) we can't do that and just have to keep using the
13272 * current stack (which is the irq stack already after all)
13274 - if (unlikely(curctx == irqctx))
13275 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13278 /* build the stack frame on the IRQ stack */
13279 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13280 - irqctx->tinfo.task = curctx->tinfo.task;
13281 - irqctx->tinfo.previous_esp = current_stack_pointer;
13282 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13283 + irqctx->previous_esp = current_stack_pointer;
13286 - * Copy the softirq bits in preempt_count so that the
13287 - * softirq checks work in the hardirq context.
13289 - irqctx->tinfo.preempt_count =
13290 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13291 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13293 + __set_fs(MAKE_MM_SEG(0));
13296 if (unlikely(overflow))
13297 call_on_stack(print_stack_overflow, isp);
13298 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13299 : "0" (irq), "1" (desc), "2" (isp),
13300 "D" (desc->handle_irq)
13301 : "memory", "cc", "ecx");
13303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13304 + __set_fs(current_thread_info()->addr_limit);
13310 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13312 void __cpuinit irq_ctx_init(int cpu)
13314 - union irq_ctx *irqctx;
13316 if (per_cpu(hardirq_ctx, cpu))
13319 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13322 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13323 - irqctx->tinfo.cpu = cpu;
13324 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13325 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13327 - per_cpu(hardirq_ctx, cpu) = irqctx;
13329 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13332 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13333 - irqctx->tinfo.cpu = cpu;
13334 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13336 - per_cpu(softirq_ctx, cpu) = irqctx;
13337 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13338 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13340 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13341 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13342 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13343 asmlinkage void do_softirq(void)
13345 unsigned long flags;
13346 - struct thread_info *curctx;
13347 union irq_ctx *irqctx;
13350 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13351 local_irq_save(flags);
13353 if (local_softirq_pending()) {
13354 - curctx = current_thread_info();
13355 irqctx = __this_cpu_read(softirq_ctx);
13356 - irqctx->tinfo.task = curctx->task;
13357 - irqctx->tinfo.previous_esp = current_stack_pointer;
13358 + irqctx->previous_esp = current_stack_pointer;
13360 /* build the stack frame on the softirq stack */
13361 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13362 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13364 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13365 + __set_fs(MAKE_MM_SEG(0));
13368 call_on_stack(__do_softirq, isp);
13370 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13371 + __set_fs(current_thread_info()->addr_limit);
13375 * Shouldn't happen, we returned above if in_interrupt():
13377 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13378 --- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13379 +++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13381 #include <asm/mce.h>
13382 #include <asm/hw_irq.h>
13384 -atomic_t irq_err_count;
13385 +atomic_unchecked_t irq_err_count;
13387 /* Function pointer for generic interrupt vector handling */
13388 void (*x86_platform_ipi_callback)(void) = NULL;
13389 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13390 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13391 seq_printf(p, " Machine check polls\n");
13393 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13394 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13395 #if defined(CONFIG_X86_IO_APIC)
13396 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13397 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13401 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13403 u64 arch_irq_stat(void)
13405 - u64 sum = atomic_read(&irq_err_count);
13406 + u64 sum = atomic_read_unchecked(&irq_err_count);
13408 #ifdef CONFIG_X86_IO_APIC
13409 - sum += atomic_read(&irq_mis_count);
13410 + sum += atomic_read_unchecked(&irq_mis_count);
13414 diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13415 --- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13416 +++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13417 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13418 #ifdef CONFIG_X86_32
13421 - if (!user_mode_vm(regs))
13422 + if (!user_mode(regs))
13423 *(unsigned long *)mem = __KERNEL_DS;
13426 - if (!user_mode_vm(regs))
13427 + if (!user_mode(regs))
13428 *(unsigned long *)mem = kernel_stack_pointer(regs);
13431 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13433 /* clear the trace bit */
13434 linux_regs->flags &= ~X86_EFLAGS_TF;
13435 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13436 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13438 /* set the trace bit if we're stepping */
13439 if (remcomInBuffer[0] == 's') {
13440 linux_regs->flags |= X86_EFLAGS_TF;
13441 - atomic_set(&kgdb_cpu_doing_single_step,
13442 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13443 raw_smp_processor_id());
13446 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13447 return NOTIFY_DONE;
13450 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13451 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13452 if (user_mode(regs))
13453 return single_step_cont(regs, args);
13455 diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13456 --- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13457 +++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13458 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13459 } __attribute__((packed)) *insn;
13461 insn = (struct __arch_relative_insn *)from;
13463 + pax_open_kernel();
13464 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13466 + pax_close_kernel();
13469 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13470 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13471 kprobe_opcode_t opcode;
13472 kprobe_opcode_t *orig_opcodes = opcodes;
13474 - if (search_exception_tables((unsigned long)opcodes))
13475 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13476 return 0; /* Page fault may occur on this address. */
13479 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13482 insn_get_length(&insn);
13483 + pax_open_kernel();
13484 memcpy(dest, insn.kaddr, insn.length);
13485 + pax_close_kernel();
13487 #ifdef CONFIG_X86_64
13488 if (insn_rip_relative(&insn)) {
13489 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13491 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13492 disp = (u8 *) dest + insn_offset_displacement(&insn);
13493 + pax_open_kernel();
13494 *(s32 *) disp = (s32) newdisp;
13495 + pax_close_kernel();
13498 return insn.length;
13499 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13501 __copy_instruction(p->ainsn.insn, p->addr, 0);
13503 - if (can_boost(p->addr))
13504 + if (can_boost(ktla_ktva(p->addr)))
13505 p->ainsn.boostable = 0;
13507 p->ainsn.boostable = -1;
13509 - p->opcode = *p->addr;
13510 + p->opcode = *(ktla_ktva(p->addr));
13513 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13514 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13515 * nor set current_kprobe, because it doesn't use single
13518 - regs->ip = (unsigned long)p->ainsn.insn;
13519 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 preempt_enable_no_resched();
13523 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13524 if (p->opcode == BREAKPOINT_INSTRUCTION)
13525 regs->ip = (unsigned long)p->addr;
13527 - regs->ip = (unsigned long)p->ainsn.insn;
13528 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13532 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13533 setup_singlestep(p, regs, kcb, 0);
13536 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13537 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13539 * The breakpoint instruction was removed right
13540 * after we hit it. Another cpu has removed
13541 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13542 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13544 unsigned long *tos = stack_addr(regs);
13545 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13546 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13547 unsigned long orig_ip = (unsigned long)p->addr;
13548 kprobe_opcode_t *insn = p->ainsn.insn;
13550 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13551 struct die_args *args = data;
13552 int ret = NOTIFY_DONE;
13554 - if (args->regs && user_mode_vm(args->regs))
13555 + if (args->regs && user_mode(args->regs))
13559 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13560 * Verify if the address gap is in 2GB range, because this uses
13563 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13564 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13565 if (abs(rel) > 0x7fffffff)
13568 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13569 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13571 /* Set probe function call */
13572 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13573 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13575 /* Set returning jmp instruction at the tail of out-of-line buffer */
13576 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13577 - (u8 *)op->kp.addr + op->optinsn.size);
13578 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13580 flush_icache_range((unsigned long) buf,
13581 (unsigned long) buf + TMPL_END_IDX +
13582 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13583 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13585 /* Backup instructions which will be replaced by jump address */
13586 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13587 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13588 RELATIVE_ADDR_SIZE);
13590 insn_buf[0] = RELATIVEJUMP_OPCODE;
13591 diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13592 --- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13593 +++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13594 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13599 + load_LDT_nolock(pc);
13600 if (!cpumask_equal(mm_cpumask(current->mm),
13601 cpumask_of(smp_processor_id())))
13602 smp_call_function(flush_ldt, current->mm, 1);
13606 + load_LDT_nolock(pc);
13610 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13613 for (i = 0; i < old->size; i++)
13614 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13615 + write_ldt_entry(new->ldt, i, old->ldt + i);
13619 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13620 retval = copy_ldt(&mm->context, &old_mm->context);
13621 mutex_unlock(&old_mm->context.lock);
13624 + if (tsk == current) {
13625 + mm->context.vdso = 0;
13627 +#ifdef CONFIG_X86_32
13628 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13629 + mm->context.user_cs_base = 0UL;
13630 + mm->context.user_cs_limit = ~0UL;
13632 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13633 + cpus_clear(mm->context.cpu_user_cs_mask);
13644 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13648 +#ifdef CONFIG_PAX_SEGMEXEC
13649 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13655 fill_ldt(&ldt, &ldt_info);
13658 diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13659 --- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13660 +++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13662 #include <asm/cacheflush.h>
13663 #include <asm/debugreg.h>
13665 -static void set_idt(void *newidt, __u16 limit)
13666 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13668 struct desc_ptr curidt;
13670 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13674 -static void set_gdt(void *newgdt, __u16 limit)
13675 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13677 struct desc_ptr curgdt;
13679 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13682 control_page = page_address(image->control_code_page);
13683 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13684 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13686 relocate_kernel_ptr = control_page;
13687 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13688 diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13689 --- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13690 +++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13691 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13693 static int get_ucode_user(void *to, const void *from, size_t n)
13695 - return copy_from_user(to, from, n);
13696 + return copy_from_user(to, (__force const void __user *)from, n);
13699 static enum ucode_state
13700 request_microcode_user(int cpu, const void __user *buf, size_t size)
13702 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13703 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13706 static void microcode_fini_cpu(int cpu)
13707 diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13708 --- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13709 +++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13710 @@ -35,21 +35,66 @@
13711 #define DEBUGP(fmt...)
13714 -void *module_alloc(unsigned long size)
13715 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13717 if (PAGE_ALIGN(size) > MODULES_LEN)
13719 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13720 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13721 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13722 -1, __builtin_return_address(0));
13725 +void *module_alloc(unsigned long size)
13728 +#ifdef CONFIG_PAX_KERNEXEC
13729 + return __module_alloc(size, PAGE_KERNEL);
13731 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13736 /* Free memory returned from module_alloc */
13737 void module_free(struct module *mod, void *module_region)
13739 vfree(module_region);
13742 +#ifdef CONFIG_PAX_KERNEXEC
13743 +#ifdef CONFIG_X86_32
13744 +void *module_alloc_exec(unsigned long size)
13746 + struct vm_struct *area;
13751 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13752 + return area ? area->addr : NULL;
13754 +EXPORT_SYMBOL(module_alloc_exec);
13756 +void module_free_exec(struct module *mod, void *module_region)
13758 + vunmap(module_region);
13760 +EXPORT_SYMBOL(module_free_exec);
13762 +void module_free_exec(struct module *mod, void *module_region)
13764 + module_free(mod, module_region);
13766 +EXPORT_SYMBOL(module_free_exec);
13768 +void *module_alloc_exec(unsigned long size)
13770 + return __module_alloc(size, PAGE_KERNEL_RX);
13772 +EXPORT_SYMBOL(module_alloc_exec);
13776 /* We don't need anything special. */
13777 int module_frob_arch_sections(Elf_Ehdr *hdr,
13779 @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13781 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13783 - uint32_t *location;
13784 + uint32_t *plocation, location;
13786 DEBUGP("Applying relocate section %u to %u\n", relsec,
13787 sechdrs[relsec].sh_info);
13788 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13789 /* This is where to make the change */
13790 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13791 - + rel[i].r_offset;
13792 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13793 + location = (uint32_t)plocation;
13794 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13795 + plocation = ktla_ktva((void *)plocation);
13796 /* This is the symbol it is referring to. Note that all
13797 undefined symbols have been resolved. */
13798 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13799 @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13800 switch (ELF32_R_TYPE(rel[i].r_info)) {
13802 /* We add the value into the location given */
13803 - *location += sym->st_value;
13804 + pax_open_kernel();
13805 + *plocation += sym->st_value;
13806 + pax_close_kernel();
13809 /* Add the value, subtract its postition */
13810 - *location += sym->st_value - (uint32_t)location;
13811 + pax_open_kernel();
13812 + *plocation += sym->st_value - location;
13813 + pax_close_kernel();
13816 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13817 @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13818 case R_X86_64_NONE:
13821 + pax_open_kernel();
13823 + pax_close_kernel();
13826 + pax_open_kernel();
13828 + pax_close_kernel();
13829 if (val != *(u32 *)loc)
13833 + pax_open_kernel();
13835 + pax_close_kernel();
13836 if ((s64)val != *(s32 *)loc)
13839 case R_X86_64_PC32:
13841 + pax_open_kernel();
13843 + pax_close_kernel();
13846 if ((s64)val != *(s32 *)loc)
13848 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13849 --- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13850 +++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13851 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13855 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13856 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13859 void __init default_banner(void)
13861 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13862 * corresponding structure. */
13863 static void *get_call_destination(u8 type)
13865 - struct paravirt_patch_template tmpl = {
13866 + const struct paravirt_patch_template tmpl = {
13867 .pv_init_ops = pv_init_ops,
13868 .pv_time_ops = pv_time_ops,
13869 .pv_cpu_ops = pv_cpu_ops,
13870 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13871 .pv_lock_ops = pv_lock_ops,
13875 + pax_track_stack();
13877 return *((void **)&tmpl + type);
13880 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13881 if (opfunc == NULL)
13882 /* If there's no function, patch it with a ud2a (BUG) */
13883 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13884 - else if (opfunc == _paravirt_nop)
13885 + else if (opfunc == (void *)_paravirt_nop)
13886 /* If the operation is a nop, then nop the callsite */
13887 ret = paravirt_patch_nop();
13889 /* identity functions just return their single argument */
13890 - else if (opfunc == _paravirt_ident_32)
13891 + else if (opfunc == (void *)_paravirt_ident_32)
13892 ret = paravirt_patch_ident_32(insnbuf, len);
13893 - else if (opfunc == _paravirt_ident_64)
13894 + else if (opfunc == (void *)_paravirt_ident_64)
13895 ret = paravirt_patch_ident_64(insnbuf, len);
13896 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13897 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13898 + ret = paravirt_patch_ident_64(insnbuf, len);
13901 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13902 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13903 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13904 if (insn_len > len || start == NULL)
13907 - memcpy(insnbuf, start, insn_len);
13908 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13912 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13916 -struct pv_info pv_info = {
13917 +struct pv_info pv_info __read_only = {
13918 .name = "bare hardware",
13919 .paravirt_enabled = 0,
13921 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13924 -struct pv_init_ops pv_init_ops = {
13925 +struct pv_init_ops pv_init_ops __read_only = {
13926 .patch = native_patch,
13929 -struct pv_time_ops pv_time_ops = {
13930 +struct pv_time_ops pv_time_ops __read_only = {
13931 .sched_clock = native_sched_clock,
13934 -struct pv_irq_ops pv_irq_ops = {
13935 +struct pv_irq_ops pv_irq_ops __read_only = {
13936 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13937 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13938 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13939 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13943 -struct pv_cpu_ops pv_cpu_ops = {
13944 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13945 .cpuid = native_cpuid,
13946 .get_debugreg = native_get_debugreg,
13947 .set_debugreg = native_set_debugreg,
13948 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13949 .end_context_switch = paravirt_nop,
13952 -struct pv_apic_ops pv_apic_ops = {
13953 +struct pv_apic_ops pv_apic_ops __read_only = {
13954 #ifdef CONFIG_X86_LOCAL_APIC
13955 .startup_ipi_hook = paravirt_nop,
13959 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13960 +#ifdef CONFIG_X86_32
13961 +#ifdef CONFIG_X86_PAE
13962 +/* 64-bit pagetable entries */
13963 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13965 /* 32-bit pagetable entries */
13966 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13969 /* 64-bit pagetable entries */
13970 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13973 -struct pv_mmu_ops pv_mmu_ops = {
13974 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13976 .read_cr2 = native_read_cr2,
13977 .write_cr2 = native_write_cr2,
13978 @@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13981 .set_fixmap = native_set_fixmap,
13983 +#ifdef CONFIG_PAX_KERNEXEC
13984 + .pax_open_kernel = native_pax_open_kernel,
13985 + .pax_close_kernel = native_pax_close_kernel,
13990 EXPORT_SYMBOL_GPL(pv_time_ops);
13991 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
13992 --- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
13993 +++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
13994 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13995 arch_spin_lock(lock);
13998 -struct pv_lock_ops pv_lock_ops = {
13999 +struct pv_lock_ops pv_lock_ops __read_only = {
14001 .spin_is_locked = __ticket_spin_is_locked,
14002 .spin_is_contended = __ticket_spin_is_contended,
14003 diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14004 --- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14005 +++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14007 #include <asm/iommu_table.h>
14008 #include <linux/string.h>
14009 #include <linux/kallsyms.h>
14011 +#include <linux/sched.h>
14015 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14016 char sym_p[KSYM_SYMBOL_LEN];
14017 char sym_q[KSYM_SYMBOL_LEN];
14019 + pax_track_stack();
14021 /* Simple cyclic dependency checker. */
14022 for (p = start; p < finish; p++) {
14023 q = find_dependents_of(start, finish, p);
14024 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14025 --- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14026 +++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14027 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14028 unsigned long thread_saved_pc(struct task_struct *tsk)
14030 return ((unsigned long *)tsk->thread.sp)[3];
14031 +//XXX return tsk->thread.eip;
14035 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14037 unsigned short ss, gs;
14039 - if (user_mode_vm(regs)) {
14040 + if (user_mode(regs)) {
14042 ss = regs->ss & 0xffff;
14043 - gs = get_user_gs(regs);
14045 sp = kernel_stack_pointer(regs);
14046 savesegment(ss, ss);
14047 - savesegment(gs, gs);
14049 + gs = get_user_gs(regs);
14051 show_regs_common();
14053 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14054 struct task_struct *tsk;
14057 - childregs = task_pt_regs(p);
14058 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14059 *childregs = *regs;
14061 childregs->sp = sp;
14063 p->thread.sp = (unsigned long) childregs;
14064 p->thread.sp0 = (unsigned long) (childregs+1);
14065 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14067 p->thread.ip = (unsigned long) ret_from_fork;
14069 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14070 struct thread_struct *prev = &prev_p->thread,
14071 *next = &next_p->thread;
14072 int cpu = smp_processor_id();
14073 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14074 + struct tss_struct *tss = init_tss + cpu;
14077 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14078 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14080 lazy_save_gs(prev->gs);
14082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14083 + __set_fs(task_thread_info(next_p)->addr_limit);
14087 * Load the per-thread Thread-Local Storage descriptor.
14089 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14091 arch_end_context_switch(next_p);
14093 + percpu_write(current_task, next_p);
14094 + percpu_write(current_tinfo, &next_p->tinfo);
14097 __math_state_restore();
14099 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14100 if (prev->gs | next->gs)
14101 lazy_load_gs(next->gs);
14103 - percpu_write(current_task, next_p);
14108 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14109 } while (count++ < 16);
14113 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14114 --- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14115 +++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14116 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14117 void exit_idle(void)
14119 /* idle loop has pid 0 */
14120 - if (current->pid)
14121 + if (task_pid_nr(current))
14125 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14126 struct pt_regs *childregs;
14127 struct task_struct *me = current;
14129 - childregs = ((struct pt_regs *)
14130 - (THREAD_SIZE + task_stack_page(p))) - 1;
14131 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14132 *childregs = *regs;
14135 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14136 p->thread.sp = (unsigned long) childregs;
14137 p->thread.sp0 = (unsigned long) (childregs+1);
14138 p->thread.usersp = me->thread.usersp;
14139 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14141 set_tsk_thread_flag(p, TIF_FORK);
14143 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14144 struct thread_struct *prev = &prev_p->thread;
14145 struct thread_struct *next = &next_p->thread;
14146 int cpu = smp_processor_id();
14147 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14148 + struct tss_struct *tss = init_tss + cpu;
14149 unsigned fsindex, gsindex;
14152 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14153 prev->usersp = percpu_read(old_rsp);
14154 percpu_write(old_rsp, next->usersp);
14155 percpu_write(current_task, next_p);
14156 + percpu_write(current_tinfo, &next_p->tinfo);
14158 - percpu_write(kernel_stack,
14159 - (unsigned long)task_stack_page(next_p) +
14160 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14161 + percpu_write(kernel_stack, next->sp0);
14164 * Now maybe reload the debug registers and handle I/O bitmaps
14165 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14166 if (!p || p == current || p->state == TASK_RUNNING)
14168 stack = (unsigned long)task_stack_page(p);
14169 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14170 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14172 fp = *(u64 *)(p->thread.sp);
14174 - if (fp < (unsigned long)stack ||
14175 - fp >= (unsigned long)stack+THREAD_SIZE)
14176 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14178 ip = *(u64 *)(fp+8);
14179 if (!in_sched_functions(ip))
14180 diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14181 --- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14182 +++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14183 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14185 void free_thread_info(struct thread_info *ti)
14187 - free_thread_xstate(ti->task);
14188 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14191 +static struct kmem_cache *task_struct_cachep;
14193 void arch_task_cache_init(void)
14195 - task_xstate_cachep =
14196 - kmem_cache_create("task_xstate", xstate_size,
14197 + /* create a slab on which task_structs can be allocated */
14198 + task_struct_cachep =
14199 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14200 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14202 + task_xstate_cachep =
14203 + kmem_cache_create("task_xstate", xstate_size,
14204 __alignof__(union thread_xstate),
14205 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14206 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14209 +struct task_struct *alloc_task_struct_node(int node)
14211 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14214 +void free_task_struct(struct task_struct *task)
14216 + free_thread_xstate(task);
14217 + kmem_cache_free(task_struct_cachep, task);
14221 @@ -70,7 +87,7 @@ void exit_thread(void)
14222 unsigned long *bp = t->io_bitmap_ptr;
14225 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14226 + struct tss_struct *tss = init_tss + get_cpu();
14228 t->io_bitmap_ptr = NULL;
14229 clear_thread_flag(TIF_IO_BITMAP);
14230 @@ -106,7 +123,7 @@ void show_regs_common(void)
14232 printk(KERN_CONT "\n");
14233 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14234 - current->pid, current->comm, print_tainted(),
14235 + task_pid_nr(current), current->comm, print_tainted(),
14236 init_utsname()->release,
14237 (int)strcspn(init_utsname()->version, " "),
14238 init_utsname()->version);
14239 @@ -120,6 +137,9 @@ void flush_thread(void)
14241 struct task_struct *tsk = current;
14243 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14244 + loadsegment(gs, 0);
14246 flush_ptrace_hw_breakpoint(tsk);
14247 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14249 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14250 regs.di = (unsigned long) arg;
14252 #ifdef CONFIG_X86_32
14253 - regs.ds = __USER_DS;
14254 - regs.es = __USER_DS;
14255 + regs.ds = __KERNEL_DS;
14256 + regs.es = __KERNEL_DS;
14257 regs.fs = __KERNEL_PERCPU;
14258 - regs.gs = __KERNEL_STACK_CANARY;
14259 + savesegment(gs, regs.gs);
14261 regs.ss = __KERNEL_DS;
14263 @@ -401,7 +421,7 @@ void default_idle(void)
14264 EXPORT_SYMBOL(default_idle);
14267 -void stop_this_cpu(void *dummy)
14268 +__noreturn void stop_this_cpu(void *dummy)
14270 local_irq_disable();
14272 @@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14274 early_param("idle", idle_setup);
14276 -unsigned long arch_align_stack(unsigned long sp)
14277 +#ifdef CONFIG_PAX_RANDKSTACK
14278 +asmlinkage void pax_randomize_kstack(void)
14280 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14281 - sp -= get_random_int() % 8192;
14282 - return sp & ~0xf;
14284 + struct thread_struct *thread = ¤t->thread;
14285 + unsigned long time;
14287 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14289 - unsigned long range_end = mm->brk + 0x02000000;
14290 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14292 + if (!randomize_va_space)
14297 + /* P4 seems to return a 0 LSB, ignore it */
14298 +#ifdef CONFIG_MPENTIUM4
14301 +#elif defined(CONFIG_X86_64)
14309 + thread->sp0 ^= time;
14310 + load_sp0(init_tss + smp_processor_id(), thread);
14312 +#ifdef CONFIG_X86_64
14313 + percpu_write(kernel_stack, thread->sp0);
14317 diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14318 --- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14319 +++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14320 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14321 unsigned long addr, unsigned long data)
14324 - unsigned long __user *datap = (unsigned long __user *)data;
14325 + unsigned long __user *datap = (__force unsigned long __user *)data;
14328 /* read the word at location addr in the USER area. */
14329 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14330 if ((int) addr < 0)
14332 ret = do_get_thread_area(child, addr,
14333 - (struct user_desc __user *)data);
14334 + (__force struct user_desc __user *) data);
14337 case PTRACE_SET_THREAD_AREA:
14338 if ((int) addr < 0)
14340 ret = do_set_thread_area(child, addr,
14341 - (struct user_desc __user *)data, 0);
14342 + (__force struct user_desc __user *) data, 0);
14346 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14347 memset(info, 0, sizeof(*info));
14348 info->si_signo = SIGTRAP;
14349 info->si_code = si_code;
14350 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14351 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14354 void user_single_step_siginfo(struct task_struct *tsk,
14355 @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14356 * We must return the syscall number to actually look up in the table.
14357 * This can be -1L to skip running any syscall at all.
14359 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
14360 +long syscall_trace_enter(struct pt_regs *regs)
14364 @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14365 return ret ?: regs->orig_ax;
14368 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
14369 +void syscall_trace_leave(struct pt_regs *regs)
14373 diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14374 --- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14375 +++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14376 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14380 -static atomic64_t last_value = ATOMIC64_INIT(0);
14381 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14383 void pvclock_resume(void)
14385 - atomic64_set(&last_value, 0);
14386 + atomic64_set_unchecked(&last_value, 0);
14389 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14390 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14391 * updating at the same time, and one of them could be slightly behind,
14392 * making the assumption that last_value always go forward fail to hold.
14394 - last = atomic64_read(&last_value);
14395 + last = atomic64_read_unchecked(&last_value);
14399 - last = atomic64_cmpxchg(&last_value, last, ret);
14400 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14401 } while (unlikely(last != ret));
14404 diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14405 --- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14406 +++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14407 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14408 EXPORT_SYMBOL(pm_power_off);
14410 static const struct desc_ptr no_idt = {};
14411 -static int reboot_mode;
14412 +static unsigned short reboot_mode;
14413 enum reboot_type reboot_type = BOOT_KBD;
14416 @@ -307,13 +307,17 @@ core_initcall(reboot_init);
14417 extern const unsigned char machine_real_restart_asm[];
14418 extern const u64 machine_real_restart_gdt[3];
14420 -void machine_real_restart(unsigned int type)
14421 +__noreturn void machine_real_restart(unsigned int type)
14424 unsigned long restart_pa;
14425 - void (*restart_lowmem)(unsigned int);
14426 + void (* __noreturn restart_lowmem)(unsigned int);
14429 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14430 + struct desc_struct *gdt;
14433 local_irq_disable();
14435 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14436 @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14437 boot)". This seems like a fairly standard thing that gets set by
14438 REBOOT.COM programs, and the previous reset routine did this
14440 - *((unsigned short *)0x472) = reboot_mode;
14441 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14443 /* Patch the GDT in the low memory trampoline */
14444 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14446 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14447 restart_pa = virt_to_phys(restart_va);
14448 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14449 + restart_lowmem = (void *)restart_pa;
14451 /* GDT[0]: GDT self-pointer */
14453 @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14454 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14456 /* Jump to the identity-mapped low memory code */
14458 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14459 + gdt = get_cpu_gdt_table(smp_processor_id());
14460 + pax_open_kernel();
14461 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14462 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14463 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14464 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14466 +#ifdef CONFIG_PAX_KERNEXEC
14467 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14468 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14469 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14470 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14471 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14472 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14474 + pax_close_kernel();
14477 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14478 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14481 restart_lowmem(type);
14485 #ifdef CONFIG_APM_MODULE
14486 EXPORT_SYMBOL(machine_real_restart);
14487 @@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14491 -static void native_machine_emergency_restart(void)
14492 +__noreturn static void native_machine_emergency_restart(void)
14496 @@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14500 -static void __machine_emergency_restart(int emergency)
14501 +static __noreturn void __machine_emergency_restart(int emergency)
14503 reboot_emergency = emergency;
14504 machine_ops.emergency_restart();
14507 -static void native_machine_restart(char *__unused)
14508 +static __noreturn void native_machine_restart(char *__unused)
14510 printk("machine restart\n");
14512 @@ -616,7 +646,7 @@ static void native_machine_restart(char
14513 __machine_emergency_restart(0);
14516 -static void native_machine_halt(void)
14517 +static __noreturn void native_machine_halt(void)
14519 /* stop other cpus and apics */
14520 machine_shutdown();
14521 @@ -627,7 +657,7 @@ static void native_machine_halt(void)
14522 stop_this_cpu(NULL);
14525 -static void native_machine_power_off(void)
14526 +__noreturn static void native_machine_power_off(void)
14528 if (pm_power_off) {
14530 @@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14532 /* a fallback in case there is no PM info available */
14533 tboot_shutdown(TB_SHUTDOWN_HALT);
14537 struct machine_ops machine_ops = {
14538 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14539 --- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14540 +++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14541 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14542 * area (640->1Mb) as ram even though it is not.
14545 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14546 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14547 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14550 @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14552 if (!boot_params.hdr.root_flags)
14553 root_mountflags &= ~MS_RDONLY;
14554 - init_mm.start_code = (unsigned long) _text;
14555 - init_mm.end_code = (unsigned long) _etext;
14556 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14557 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14558 init_mm.end_data = (unsigned long) _edata;
14559 init_mm.brk = _brk_end;
14561 - code_resource.start = virt_to_phys(_text);
14562 - code_resource.end = virt_to_phys(_etext)-1;
14563 - data_resource.start = virt_to_phys(_etext);
14564 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14565 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14566 + data_resource.start = virt_to_phys(_sdata);
14567 data_resource.end = virt_to_phys(_edata)-1;
14568 bss_resource.start = virt_to_phys(&__bss_start);
14569 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14570 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14571 --- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14572 +++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14573 @@ -21,19 +21,17 @@
14574 #include <asm/cpu.h>
14575 #include <asm/stackprotector.h>
14577 -DEFINE_PER_CPU(int, cpu_number);
14579 +DEFINE_PER_CPU(unsigned int, cpu_number);
14580 EXPORT_PER_CPU_SYMBOL(cpu_number);
14583 -#ifdef CONFIG_X86_64
14584 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14586 -#define BOOT_PERCPU_OFFSET 0
14589 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14590 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14592 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14593 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14594 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14596 EXPORT_SYMBOL(__per_cpu_offset);
14597 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14599 #ifdef CONFIG_X86_32
14600 struct desc_struct gdt;
14601 + unsigned long base = per_cpu_offset(cpu);
14603 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14604 - 0x2 | DESCTYPE_S, 0x8);
14606 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14607 + 0x83 | DESCTYPE_S, 0xC);
14608 write_gdt_entry(get_cpu_gdt_table(cpu),
14609 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14611 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14612 /* alrighty, percpu areas up and running */
14613 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14614 for_each_possible_cpu(cpu) {
14615 +#ifdef CONFIG_CC_STACKPROTECTOR
14616 +#ifdef CONFIG_X86_32
14617 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14620 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14621 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14622 per_cpu(cpu_number, cpu) = cpu;
14623 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14625 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14627 +#ifdef CONFIG_CC_STACKPROTECTOR
14628 +#ifdef CONFIG_X86_32
14630 + per_cpu(stack_canary.canary, cpu) = canary;
14634 * Up to this point, the boot CPU has been using .init.data
14635 * area. Reload any changed state for the boot CPU.
14636 diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14637 --- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14638 +++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14639 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14640 * Align the stack pointer according to the i386 ABI,
14641 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14643 - sp = ((sp + 4) & -16ul) - 4;
14644 + sp = ((sp - 12) & -16ul) - 4;
14645 #else /* !CONFIG_X86_32 */
14646 sp = round_down(sp, 16) - 8;
14648 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14649 * Return an always-bogus address instead so we will die with SIGSEGV.
14651 if (onsigstack && !likely(on_sig_stack(sp)))
14652 - return (void __user *)-1L;
14653 + return (__force void __user *)-1L;
14655 /* save i387 state */
14656 if (used_math() && save_i387_xstate(*fpstate) < 0)
14657 - return (void __user *)-1L;
14658 + return (__force void __user *)-1L;
14660 return (void __user *)sp;
14662 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14665 if (current->mm->context.vdso)
14666 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14667 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14669 - restorer = &frame->retcode;
14670 + restorer = (void __user *)&frame->retcode;
14671 if (ka->sa.sa_flags & SA_RESTORER)
14672 restorer = ka->sa.sa_restorer;
14674 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14675 * reasons and because gdb uses it as a signature to notice
14676 * signal handler stack frames.
14678 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14679 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14683 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14684 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14686 /* Set up to return from userspace. */
14687 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14688 + if (current->mm->context.vdso)
14689 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14691 + restorer = (void __user *)&frame->retcode;
14692 if (ka->sa.sa_flags & SA_RESTORER)
14693 restorer = ka->sa.sa_restorer;
14694 put_user_ex(restorer, &frame->pretcode);
14695 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14696 * reasons and because gdb uses it as a signature to notice
14697 * signal handler stack frames.
14699 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14700 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14701 } put_user_catch(err);
14704 @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14708 + pax_track_stack();
14711 * We want the common case to go fast, which is why we may in certain
14712 * cases get here from kernel mode. Just return without doing anything
14713 @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14714 * X86_32: vm86 regs switched out by assembly code before reaching
14715 * here, so testing against kernel CS suffices.
14717 - if (!user_mode(regs))
14718 + if (!user_mode_novm(regs))
14721 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14722 diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14723 --- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14724 +++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14725 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14726 set_idle_for_cpu(cpu, c_idle.idle);
14728 per_cpu(current_task, cpu) = c_idle.idle;
14729 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14730 #ifdef CONFIG_X86_32
14731 /* Stack for startup_32 can be just as for start_secondary onwards */
14734 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14735 initial_gs = per_cpu_offset(cpu);
14736 - per_cpu(kernel_stack, cpu) =
14737 - (unsigned long)task_stack_page(c_idle.idle) -
14738 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14739 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14742 + pax_open_kernel();
14743 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14744 + pax_close_kernel();
14746 initial_code = (unsigned long)start_secondary;
14747 stack_start = c_idle.idle->thread.sp;
14749 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14753 +#ifdef CONFIG_PAX_PER_CPU_PGD
14754 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14755 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14756 + KERNEL_PGD_PTRS);
14759 err = do_boot_cpu(apicid, cpu);
14761 pr_debug("do_boot_cpu failed %d\n", err);
14762 diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14763 --- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14764 +++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14765 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14766 struct desc_struct *desc;
14767 unsigned long base;
14772 mutex_lock(&child->mm->context.lock);
14773 - if (unlikely((seg >> 3) >= child->mm->context.size))
14774 + if (unlikely(seg >= child->mm->context.size))
14775 addr = -1L; /* bogus selector, access would fault */
14777 desc = child->mm->context.ldt + seg;
14778 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14781 mutex_unlock(&child->mm->context.lock);
14783 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14784 + addr = ktla_ktva(addr);
14788 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14789 unsigned char opcode[15];
14790 unsigned long addr = convert_ip_to_linear(child, regs);
14792 + if (addr == -EINVAL)
14795 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14796 for (i = 0; i < copied; i++) {
14797 switch (opcode[i]) {
14798 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14800 #ifdef CONFIG_X86_64
14801 case 0x40 ... 0x4f:
14802 - if (regs->cs != __USER_CS)
14803 + if ((regs->cs & 0xffff) != __USER_CS)
14804 /* 32-bit mode: register increment */
14806 /* 64-bit mode: REX prefix */
14807 diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14808 --- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14809 +++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14811 +.section .rodata,"a",@progbits
14812 ENTRY(sys_call_table)
14813 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14815 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14816 --- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14817 +++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14818 @@ -24,17 +24,224 @@
14820 #include <asm/syscalls.h>
14823 - * Do a system call from kernel instead of calling sys_execve so we
14824 - * end up with proper pt_regs.
14826 -int kernel_execve(const char *filename,
14827 - const char *const argv[],
14828 - const char *const envp[])
14829 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14832 - asm volatile ("int $0x80"
14834 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14836 + unsigned long pax_task_size = TASK_SIZE;
14838 +#ifdef CONFIG_PAX_SEGMEXEC
14839 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14840 + pax_task_size = SEGMEXEC_TASK_SIZE;
14843 + if (len > pax_task_size || addr > pax_task_size - len)
14850 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14851 + unsigned long len, unsigned long pgoff, unsigned long flags)
14853 + struct mm_struct *mm = current->mm;
14854 + struct vm_area_struct *vma;
14855 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14857 +#ifdef CONFIG_PAX_SEGMEXEC
14858 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14859 + pax_task_size = SEGMEXEC_TASK_SIZE;
14862 + pax_task_size -= PAGE_SIZE;
14864 + if (len > pax_task_size)
14867 + if (flags & MAP_FIXED)
14870 +#ifdef CONFIG_PAX_RANDMMAP
14871 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14875 + addr = PAGE_ALIGN(addr);
14876 + if (pax_task_size - len >= addr) {
14877 + vma = find_vma(mm, addr);
14878 + if (check_heap_stack_gap(vma, addr, len))
14882 + if (len > mm->cached_hole_size) {
14883 + start_addr = addr = mm->free_area_cache;
14885 + start_addr = addr = mm->mmap_base;
14886 + mm->cached_hole_size = 0;
14889 +#ifdef CONFIG_PAX_PAGEEXEC
14890 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14891 + start_addr = 0x00110000UL;
14893 +#ifdef CONFIG_PAX_RANDMMAP
14894 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14895 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14898 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14899 + start_addr = addr = mm->mmap_base;
14901 + addr = start_addr;
14906 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14907 + /* At this point: (!vma || addr < vma->vm_end). */
14908 + if (pax_task_size - len < addr) {
14910 + * Start a new search - just in case we missed
14913 + if (start_addr != mm->mmap_base) {
14914 + start_addr = addr = mm->mmap_base;
14915 + mm->cached_hole_size = 0;
14916 + goto full_search;
14920 + if (check_heap_stack_gap(vma, addr, len))
14922 + if (addr + mm->cached_hole_size < vma->vm_start)
14923 + mm->cached_hole_size = vma->vm_start - addr;
14924 + addr = vma->vm_end;
14925 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14926 + start_addr = addr = mm->mmap_base;
14927 + mm->cached_hole_size = 0;
14928 + goto full_search;
14933 + * Remember the place where we stopped the search:
14935 + mm->free_area_cache = addr + len;
14940 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14941 + const unsigned long len, const unsigned long pgoff,
14942 + const unsigned long flags)
14944 + struct vm_area_struct *vma;
14945 + struct mm_struct *mm = current->mm;
14946 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14948 +#ifdef CONFIG_PAX_SEGMEXEC
14949 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14950 + pax_task_size = SEGMEXEC_TASK_SIZE;
14953 + pax_task_size -= PAGE_SIZE;
14955 + /* requested length too big for entire address space */
14956 + if (len > pax_task_size)
14959 + if (flags & MAP_FIXED)
14962 +#ifdef CONFIG_PAX_PAGEEXEC
14963 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14967 +#ifdef CONFIG_PAX_RANDMMAP
14968 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14971 + /* requesting a specific address */
14973 + addr = PAGE_ALIGN(addr);
14974 + if (pax_task_size - len >= addr) {
14975 + vma = find_vma(mm, addr);
14976 + if (check_heap_stack_gap(vma, addr, len))
14981 + /* check if free_area_cache is useful for us */
14982 + if (len <= mm->cached_hole_size) {
14983 + mm->cached_hole_size = 0;
14984 + mm->free_area_cache = mm->mmap_base;
14987 + /* either no address requested or can't fit in requested address hole */
14988 + addr = mm->free_area_cache;
14990 + /* make sure it can fit in the remaining address space */
14991 + if (addr > len) {
14992 + vma = find_vma(mm, addr-len);
14993 + if (check_heap_stack_gap(vma, addr - len, len))
14994 + /* remember the address as a hint for next time */
14995 + return (mm->free_area_cache = addr-len);
14998 + if (mm->mmap_base < len)
15001 + addr = mm->mmap_base-len;
15005 + * Lookup failure means no vma is above this address,
15006 + * else if new region fits below vma->vm_start,
15007 + * return with success:
15009 + vma = find_vma(mm, addr);
15010 + if (check_heap_stack_gap(vma, addr, len))
15011 + /* remember the address as a hint for next time */
15012 + return (mm->free_area_cache = addr);
15014 + /* remember the largest hole we saw so far */
15015 + if (addr + mm->cached_hole_size < vma->vm_start)
15016 + mm->cached_hole_size = vma->vm_start - addr;
15018 + /* try just below the current vma->vm_start */
15019 + addr = skip_heap_stack_gap(vma, len);
15020 + } while (!IS_ERR_VALUE(addr));
15024 + * A failed mmap() very likely causes application failure,
15025 + * so fall back to the bottom-up function here. This scenario
15026 + * can happen with large stack limits and large mmap()
15030 +#ifdef CONFIG_PAX_SEGMEXEC
15031 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15032 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15036 + mm->mmap_base = TASK_UNMAPPED_BASE;
15038 +#ifdef CONFIG_PAX_RANDMMAP
15039 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15040 + mm->mmap_base += mm->delta_mmap;
15043 + mm->free_area_cache = mm->mmap_base;
15044 + mm->cached_hole_size = ~0UL;
15045 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15047 + * Restore the topdown base:
15049 + mm->mmap_base = base;
15050 + mm->free_area_cache = base;
15051 + mm->cached_hole_size = ~0UL;
15055 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15056 --- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15057 +++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15058 @@ -32,8 +32,8 @@ out:
15062 -static void find_start_end(unsigned long flags, unsigned long *begin,
15063 - unsigned long *end)
15064 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15065 + unsigned long *begin, unsigned long *end)
15067 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15068 unsigned long new_begin;
15069 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15070 *begin = new_begin;
15073 - *begin = TASK_UNMAPPED_BASE;
15074 + *begin = mm->mmap_base;
15078 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15079 if (flags & MAP_FIXED)
15082 - find_start_end(flags, &begin, &end);
15083 + find_start_end(mm, flags, &begin, &end);
15088 +#ifdef CONFIG_PAX_RANDMMAP
15089 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15093 addr = PAGE_ALIGN(addr);
15094 vma = find_vma(mm, addr);
15095 - if (end - len >= addr &&
15096 - (!vma || addr + len <= vma->vm_start))
15097 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15100 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15101 @@ -106,7 +109,7 @@ full_search:
15105 - if (!vma || addr + len <= vma->vm_start) {
15106 + if (check_heap_stack_gap(vma, addr, len)) {
15108 * Remember the place where we stopped the search:
15110 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15112 struct vm_area_struct *vma;
15113 struct mm_struct *mm = current->mm;
15114 - unsigned long addr = addr0;
15115 + unsigned long base = mm->mmap_base, addr = addr0;
15117 /* requested length too big for entire address space */
15118 if (len > TASK_SIZE)
15119 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15120 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15123 +#ifdef CONFIG_PAX_RANDMMAP
15124 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15127 /* requesting a specific address */
15129 addr = PAGE_ALIGN(addr);
15130 - vma = find_vma(mm, addr);
15131 - if (TASK_SIZE - len >= addr &&
15132 - (!vma || addr + len <= vma->vm_start))
15134 + if (TASK_SIZE - len >= addr) {
15135 + vma = find_vma(mm, addr);
15136 + if (check_heap_stack_gap(vma, addr, len))
15141 /* check if free_area_cache is useful for us */
15142 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15143 /* make sure it can fit in the remaining address space */
15145 vma = find_vma(mm, addr-len);
15146 - if (!vma || addr <= vma->vm_start)
15147 + if (check_heap_stack_gap(vma, addr - len, len))
15148 /* remember the address as a hint for next time */
15149 return mm->free_area_cache = addr-len;
15151 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15152 * return with success:
15154 vma = find_vma(mm, addr);
15155 - if (!vma || addr+len <= vma->vm_start)
15156 + if (check_heap_stack_gap(vma, addr, len))
15157 /* remember the address as a hint for next time */
15158 return mm->free_area_cache = addr;
15160 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15161 mm->cached_hole_size = vma->vm_start - addr;
15163 /* try just below the current vma->vm_start */
15164 - addr = vma->vm_start-len;
15165 - } while (len < vma->vm_start);
15166 + addr = skip_heap_stack_gap(vma, len);
15167 + } while (!IS_ERR_VALUE(addr));
15171 @@ -198,13 +206,21 @@ bottomup:
15172 * can happen with large stack limits and large mmap()
15175 + mm->mmap_base = TASK_UNMAPPED_BASE;
15177 +#ifdef CONFIG_PAX_RANDMMAP
15178 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15179 + mm->mmap_base += mm->delta_mmap;
15182 + mm->free_area_cache = mm->mmap_base;
15183 mm->cached_hole_size = ~0UL;
15184 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15185 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15187 * Restore the topdown base:
15189 - mm->free_area_cache = mm->mmap_base;
15190 + mm->mmap_base = base;
15191 + mm->free_area_cache = base;
15192 mm->cached_hole_size = ~0UL;
15195 diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15196 --- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15197 +++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15198 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15200 void tboot_shutdown(u32 shutdown_type)
15202 - void (*shutdown)(void);
15203 + void (* __noreturn shutdown)(void);
15205 if (!tboot_enabled())
15207 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15209 switch_to_tboot_pt();
15211 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15212 + shutdown = (void *)tboot->shutdown_entry;
15215 /* should not reach here */
15216 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15217 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15220 -static atomic_t ap_wfs_count;
15221 +static atomic_unchecked_t ap_wfs_count;
15223 static int tboot_wait_for_aps(int num_aps)
15225 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15229 - atomic_inc(&ap_wfs_count);
15230 + atomic_inc_unchecked(&ap_wfs_count);
15231 if (num_online_cpus() == 1)
15232 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15233 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15237 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15239 tboot_create_trampoline();
15241 - atomic_set(&ap_wfs_count, 0);
15242 + atomic_set_unchecked(&ap_wfs_count, 0);
15243 register_hotcpu_notifier(&tboot_cpu_notifier);
15246 diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15247 --- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15248 +++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15249 @@ -22,17 +22,13 @@
15250 #include <asm/hpet.h>
15251 #include <asm/time.h>
15253 -#ifdef CONFIG_X86_64
15254 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15257 unsigned long profile_pc(struct pt_regs *regs)
15259 unsigned long pc = instruction_pointer(regs);
15261 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15262 + if (!user_mode(regs) && in_lock_functions(pc)) {
15263 #ifdef CONFIG_FRAME_POINTER
15264 - return *(unsigned long *)(regs->bp + sizeof(long));
15265 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15267 unsigned long *sp =
15268 (unsigned long *)kernel_stack_pointer(regs);
15269 @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15270 * or above a saved flags. Eflags has bits 22-31 zero,
15271 * kernel addresses don't.
15274 +#ifdef CONFIG_PAX_KERNEXEC
15275 + return ktla_ktva(sp[0]);
15287 diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15288 --- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15289 +++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15290 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15291 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15294 +#ifdef CONFIG_PAX_SEGMEXEC
15295 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15299 set_tls_desc(p, idx, &info, 1);
15302 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15303 --- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15304 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15306 #include <asm/segment.h>
15307 #include <asm/page_types.h>
15309 +#ifdef CONFIG_PAX_KERNEXEC
15312 +#define ta(X) ((X) - __PAGE_OFFSET)
15317 .section ".x86_trampoline","a"
15318 @@ -62,7 +68,7 @@ r_base = .
15319 inc %ax # protected mode (PE) bit
15320 lmsw %ax # into protected mode
15321 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15322 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15323 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15325 # These need to be in the same 64K segment as the above;
15326 # hence we don't use the boot_gdt_descr defined in head.S
15327 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15328 --- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15329 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15330 @@ -90,7 +90,7 @@ startup_32:
15331 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15334 - movl $X86_CR4_PAE, %eax
15335 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15336 movl %eax, %cr4 # Enable PAE mode
15338 # Setup trampoline 4 level pagetables
15339 @@ -138,7 +138,7 @@ tidt:
15340 # so the kernel can live anywhere
15343 - .short tgdt_end - tgdt # gdt limit
15344 + .short tgdt_end - tgdt - 1 # gdt limit
15345 .long tgdt - r_base
15347 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15348 diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15349 --- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15350 +++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15351 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15353 /* Do we ignore FPU interrupts ? */
15354 char ignore_fpu_irq;
15357 - * The IDT has to be page-aligned to simplify the Pentium
15358 - * F0 0F bug workaround.
15360 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15363 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15364 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15367 static void __kprobes
15368 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15369 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15370 long error_code, siginfo_t *info)
15372 struct task_struct *tsk = current;
15374 #ifdef CONFIG_X86_32
15375 - if (regs->flags & X86_VM_MASK) {
15376 + if (v8086_mode(regs)) {
15378 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15379 * On nmi (interrupt 2), do_trap should not be called.
15380 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15384 - if (!user_mode(regs))
15385 + if (!user_mode_novm(regs))
15388 #ifdef CONFIG_X86_32
15389 @@ -157,7 +151,7 @@ trap_signal:
15390 printk_ratelimit()) {
15392 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15393 - tsk->comm, tsk->pid, str,
15394 + tsk->comm, task_pid_nr(tsk), str,
15395 regs->ip, regs->sp, error_code);
15396 print_vma_addr(" in ", regs->ip);
15398 @@ -174,8 +168,20 @@ kernel_trap:
15399 if (!fixup_exception(regs)) {
15400 tsk->thread.error_code = error_code;
15401 tsk->thread.trap_no = trapnr;
15403 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15404 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15405 + str = "PAX: suspicious stack segment fault";
15408 die(str, regs, error_code);
15411 +#ifdef CONFIG_PAX_REFCOUNT
15413 + pax_report_refcount_overflow(regs);
15418 #ifdef CONFIG_X86_32
15419 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15420 conditional_sti(regs);
15422 #ifdef CONFIG_X86_32
15423 - if (regs->flags & X86_VM_MASK)
15424 + if (v8086_mode(regs))
15429 - if (!user_mode(regs))
15430 + if (!user_mode_novm(regs))
15433 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15434 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15435 + struct mm_struct *mm = tsk->mm;
15436 + unsigned long limit;
15438 + down_write(&mm->mmap_sem);
15439 + limit = mm->context.user_cs_limit;
15440 + if (limit < TASK_SIZE) {
15441 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15442 + up_write(&mm->mmap_sem);
15445 + up_write(&mm->mmap_sem);
15449 tsk->thread.error_code = error_code;
15450 tsk->thread.trap_no = 13;
15452 @@ -304,6 +326,13 @@ gp_in_kernel:
15453 if (notify_die(DIE_GPF, "general protection fault", regs,
15454 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15457 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15458 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15459 + die("PAX: suspicious general protection fault", regs, error_code);
15463 die("general protection fault", regs, error_code);
15466 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15467 dotraplinkage notrace __kprobes void
15468 do_nmi(struct pt_regs *regs, long error_code)
15471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15472 + if (!user_mode(regs)) {
15473 + unsigned long cs = regs->cs & 0xFFFF;
15474 + unsigned long ip = ktva_ktla(regs->ip);
15476 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15483 inc_irq_stat(__nmi_count);
15484 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15485 /* It's safe to allow irq's after DR6 has been saved */
15486 preempt_conditional_sti(regs);
15488 - if (regs->flags & X86_VM_MASK) {
15489 + if (v8086_mode(regs)) {
15490 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15492 preempt_conditional_cli(regs);
15493 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15494 * We already checked v86 mode above, so we can check for kernel mode
15495 * by just checking the CPL of CS.
15497 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15498 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15499 tsk->thread.debugreg6 &= ~DR_STEP;
15500 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15501 regs->flags &= ~X86_EFLAGS_TF;
15502 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15504 conditional_sti(regs);
15506 - if (!user_mode_vm(regs))
15507 + if (!user_mode(regs))
15509 if (!fixup_exception(regs)) {
15510 task->thread.error_code = error_code;
15511 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15512 void __math_state_restore(void)
15514 struct thread_info *thread = current_thread_info();
15515 - struct task_struct *tsk = thread->task;
15516 + struct task_struct *tsk = current;
15519 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15520 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15522 asmlinkage void math_state_restore(void)
15524 - struct thread_info *thread = current_thread_info();
15525 - struct task_struct *tsk = thread->task;
15526 + struct task_struct *tsk = current;
15528 if (!tsk_used_math(tsk)) {
15529 local_irq_enable();
15530 diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15531 --- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15532 +++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15534 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15535 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15536 * arch/x86/kernel/head_32.S: processor startup
15537 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15539 * verify_cpu, returns the status of longmode and SSE in register %eax.
15540 * 0: Success 1: Failure
15541 diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15542 --- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15543 +++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15545 #include <linux/ptrace.h>
15546 #include <linux/audit.h>
15547 #include <linux/stddef.h>
15548 +#include <linux/grsecurity.h>
15550 #include <asm/uaccess.h>
15551 #include <asm/io.h>
15552 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15556 - tss = &per_cpu(init_tss, get_cpu());
15557 + tss = init_tss + get_cpu();
15558 current->thread.sp0 = current->thread.saved_sp0;
15559 current->thread.sysenter_cs = __KERNEL_CS;
15560 load_sp0(tss, ¤t->thread);
15561 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15562 struct task_struct *tsk;
15563 int tmp, ret = -EPERM;
15565 +#ifdef CONFIG_GRKERNSEC_VM86
15566 + if (!capable(CAP_SYS_RAWIO)) {
15567 + gr_handle_vm86();
15573 if (tsk->thread.saved_sp0)
15575 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15577 struct vm86plus_struct __user *v86;
15579 +#ifdef CONFIG_GRKERNSEC_VM86
15580 + if (!capable(CAP_SYS_RAWIO)) {
15581 + gr_handle_vm86();
15589 case VM86_REQUEST_IRQ:
15590 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15591 tsk->thread.saved_fs = info->regs32->fs;
15592 tsk->thread.saved_gs = get_user_gs(info->regs32);
15594 - tss = &per_cpu(init_tss, get_cpu());
15595 + tss = init_tss + get_cpu();
15596 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15598 tsk->thread.sysenter_cs = 0;
15599 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15600 goto cannot_handle;
15601 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15602 goto cannot_handle;
15603 - intr_ptr = (unsigned long __user *) (i << 2);
15604 + intr_ptr = (__force unsigned long __user *) (i << 2);
15605 if (get_user(segoffs, intr_ptr))
15606 goto cannot_handle;
15607 if ((segoffs >> 16) == BIOSSEG)
15608 diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15609 --- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15610 +++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15612 #include <asm/page_types.h>
15613 #include <asm/cache.h>
15614 #include <asm/boot.h>
15615 +#include <asm/segment.h>
15617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15618 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15620 +#define __KERNEL_TEXT_OFFSET 0
15623 #undef i386 /* in case the preprocessor is a 32bit one */
15625 @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15626 #ifdef CONFIG_X86_32
15628 ENTRY(phys_startup_32)
15629 -jiffies = jiffies_64;
15631 OUTPUT_ARCH(i386:x86-64)
15632 ENTRY(phys_startup_64)
15633 -jiffies_64 = jiffies;
15636 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15637 @@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15640 text PT_LOAD FLAGS(5); /* R_E */
15641 +#ifdef CONFIG_X86_32
15642 + module PT_LOAD FLAGS(5); /* R_E */
15645 + rodata PT_LOAD FLAGS(5); /* R_E */
15647 + rodata PT_LOAD FLAGS(4); /* R__ */
15649 data PT_LOAD FLAGS(6); /* RW_ */
15650 #ifdef CONFIG_X86_64
15651 user PT_LOAD FLAGS(5); /* R_E */
15653 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15655 percpu PT_LOAD FLAGS(6); /* RW_ */
15657 + text.init PT_LOAD FLAGS(5); /* R_E */
15658 + text.exit PT_LOAD FLAGS(5); /* R_E */
15659 init PT_LOAD FLAGS(7); /* RWE */
15661 note PT_NOTE FLAGS(0); /* ___ */
15666 #ifdef CONFIG_X86_32
15667 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15668 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15669 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15671 - . = __START_KERNEL;
15672 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15673 + . = __START_KERNEL;
15676 /* Text and read-only data */
15677 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15679 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15680 /* bootstrapping code */
15681 +#ifdef CONFIG_X86_32
15682 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15684 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15686 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15689 #ifdef CONFIG_X86_32
15690 . = ALIGN(PAGE_SIZE);
15691 @@ -109,13 +129,47 @@ SECTIONS
15695 - /* End of text section */
15699 - NOTES :text :note
15700 + . += __KERNEL_TEXT_OFFSET;
15702 +#ifdef CONFIG_X86_32
15703 + . = ALIGN(PAGE_SIZE);
15704 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15706 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15707 + MODULES_EXEC_VADDR = .;
15709 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15710 + . = ALIGN(HPAGE_SIZE);
15711 + MODULES_EXEC_END = . - 1;
15717 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15718 + /* End of text section */
15719 + _etext = . - __KERNEL_TEXT_OFFSET;
15722 - EXCEPTION_TABLE(16) :text = 0x9090
15723 +#ifdef CONFIG_X86_32
15724 + . = ALIGN(PAGE_SIZE);
15725 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15727 + . = ALIGN(PAGE_SIZE);
15728 + *(.empty_zero_page)
15729 + *(.initial_pg_fixmap)
15730 + *(.initial_pg_pmd)
15731 + *(.initial_page_table)
15732 + *(.swapper_pg_dir)
15736 + . = ALIGN(PAGE_SIZE);
15737 + NOTES :rodata :note
15739 + EXCEPTION_TABLE(16) :rodata
15741 #if defined(CONFIG_DEBUG_RODATA)
15742 /* .text should occupy whole number of pages */
15743 @@ -127,16 +181,20 @@ SECTIONS
15746 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15748 +#ifdef CONFIG_PAX_KERNEXEC
15749 + . = ALIGN(HPAGE_SIZE);
15751 + . = ALIGN(PAGE_SIZE);
15754 /* Start of data section */
15758 INIT_TASK_DATA(THREAD_SIZE)
15760 -#ifdef CONFIG_X86_32
15761 - /* 32 bit has nosave before _edata */
15765 PAGE_ALIGNED_DATA(PAGE_SIZE)
15767 @@ -145,6 +203,8 @@ SECTIONS
15771 + jiffies = jiffies_64;
15773 /* rarely changed data like cpu maps */
15774 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15776 @@ -199,12 +259,6 @@ SECTIONS
15778 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15780 - . = ALIGN(L1_CACHE_BYTES);
15781 - .jiffies : AT(VLOAD(.jiffies)) {
15784 - jiffies = VVIRT(.jiffies);
15786 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15789 @@ -220,12 +274,19 @@ SECTIONS
15790 #endif /* CONFIG_X86_64 */
15792 /* Init code and data - will be freed after init */
15793 - . = ALIGN(PAGE_SIZE);
15794 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15797 +#ifdef CONFIG_PAX_KERNEXEC
15798 + . = ALIGN(HPAGE_SIZE);
15800 + . = ALIGN(PAGE_SIZE);
15803 __init_begin = .; /* paired with __init_end */
15807 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15810 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15811 * output PHDR, so the next output section - .init.text - should
15812 @@ -234,12 +295,27 @@ SECTIONS
15813 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15816 - INIT_TEXT_SECTION(PAGE_SIZE)
15817 -#ifdef CONFIG_X86_64
15820 + . = ALIGN(PAGE_SIZE);
15822 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15823 + VMLINUX_SYMBOL(_sinittext) = .;
15825 + VMLINUX_SYMBOL(_einittext) = .;
15826 + . = ALIGN(PAGE_SIZE);
15829 - INIT_DATA_SECTION(16)
15831 + * .exit.text is discard at runtime, not link time, to deal with
15832 + * references from .altinstructions and .eh_frame
15834 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15838 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15840 + . = ALIGN(PAGE_SIZE);
15841 + INIT_DATA_SECTION(16) :init
15844 * Code and data for a variety of lowlevel trampolines, to be
15845 @@ -306,19 +382,12 @@ SECTIONS
15850 - * .exit.text is discard at runtime, not link time, to deal with
15851 - * references from .altinstructions and .eh_frame
15853 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15857 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15861 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15862 +#ifndef CONFIG_SMP
15863 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15866 @@ -337,16 +406,10 @@ SECTIONS
15867 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15870 - . = ALIGN(PAGE_SIZE);
15871 __smp_locks_end = .;
15872 + . = ALIGN(PAGE_SIZE);
15875 -#ifdef CONFIG_X86_64
15876 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15882 . = ALIGN(PAGE_SIZE);
15883 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15884 @@ -362,6 +425,7 @@ SECTIONS
15886 . += 64 * 1024; /* 64k alignment slop space */
15887 *(.brk_reservation) /* areas brk users have reserved */
15888 + . = ALIGN(HPAGE_SIZE);
15892 @@ -388,13 +452,12 @@ SECTIONS
15893 * for the boot processor.
15895 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15896 -INIT_PER_CPU(gdt_page);
15897 INIT_PER_CPU(irq_stack_union);
15900 * Build-time check on the image size:
15902 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15903 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15904 "kernel image bigger than KERNEL_IMAGE_SIZE");
15907 diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15908 --- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15909 +++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15910 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15912 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15913 /* copy vsyscall data */
15914 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15915 vsyscall_gtod_data.clock.vread = clock->vread;
15916 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15917 vsyscall_gtod_data.clock.mask = clock->mask;
15918 @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15919 We do this here because otherwise user space would do it on
15920 its own in a likely inferior way (no access to jiffies).
15921 If you don't like it pass NULL. */
15922 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
15923 + if (tcache && tcache->blob[0] == (j = jiffies)) {
15924 p = tcache->blob[1];
15925 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15926 /* Load per CPU data from RDTSCP */
15927 diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15928 --- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15929 +++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15930 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15931 EXPORT_SYMBOL(copy_user_generic_string);
15932 EXPORT_SYMBOL(copy_user_generic_unrolled);
15933 EXPORT_SYMBOL(__copy_user_nocache);
15934 -EXPORT_SYMBOL(_copy_from_user);
15935 -EXPORT_SYMBOL(_copy_to_user);
15937 EXPORT_SYMBOL(copy_page);
15938 EXPORT_SYMBOL(clear_page);
15939 diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15940 --- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15941 +++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15942 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15943 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15946 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15947 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15948 fx_sw_user->extended_size -
15949 FP_XSTATE_MAGIC2_SIZE));
15951 @@ -267,7 +267,7 @@ fx_only:
15952 * the other extended state.
15954 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15955 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15956 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15960 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15962 err = restore_user_xstate(buf);
15964 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15965 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15967 if (unlikely(err)) {
15969 diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
15970 --- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
15971 +++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
15973 #define Src2ImmByte (2<<29)
15974 #define Src2One (3<<29)
15975 #define Src2Imm (4<<29)
15976 -#define Src2Mask (7<<29)
15977 +#define Src2Mask (7U<<29)
15979 #define X2(x...) x, x
15980 #define X3(x...) X2(x), x
15981 @@ -190,6 +190,7 @@ struct group_dual {
15983 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15985 + unsigned long _tmp; \
15986 __asm__ __volatile__ ( \
15987 _PRE_EFLAGS("0", "4", "2") \
15988 _op _suffix " %"_x"3,%1; " \
15989 @@ -203,8 +204,6 @@ struct group_dual {
15990 /* Raw emulation: instruction has two explicit operands. */
15991 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15993 - unsigned long _tmp; \
15995 switch ((_dst).bytes) { \
15997 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15998 @@ -220,7 +219,6 @@ struct group_dual {
16000 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16002 - unsigned long _tmp; \
16003 switch ((_dst).bytes) { \
16005 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16006 diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16007 --- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16008 +++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16010 #define APIC_BUS_CYCLE_NS 1
16012 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16013 -#define apic_debug(fmt, arg...)
16014 +#define apic_debug(fmt, arg...) do {} while (0)
16016 #define APIC_LVT_NUM 6
16017 /* 14 is the version for Xeon and Pentium 8.4.8*/
16018 diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16019 --- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16020 +++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16021 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16023 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16025 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16026 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16029 * Assume that the pte write on a page table of the same type
16030 @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16033 spin_lock(&vcpu->kvm->mmu_lock);
16034 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16035 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16037 kvm_mmu_free_some_pages(vcpu);
16038 ++vcpu->kvm->stat.mmu_pte_write;
16039 diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16040 --- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16041 +++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16042 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16043 unsigned long mmu_seq;
16046 + pax_track_stack();
16048 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16050 r = mmu_topup_memory_caches(vcpu);
16051 @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16053 kvm_flush_remote_tlbs(vcpu->kvm);
16055 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16056 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16058 spin_unlock(&vcpu->kvm->mmu_lock);
16060 diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16061 --- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16062 +++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16063 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16064 int cpu = raw_smp_processor_id();
16066 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16068 + pax_open_kernel();
16069 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16070 + pax_close_kernel();
16075 @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16080 + __set_fs(current_thread_info()->addr_limit);
16085 local_irq_disable();
16086 diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16087 --- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16088 +++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16089 @@ -725,7 +725,11 @@ static void reload_tss(void)
16090 struct desc_struct *descs;
16092 descs = (void *)gdt->address;
16094 + pax_open_kernel();
16095 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16096 + pax_close_kernel();
16101 @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16102 if (!cpu_has_vmx_flexpriority())
16103 flexpriority_enabled = 0;
16105 - if (!cpu_has_vmx_tpr_shadow())
16106 - kvm_x86_ops->update_cr8_intercept = NULL;
16107 + if (!cpu_has_vmx_tpr_shadow()) {
16108 + pax_open_kernel();
16109 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16110 + pax_close_kernel();
16113 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16114 kvm_disable_largepages();
16115 @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16116 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16118 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16119 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16120 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16121 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16122 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16123 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16124 @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16125 "jmp .Lkvm_vmx_return \n\t"
16126 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16127 ".Lkvm_vmx_return: "
16129 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16130 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16131 + ".Lkvm_vmx_return2: "
16134 /* Save guest registers, load host registers, keep flags */
16135 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16137 @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16139 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16140 [wordsize]"i"(sizeof(ulong))
16142 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16143 + ,[cs]"i"(__KERNEL_CS)
16147 , R"ax", R"bx", R"di", R"si"
16148 #ifdef CONFIG_X86_64
16149 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16151 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16153 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16154 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16156 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16157 + loadsegment(fs, __KERNEL_PERCPU);
16160 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16161 + __set_fs(current_thread_info()->addr_limit);
16166 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16167 diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16168 --- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16169 +++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16170 @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16171 if (n < msr_list.nmsrs)
16174 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16176 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16177 num_msrs_to_save * sizeof(u32)))
16179 @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16180 struct kvm_cpuid2 *cpuid,
16181 struct kvm_cpuid_entry2 __user *entries)
16187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16190 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16191 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16192 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16194 + for (i = 0; i < cpuid->nent; ++i) {
16195 + struct kvm_cpuid_entry2 cpuid_entry;
16196 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16198 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16200 vcpu->arch.cpuid_nent = cpuid->nent;
16201 kvm_apic_set_version(vcpu);
16202 kvm_x86_ops->cpuid_update(vcpu);
16203 @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16204 struct kvm_cpuid2 *cpuid,
16205 struct kvm_cpuid_entry2 __user *entries)
16211 if (cpuid->nent < vcpu->arch.cpuid_nent)
16214 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16215 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16216 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16218 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16219 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16220 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16226 @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16227 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16228 struct kvm_interrupt *irq)
16230 - if (irq->irq < 0 || irq->irq >= 256)
16231 + if (irq->irq >= 256)
16233 if (irqchip_in_kernel(vcpu->kvm))
16235 @@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16237 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16239 -int kvm_arch_init(void *opaque)
16240 +int kvm_arch_init(const void *opaque)
16243 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16244 diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16245 --- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16246 +++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16247 @@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16248 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16249 * Launcher to reboot us.
16251 -static void lguest_restart(char *reason)
16252 +static __noreturn void lguest_restart(char *reason)
16254 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16259 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16260 --- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16261 +++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16264 long long atomic64_read_cx8(long long, const atomic64_t *v);
16265 EXPORT_SYMBOL(atomic64_read_cx8);
16266 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16267 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16268 long long atomic64_set_cx8(long long, const atomic64_t *v);
16269 EXPORT_SYMBOL(atomic64_set_cx8);
16270 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16271 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16272 long long atomic64_xchg_cx8(long long, unsigned high);
16273 EXPORT_SYMBOL(atomic64_xchg_cx8);
16274 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_add_return_cx8);
16276 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16277 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16278 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16279 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16280 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16281 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16282 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16283 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16284 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16285 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16286 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16287 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16288 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16289 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16290 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16291 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16292 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16293 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16294 #ifndef CONFIG_X86_CMPXCHG64
16295 long long atomic64_read_386(long long, const atomic64_t *v);
16296 EXPORT_SYMBOL(atomic64_read_386);
16297 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16298 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16299 long long atomic64_set_386(long long, const atomic64_t *v);
16300 EXPORT_SYMBOL(atomic64_set_386);
16301 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16302 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16303 long long atomic64_xchg_386(long long, unsigned high);
16304 EXPORT_SYMBOL(atomic64_xchg_386);
16305 long long atomic64_add_return_386(long long a, atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_add_return_386);
16307 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16308 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16309 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16310 EXPORT_SYMBOL(atomic64_sub_return_386);
16311 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16312 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16313 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16314 EXPORT_SYMBOL(atomic64_inc_return_386);
16315 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16316 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16317 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16318 EXPORT_SYMBOL(atomic64_dec_return_386);
16319 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16320 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16321 long long atomic64_add_386(long long a, atomic64_t *v);
16322 EXPORT_SYMBOL(atomic64_add_386);
16323 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16324 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16325 long long atomic64_sub_386(long long a, atomic64_t *v);
16326 EXPORT_SYMBOL(atomic64_sub_386);
16327 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16328 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16329 long long atomic64_inc_386(long long a, atomic64_t *v);
16330 EXPORT_SYMBOL(atomic64_inc_386);
16331 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16332 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16333 long long atomic64_dec_386(long long a, atomic64_t *v);
16334 EXPORT_SYMBOL(atomic64_dec_386);
16335 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16336 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16337 long long atomic64_dec_if_positive_386(atomic64_t *v);
16338 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16339 int atomic64_inc_not_zero_386(atomic64_t *v);
16340 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16341 --- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16342 +++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16343 @@ -48,6 +48,10 @@ BEGIN(read)
16347 +BEGIN(read_unchecked)
16354 @@ -55,6 +59,10 @@ BEGIN(set)
16358 +BEGIN(set_unchecked)
16365 @@ -70,6 +78,20 @@ RET_ENDP
16370 +#ifdef CONFIG_PAX_REFCOUNT
16376 + _ASM_EXTABLE(0b, 0b)
16380 +BEGIN(add_unchecked)
16386 @@ -77,6 +99,24 @@ RET_ENDP
16391 +#ifdef CONFIG_PAX_REFCOUNT
16394 + _ASM_EXTABLE(1234b, 2f)
16400 +#ifdef CONFIG_PAX_REFCOUNT
16405 +BEGIN(add_return_unchecked)
16411 @@ -86,6 +126,20 @@ RET_ENDP
16416 +#ifdef CONFIG_PAX_REFCOUNT
16422 + _ASM_EXTABLE(0b, 0b)
16426 +BEGIN(sub_unchecked)
16432 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16437 +#ifdef CONFIG_PAX_REFCOUNT
16440 + _ASM_EXTABLE(1234b, 2f)
16446 +#ifdef CONFIG_PAX_REFCOUNT
16451 +BEGIN(sub_return_unchecked)
16460 @@ -105,6 +180,20 @@ RET_ENDP
16465 +#ifdef CONFIG_PAX_REFCOUNT
16471 + _ASM_EXTABLE(0b, 0b)
16475 +BEGIN(inc_unchecked)
16481 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16486 +#ifdef CONFIG_PAX_REFCOUNT
16489 + _ASM_EXTABLE(1234b, 2f)
16495 +#ifdef CONFIG_PAX_REFCOUNT
16500 +BEGIN(inc_return_unchecked)
16508 @@ -123,6 +232,20 @@ RET_ENDP
16513 +#ifdef CONFIG_PAX_REFCOUNT
16519 + _ASM_EXTABLE(0b, 0b)
16523 +BEGIN(dec_unchecked)
16529 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16534 +#ifdef CONFIG_PAX_REFCOUNT
16537 + _ASM_EXTABLE(1234b, 2f)
16543 +#ifdef CONFIG_PAX_REFCOUNT
16548 +BEGIN(dec_return_unchecked)
16556 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16561 +#ifdef CONFIG_PAX_REFCOUNT
16564 + _ASM_EXTABLE(1234b, 2f)
16570 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16575 +#ifdef CONFIG_PAX_REFCOUNT
16578 + _ASM_EXTABLE(1234b, 2f)
16584 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16589 +#ifdef CONFIG_PAX_REFCOUNT
16592 + _ASM_EXTABLE(1234b, 1f)
16598 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16599 --- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16600 +++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16601 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16603 ENDPROC(atomic64_read_cx8)
16605 +ENTRY(atomic64_read_unchecked_cx8)
16611 +ENDPROC(atomic64_read_unchecked_cx8)
16613 ENTRY(atomic64_set_cx8)
16616 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16618 ENDPROC(atomic64_set_cx8)
16620 +ENTRY(atomic64_set_unchecked_cx8)
16624 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16625 + * are atomic on 586 and newer */
16631 +ENDPROC(atomic64_set_unchecked_cx8)
16633 ENTRY(atomic64_xchg_cx8)
16636 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16638 ENDPROC(atomic64_xchg_cx8)
16640 -.macro addsub_return func ins insc
16641 -ENTRY(atomic64_\func\()_return_cx8)
16642 +.macro addsub_return func ins insc unchecked=""
16643 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16647 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16649 \ins\()l %esi, %ebx
16650 \insc\()l %edi, %ecx
16653 +#ifdef CONFIG_PAX_REFCOUNT
16656 + _ASM_EXTABLE(2b, 3f)
16669 +#ifdef CONFIG_PAX_REFCOUNT
16680 -ENDPROC(atomic64_\func\()_return_cx8)
16681 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16684 addsub_return add add adc
16685 addsub_return sub sub sbb
16686 +addsub_return add add adc _unchecked
16687 +addsub_return sub sub sbb _unchecked
16689 -.macro incdec_return func ins insc
16690 -ENTRY(atomic64_\func\()_return_cx8)
16691 +.macro incdec_return func ins insc unchecked
16692 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16696 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16702 +#ifdef CONFIG_PAX_REFCOUNT
16705 + _ASM_EXTABLE(2b, 3f)
16718 +#ifdef CONFIG_PAX_REFCOUNT
16726 -ENDPROC(atomic64_\func\()_return_cx8)
16727 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16730 incdec_return inc add adc
16731 incdec_return dec sub sbb
16732 +incdec_return inc add adc _unchecked
16733 +incdec_return dec sub sbb _unchecked
16735 ENTRY(atomic64_dec_if_positive_cx8)
16737 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16742 +#ifdef CONFIG_PAX_REFCOUNT
16745 + _ASM_EXTABLE(1234b, 2f)
16751 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16756 +#ifdef CONFIG_PAX_REFCOUNT
16759 + _ASM_EXTABLE(1234b, 3f)
16765 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16770 +#ifdef CONFIG_PAX_REFCOUNT
16773 + _ASM_EXTABLE(1234b, 3f)
16779 diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16780 --- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16781 +++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16783 #include <linux/linkage.h>
16784 #include <asm/dwarf2.h>
16785 #include <asm/errno.h>
16787 +#include <asm/segment.h>
16790 * computes a partial checksum, e.g. for TCP/UDP fragments
16792 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16797 -ENTRY(csum_partial_copy_generic)
16799 +ENTRY(csum_partial_copy_generic_to_user)
16802 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16805 + jmp csum_partial_copy_generic
16808 +ENTRY(csum_partial_copy_generic_from_user)
16810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16815 +ENTRY(csum_partial_copy_generic)
16817 CFI_ADJUST_CFA_OFFSET 4
16819 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16821 SRC(1: movw (%esi), %bx )
16823 -DST( movw %bx, (%edi) )
16824 +DST( movw %bx, %es:(%edi) )
16828 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16829 SRC(1: movl (%esi), %ebx )
16830 SRC( movl 4(%esi), %edx )
16832 -DST( movl %ebx, (%edi) )
16833 +DST( movl %ebx, %es:(%edi) )
16835 -DST( movl %edx, 4(%edi) )
16836 +DST( movl %edx, %es:4(%edi) )
16838 SRC( movl 8(%esi), %ebx )
16839 SRC( movl 12(%esi), %edx )
16841 -DST( movl %ebx, 8(%edi) )
16842 +DST( movl %ebx, %es:8(%edi) )
16844 -DST( movl %edx, 12(%edi) )
16845 +DST( movl %edx, %es:12(%edi) )
16847 SRC( movl 16(%esi), %ebx )
16848 SRC( movl 20(%esi), %edx )
16850 -DST( movl %ebx, 16(%edi) )
16851 +DST( movl %ebx, %es:16(%edi) )
16853 -DST( movl %edx, 20(%edi) )
16854 +DST( movl %edx, %es:20(%edi) )
16856 SRC( movl 24(%esi), %ebx )
16857 SRC( movl 28(%esi), %edx )
16859 -DST( movl %ebx, 24(%edi) )
16860 +DST( movl %ebx, %es:24(%edi) )
16862 -DST( movl %edx, 28(%edi) )
16863 +DST( movl %edx, %es:28(%edi) )
16867 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16868 shrl $2, %edx # This clears CF
16869 SRC(3: movl (%esi), %ebx )
16871 -DST( movl %ebx, (%edi) )
16872 +DST( movl %ebx, %es:(%edi) )
16876 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16878 SRC( movw (%esi), %cx )
16880 -DST( movw %cx, (%edi) )
16881 +DST( movw %cx, %es:(%edi) )
16885 SRC(5: movb (%esi), %cl )
16886 -DST( movb %cl, (%edi) )
16887 +DST( movb %cl, %es:(%edi) )
16891 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16895 - movl $-EFAULT, (%ebx)
16896 + movl $-EFAULT, %ss:(%ebx)
16898 # zero the complete destination - computing the rest
16900 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16904 - movl $-EFAULT,(%ebx)
16905 + movl $-EFAULT,%ss:(%ebx)
16917 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16918 popl_cfi %ecx # equivalent to addl $4,%esp
16921 -ENDPROC(csum_partial_copy_generic)
16922 +ENDPROC(csum_partial_copy_generic_to_user)
16926 /* Version for PentiumII/PPro */
16928 #define ROUND1(x) \
16930 SRC(movl x(%esi), %ebx ) ; \
16931 addl %ebx, %eax ; \
16932 - DST(movl %ebx, x(%edi) ) ;
16933 + DST(movl %ebx, %es:x(%edi)) ;
16937 SRC(movl x(%esi), %ebx ) ; \
16938 adcl %ebx, %eax ; \
16939 - DST(movl %ebx, x(%edi) ) ;
16940 + DST(movl %ebx, %es:x(%edi)) ;
16944 -ENTRY(csum_partial_copy_generic)
16946 +ENTRY(csum_partial_copy_generic_to_user)
16949 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16952 + jmp csum_partial_copy_generic
16955 +ENTRY(csum_partial_copy_generic_from_user)
16957 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16962 +ENTRY(csum_partial_copy_generic)
16964 CFI_REL_OFFSET ebx, 0
16966 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16970 - lea 3f(%ebx,%ebx), %ebx
16971 + lea 3f(%ebx,%ebx,2), %ebx
16975 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16977 SRC( movw (%esi), %dx )
16979 -DST( movw %dx, (%edi) )
16980 +DST( movw %dx, %es:(%edi) )
16985 SRC( movb (%esi), %dl )
16986 -DST( movb %dl, (%edi) )
16987 +DST( movb %dl, %es:(%edi) )
16991 .section .fixup, "ax"
16992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16993 - movl $-EFAULT, (%ebx)
16994 + movl $-EFAULT, %ss:(%ebx)
16995 # zero the complete destination (computing the rest is too much work)
16996 movl ARGBASE+8(%esp),%edi # dst
16997 movl ARGBASE+12(%esp),%ecx # len
16998 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17002 - movl $-EFAULT, (%ebx)
17003 + movl $-EFAULT, %ss:(%ebx)
17007 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17017 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17021 -ENDPROC(csum_partial_copy_generic)
17022 +ENDPROC(csum_partial_copy_generic_to_user)
17026 diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17027 --- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17028 +++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17029 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
17031 #include <asm/cpufeature.h>
17033 - .section .altinstr_replacement,"ax"
17034 + .section .altinstr_replacement,"a"
17035 1: .byte 0xeb /* jmp <disp8> */
17036 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17038 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17039 --- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17040 +++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17041 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
17043 #include <asm/cpufeature.h>
17045 - .section .altinstr_replacement,"ax"
17046 + .section .altinstr_replacement,"a"
17047 1: .byte 0xeb /* jmp <disp8> */
17048 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17050 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17051 --- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17052 +++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17053 @@ -15,13 +15,14 @@
17054 #include <asm/asm-offsets.h>
17055 #include <asm/thread_info.h>
17056 #include <asm/cpufeature.h>
17057 +#include <asm/pgtable.h>
17059 .macro ALTERNATIVE_JUMP feature,orig,alt
17061 .byte 0xe9 /* 32bit jump */
17062 .long \orig-1f /* by default jump to orig */
17064 - .section .altinstr_replacement,"ax"
17065 + .section .altinstr_replacement,"a"
17066 2: .byte 0xe9 /* near jump with 32bit immediate */
17067 .long \alt-1b /* offset */ /* or alternatively to alt */
17069 @@ -64,37 +65,13 @@
17073 -/* Standard copy_to_user with segment limit checking */
17074 -ENTRY(_copy_to_user)
17076 - GET_THREAD_INFO(%rax)
17080 - cmpq TI_addr_limit(%rax),%rcx
17082 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17084 -ENDPROC(_copy_to_user)
17086 -/* Standard copy_from_user with segment limit checking */
17087 -ENTRY(_copy_from_user)
17089 - GET_THREAD_INFO(%rax)
17093 - cmpq TI_addr_limit(%rax),%rcx
17095 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17097 -ENDPROC(_copy_from_user)
17099 .section .fixup,"ax"
17100 /* must zero dest */
17101 ENTRY(bad_from_user)
17109 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17110 --- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17111 +++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17113 #include <asm/current.h>
17114 #include <asm/asm-offsets.h>
17115 #include <asm/thread_info.h>
17116 +#include <asm/pgtable.h>
17118 .macro ALIGN_DESTINATION
17119 #ifdef FIX_ALIGNMENT
17122 ENTRY(__copy_user_nocache)
17125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17126 + mov $PAX_USER_SHADOW_BASE,%rcx
17134 jb 20f /* less then 8 bytes, go to byte copy loop */
17136 diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17137 --- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17138 +++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17139 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17144 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17145 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17146 + src += PAX_USER_SHADOW_BASE;
17149 isum = csum_partial_copy_generic((__force const void *)src,
17150 dst, len, isum, errp, NULL);
17151 if (unlikely(*errp))
17152 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17158 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17159 + dst += PAX_USER_SHADOW_BASE;
17162 return csum_partial_copy_generic(src, (void __force *)dst,
17163 len, isum, NULL, errp);
17165 diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17166 --- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17167 +++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17168 @@ -33,14 +33,35 @@
17169 #include <asm/asm-offsets.h>
17170 #include <asm/thread_info.h>
17171 #include <asm/asm.h>
17172 +#include <asm/segment.h>
17173 +#include <asm/pgtable.h>
17175 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17176 +#define __copyuser_seg gs;
17178 +#define __copyuser_seg
17182 ENTRY(__get_user_1)
17185 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17186 GET_THREAD_INFO(%_ASM_DX)
17187 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17189 -1: movzb (%_ASM_AX),%edx
17191 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17192 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17193 + cmp %_ASM_DX,%_ASM_AX
17195 + add %_ASM_DX,%_ASM_AX
17201 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17205 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17206 ENTRY(__get_user_2)
17210 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17212 GET_THREAD_INFO(%_ASM_DX)
17213 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17215 -2: movzwl -1(%_ASM_AX),%edx
17217 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17218 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17219 + cmp %_ASM_DX,%_ASM_AX
17221 + add %_ASM_DX,%_ASM_AX
17227 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17231 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17232 ENTRY(__get_user_4)
17236 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17238 GET_THREAD_INFO(%_ASM_DX)
17239 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17241 -3: mov -3(%_ASM_AX),%edx
17243 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17244 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17245 + cmp %_ASM_DX,%_ASM_AX
17247 + add %_ASM_DX,%_ASM_AX
17253 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17257 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17258 GET_THREAD_INFO(%_ASM_DX)
17259 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17262 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17263 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17264 + cmp %_ASM_DX,%_ASM_AX
17266 + add %_ASM_DX,%_ASM_AX
17270 4: movq -7(%_ASM_AX),%_ASM_DX
17273 diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17274 --- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17275 +++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17277 #include <linux/string.h>
17278 #include <asm/inat.h>
17279 #include <asm/insn.h>
17281 +#include <asm/pgtable_types.h>
17283 +#define ktla_ktva(addr) addr
17286 #define get_next(t, insn) \
17287 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17289 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17291 memset(insn, 0, sizeof(*insn));
17292 - insn->kaddr = kaddr;
17293 - insn->next_byte = kaddr;
17294 + insn->kaddr = ktla_ktva(kaddr);
17295 + insn->next_byte = ktla_ktva(kaddr);
17296 insn->x86_64 = x86_64 ? 1 : 0;
17297 insn->opnd_bytes = 4;
17299 diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17300 --- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17301 +++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17302 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17306 + unsigned long cr0;
17308 if (unlikely(in_interrupt()))
17309 return __memcpy(to, from, len);
17310 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17311 kernel_fpu_begin();
17313 __asm__ __volatile__ (
17314 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17315 - " prefetch 64(%0)\n"
17316 - " prefetch 128(%0)\n"
17317 - " prefetch 192(%0)\n"
17318 - " prefetch 256(%0)\n"
17319 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17320 + " prefetch 64(%1)\n"
17321 + " prefetch 128(%1)\n"
17322 + " prefetch 192(%1)\n"
17323 + " prefetch 256(%1)\n"
17325 ".section .fixup, \"ax\"\n"
17326 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17329 +#ifdef CONFIG_PAX_KERNEXEC
17330 + " movl %%cr0, %0\n"
17331 + " movl %0, %%eax\n"
17332 + " andl $0xFFFEFFFF, %%eax\n"
17333 + " movl %%eax, %%cr0\n"
17336 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17338 +#ifdef CONFIG_PAX_KERNEXEC
17339 + " movl %0, %%cr0\n"
17344 _ASM_EXTABLE(1b, 3b)
17346 + : "=&r" (cr0) : "r" (from) : "ax");
17348 for ( ; i > 5; i--) {
17349 __asm__ __volatile__ (
17350 - "1: prefetch 320(%0)\n"
17351 - "2: movq (%0), %%mm0\n"
17352 - " movq 8(%0), %%mm1\n"
17353 - " movq 16(%0), %%mm2\n"
17354 - " movq 24(%0), %%mm3\n"
17355 - " movq %%mm0, (%1)\n"
17356 - " movq %%mm1, 8(%1)\n"
17357 - " movq %%mm2, 16(%1)\n"
17358 - " movq %%mm3, 24(%1)\n"
17359 - " movq 32(%0), %%mm0\n"
17360 - " movq 40(%0), %%mm1\n"
17361 - " movq 48(%0), %%mm2\n"
17362 - " movq 56(%0), %%mm3\n"
17363 - " movq %%mm0, 32(%1)\n"
17364 - " movq %%mm1, 40(%1)\n"
17365 - " movq %%mm2, 48(%1)\n"
17366 - " movq %%mm3, 56(%1)\n"
17367 + "1: prefetch 320(%1)\n"
17368 + "2: movq (%1), %%mm0\n"
17369 + " movq 8(%1), %%mm1\n"
17370 + " movq 16(%1), %%mm2\n"
17371 + " movq 24(%1), %%mm3\n"
17372 + " movq %%mm0, (%2)\n"
17373 + " movq %%mm1, 8(%2)\n"
17374 + " movq %%mm2, 16(%2)\n"
17375 + " movq %%mm3, 24(%2)\n"
17376 + " movq 32(%1), %%mm0\n"
17377 + " movq 40(%1), %%mm1\n"
17378 + " movq 48(%1), %%mm2\n"
17379 + " movq 56(%1), %%mm3\n"
17380 + " movq %%mm0, 32(%2)\n"
17381 + " movq %%mm1, 40(%2)\n"
17382 + " movq %%mm2, 48(%2)\n"
17383 + " movq %%mm3, 56(%2)\n"
17384 ".section .fixup, \"ax\"\n"
17385 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17388 +#ifdef CONFIG_PAX_KERNEXEC
17389 + " movl %%cr0, %0\n"
17390 + " movl %0, %%eax\n"
17391 + " andl $0xFFFEFFFF, %%eax\n"
17392 + " movl %%eax, %%cr0\n"
17395 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17397 +#ifdef CONFIG_PAX_KERNEXEC
17398 + " movl %0, %%cr0\n"
17403 _ASM_EXTABLE(1b, 3b)
17404 - : : "r" (from), "r" (to) : "memory");
17405 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17409 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17410 static void fast_copy_page(void *to, void *from)
17413 + unsigned long cr0;
17415 kernel_fpu_begin();
17417 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17418 * but that is for later. -AV
17420 __asm__ __volatile__(
17421 - "1: prefetch (%0)\n"
17422 - " prefetch 64(%0)\n"
17423 - " prefetch 128(%0)\n"
17424 - " prefetch 192(%0)\n"
17425 - " prefetch 256(%0)\n"
17426 + "1: prefetch (%1)\n"
17427 + " prefetch 64(%1)\n"
17428 + " prefetch 128(%1)\n"
17429 + " prefetch 192(%1)\n"
17430 + " prefetch 256(%1)\n"
17432 ".section .fixup, \"ax\"\n"
17433 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 + " movl %%cr0, %0\n"
17438 + " movl %0, %%eax\n"
17439 + " andl $0xFFFEFFFF, %%eax\n"
17440 + " movl %%eax, %%cr0\n"
17443 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 + " movl %0, %%cr0\n"
17451 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17452 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17454 for (i = 0; i < (4096-320)/64; i++) {
17455 __asm__ __volatile__ (
17456 - "1: prefetch 320(%0)\n"
17457 - "2: movq (%0), %%mm0\n"
17458 - " movntq %%mm0, (%1)\n"
17459 - " movq 8(%0), %%mm1\n"
17460 - " movntq %%mm1, 8(%1)\n"
17461 - " movq 16(%0), %%mm2\n"
17462 - " movntq %%mm2, 16(%1)\n"
17463 - " movq 24(%0), %%mm3\n"
17464 - " movntq %%mm3, 24(%1)\n"
17465 - " movq 32(%0), %%mm4\n"
17466 - " movntq %%mm4, 32(%1)\n"
17467 - " movq 40(%0), %%mm5\n"
17468 - " movntq %%mm5, 40(%1)\n"
17469 - " movq 48(%0), %%mm6\n"
17470 - " movntq %%mm6, 48(%1)\n"
17471 - " movq 56(%0), %%mm7\n"
17472 - " movntq %%mm7, 56(%1)\n"
17473 + "1: prefetch 320(%1)\n"
17474 + "2: movq (%1), %%mm0\n"
17475 + " movntq %%mm0, (%2)\n"
17476 + " movq 8(%1), %%mm1\n"
17477 + " movntq %%mm1, 8(%2)\n"
17478 + " movq 16(%1), %%mm2\n"
17479 + " movntq %%mm2, 16(%2)\n"
17480 + " movq 24(%1), %%mm3\n"
17481 + " movntq %%mm3, 24(%2)\n"
17482 + " movq 32(%1), %%mm4\n"
17483 + " movntq %%mm4, 32(%2)\n"
17484 + " movq 40(%1), %%mm5\n"
17485 + " movntq %%mm5, 40(%2)\n"
17486 + " movq 48(%1), %%mm6\n"
17487 + " movntq %%mm6, 48(%2)\n"
17488 + " movq 56(%1), %%mm7\n"
17489 + " movntq %%mm7, 56(%2)\n"
17490 ".section .fixup, \"ax\"\n"
17491 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17494 +#ifdef CONFIG_PAX_KERNEXEC
17495 + " movl %%cr0, %0\n"
17496 + " movl %0, %%eax\n"
17497 + " andl $0xFFFEFFFF, %%eax\n"
17498 + " movl %%eax, %%cr0\n"
17501 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17503 +#ifdef CONFIG_PAX_KERNEXEC
17504 + " movl %0, %%cr0\n"
17509 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17510 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17514 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17515 static void fast_copy_page(void *to, void *from)
17518 + unsigned long cr0;
17520 kernel_fpu_begin();
17522 __asm__ __volatile__ (
17523 - "1: prefetch (%0)\n"
17524 - " prefetch 64(%0)\n"
17525 - " prefetch 128(%0)\n"
17526 - " prefetch 192(%0)\n"
17527 - " prefetch 256(%0)\n"
17528 + "1: prefetch (%1)\n"
17529 + " prefetch 64(%1)\n"
17530 + " prefetch 128(%1)\n"
17531 + " prefetch 192(%1)\n"
17532 + " prefetch 256(%1)\n"
17534 ".section .fixup, \"ax\"\n"
17535 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17538 +#ifdef CONFIG_PAX_KERNEXEC
17539 + " movl %%cr0, %0\n"
17540 + " movl %0, %%eax\n"
17541 + " andl $0xFFFEFFFF, %%eax\n"
17542 + " movl %%eax, %%cr0\n"
17545 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 + " movl %0, %%cr0\n"
17553 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17554 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17556 for (i = 0; i < 4096/64; i++) {
17557 __asm__ __volatile__ (
17558 - "1: prefetch 320(%0)\n"
17559 - "2: movq (%0), %%mm0\n"
17560 - " movq 8(%0), %%mm1\n"
17561 - " movq 16(%0), %%mm2\n"
17562 - " movq 24(%0), %%mm3\n"
17563 - " movq %%mm0, (%1)\n"
17564 - " movq %%mm1, 8(%1)\n"
17565 - " movq %%mm2, 16(%1)\n"
17566 - " movq %%mm3, 24(%1)\n"
17567 - " movq 32(%0), %%mm0\n"
17568 - " movq 40(%0), %%mm1\n"
17569 - " movq 48(%0), %%mm2\n"
17570 - " movq 56(%0), %%mm3\n"
17571 - " movq %%mm0, 32(%1)\n"
17572 - " movq %%mm1, 40(%1)\n"
17573 - " movq %%mm2, 48(%1)\n"
17574 - " movq %%mm3, 56(%1)\n"
17575 + "1: prefetch 320(%1)\n"
17576 + "2: movq (%1), %%mm0\n"
17577 + " movq 8(%1), %%mm1\n"
17578 + " movq 16(%1), %%mm2\n"
17579 + " movq 24(%1), %%mm3\n"
17580 + " movq %%mm0, (%2)\n"
17581 + " movq %%mm1, 8(%2)\n"
17582 + " movq %%mm2, 16(%2)\n"
17583 + " movq %%mm3, 24(%2)\n"
17584 + " movq 32(%1), %%mm0\n"
17585 + " movq 40(%1), %%mm1\n"
17586 + " movq 48(%1), %%mm2\n"
17587 + " movq 56(%1), %%mm3\n"
17588 + " movq %%mm0, 32(%2)\n"
17589 + " movq %%mm1, 40(%2)\n"
17590 + " movq %%mm2, 48(%2)\n"
17591 + " movq %%mm3, 56(%2)\n"
17592 ".section .fixup, \"ax\"\n"
17593 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17596 +#ifdef CONFIG_PAX_KERNEXEC
17597 + " movl %%cr0, %0\n"
17598 + " movl %0, %%eax\n"
17599 + " andl $0xFFFEFFFF, %%eax\n"
17600 + " movl %%eax, %%cr0\n"
17603 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17605 +#ifdef CONFIG_PAX_KERNEXEC
17606 + " movl %0, %%cr0\n"
17611 _ASM_EXTABLE(1b, 3b)
17612 - : : "r" (from), "r" (to) : "memory");
17613 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17617 diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17618 --- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17619 +++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17621 #include <asm/thread_info.h>
17622 #include <asm/errno.h>
17623 #include <asm/asm.h>
17625 +#include <asm/segment.h>
17626 +#include <asm/pgtable.h>
17630 @@ -29,52 +30,119 @@
17631 * as they get called from within inline assembly.
17634 -#define ENTER CFI_STARTPROC ; \
17635 - GET_THREAD_INFO(%_ASM_BX)
17636 +#define ENTER CFI_STARTPROC
17637 #define EXIT ret ; \
17640 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641 +#define _DEST %_ASM_CX,%_ASM_BX
17643 +#define _DEST %_ASM_CX
17646 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17647 +#define __copyuser_seg gs;
17649 +#define __copyuser_seg
17653 ENTRY(__put_user_1)
17656 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17657 + GET_THREAD_INFO(%_ASM_BX)
17658 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17660 -1: movb %al,(%_ASM_CX)
17662 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17663 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17664 + cmp %_ASM_BX,%_ASM_CX
17672 +1: __copyuser_seg movb %al,(_DEST)
17675 ENDPROC(__put_user_1)
17677 ENTRY(__put_user_2)
17680 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17681 + GET_THREAD_INFO(%_ASM_BX)
17682 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17684 cmp %_ASM_BX,%_ASM_CX
17686 -2: movw %ax,(%_ASM_CX)
17688 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17689 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17690 + cmp %_ASM_BX,%_ASM_CX
17698 +2: __copyuser_seg movw %ax,(_DEST)
17701 ENDPROC(__put_user_2)
17703 ENTRY(__put_user_4)
17706 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17707 + GET_THREAD_INFO(%_ASM_BX)
17708 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17710 cmp %_ASM_BX,%_ASM_CX
17712 -3: movl %eax,(%_ASM_CX)
17714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17715 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17716 + cmp %_ASM_BX,%_ASM_CX
17724 +3: __copyuser_seg movl %eax,(_DEST)
17727 ENDPROC(__put_user_4)
17729 ENTRY(__put_user_8)
17732 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17733 + GET_THREAD_INFO(%_ASM_BX)
17734 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17736 cmp %_ASM_BX,%_ASM_CX
17738 -4: mov %_ASM_AX,(%_ASM_CX)
17740 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17741 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17742 + cmp %_ASM_BX,%_ASM_CX
17750 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17751 #ifdef CONFIG_X86_32
17752 -5: movl %edx,4(%_ASM_CX)
17753 +5: __copyuser_seg movl %edx,4(_DEST)
17757 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17758 --- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17759 +++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17760 @@ -43,7 +43,7 @@ do { \
17761 __asm__ __volatile__( \
17765 + "0: "__copyuser_seg"lodsb\n" \
17767 " testb %%al,%%al\n" \
17769 @@ -128,10 +128,12 @@ do { \
17772 __asm__ __volatile__( \
17773 + __COPYUSER_SET_ES \
17774 "0: rep; stosl\n" \
17776 "1: rep; stosb\n" \
17778 + __COPYUSER_RESTORE_ES \
17779 ".section .fixup,\"ax\"\n" \
17780 "3: lea 0(%2,%0,4),%0\n" \
17782 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17785 __asm__ __volatile__(
17786 + __COPYUSER_SET_ES
17790 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17794 + __COPYUSER_RESTORE_ES
17795 ".section .fixup,\"ax\"\n"
17796 "2: xorl %%eax,%%eax\n"
17798 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17800 #ifdef CONFIG_X86_INTEL_USERCOPY
17801 static unsigned long
17802 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17803 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17806 __asm__ __volatile__(
17807 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17809 "3: movl 0(%4), %%eax\n"
17810 "4: movl 4(%4), %%edx\n"
17811 - "5: movl %%eax, 0(%3)\n"
17812 - "6: movl %%edx, 4(%3)\n"
17813 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17814 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17815 "7: movl 8(%4), %%eax\n"
17816 "8: movl 12(%4),%%edx\n"
17817 - "9: movl %%eax, 8(%3)\n"
17818 - "10: movl %%edx, 12(%3)\n"
17819 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17820 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17821 "11: movl 16(%4), %%eax\n"
17822 "12: movl 20(%4), %%edx\n"
17823 - "13: movl %%eax, 16(%3)\n"
17824 - "14: movl %%edx, 20(%3)\n"
17825 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17826 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17827 "15: movl 24(%4), %%eax\n"
17828 "16: movl 28(%4), %%edx\n"
17829 - "17: movl %%eax, 24(%3)\n"
17830 - "18: movl %%edx, 28(%3)\n"
17831 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17832 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17833 "19: movl 32(%4), %%eax\n"
17834 "20: movl 36(%4), %%edx\n"
17835 - "21: movl %%eax, 32(%3)\n"
17836 - "22: movl %%edx, 36(%3)\n"
17837 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17838 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17839 "23: movl 40(%4), %%eax\n"
17840 "24: movl 44(%4), %%edx\n"
17841 - "25: movl %%eax, 40(%3)\n"
17842 - "26: movl %%edx, 44(%3)\n"
17843 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17844 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17845 "27: movl 48(%4), %%eax\n"
17846 "28: movl 52(%4), %%edx\n"
17847 - "29: movl %%eax, 48(%3)\n"
17848 - "30: movl %%edx, 52(%3)\n"
17849 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17850 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17851 "31: movl 56(%4), %%eax\n"
17852 "32: movl 60(%4), %%edx\n"
17853 - "33: movl %%eax, 56(%3)\n"
17854 - "34: movl %%edx, 60(%3)\n"
17855 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17856 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17860 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17862 " andl $3, %%eax\n"
17864 + __COPYUSER_SET_ES
17866 "36: movl %%eax, %0\n"
17869 + __COPYUSER_RESTORE_ES
17870 + ".section .fixup,\"ax\"\n"
17871 + "101: lea 0(%%eax,%0,4),%0\n"
17874 + ".section __ex_table,\"a\"\n"
17876 + " .long 1b,100b\n"
17877 + " .long 2b,100b\n"
17878 + " .long 3b,100b\n"
17879 + " .long 4b,100b\n"
17880 + " .long 5b,100b\n"
17881 + " .long 6b,100b\n"
17882 + " .long 7b,100b\n"
17883 + " .long 8b,100b\n"
17884 + " .long 9b,100b\n"
17885 + " .long 10b,100b\n"
17886 + " .long 11b,100b\n"
17887 + " .long 12b,100b\n"
17888 + " .long 13b,100b\n"
17889 + " .long 14b,100b\n"
17890 + " .long 15b,100b\n"
17891 + " .long 16b,100b\n"
17892 + " .long 17b,100b\n"
17893 + " .long 18b,100b\n"
17894 + " .long 19b,100b\n"
17895 + " .long 20b,100b\n"
17896 + " .long 21b,100b\n"
17897 + " .long 22b,100b\n"
17898 + " .long 23b,100b\n"
17899 + " .long 24b,100b\n"
17900 + " .long 25b,100b\n"
17901 + " .long 26b,100b\n"
17902 + " .long 27b,100b\n"
17903 + " .long 28b,100b\n"
17904 + " .long 29b,100b\n"
17905 + " .long 30b,100b\n"
17906 + " .long 31b,100b\n"
17907 + " .long 32b,100b\n"
17908 + " .long 33b,100b\n"
17909 + " .long 34b,100b\n"
17910 + " .long 35b,100b\n"
17911 + " .long 36b,100b\n"
17912 + " .long 37b,100b\n"
17913 + " .long 99b,101b\n"
17915 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17916 + : "1"(to), "2"(from), "0"(size)
17917 + : "eax", "edx", "memory");
17921 +static unsigned long
17922 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17925 + __asm__ __volatile__(
17926 + " .align 2,0x90\n"
17927 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17928 + " cmpl $67, %0\n"
17930 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17931 + " .align 2,0x90\n"
17932 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17933 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17934 + "5: movl %%eax, 0(%3)\n"
17935 + "6: movl %%edx, 4(%3)\n"
17936 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17937 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17938 + "9: movl %%eax, 8(%3)\n"
17939 + "10: movl %%edx, 12(%3)\n"
17940 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17941 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17942 + "13: movl %%eax, 16(%3)\n"
17943 + "14: movl %%edx, 20(%3)\n"
17944 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17945 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17946 + "17: movl %%eax, 24(%3)\n"
17947 + "18: movl %%edx, 28(%3)\n"
17948 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17949 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17950 + "21: movl %%eax, 32(%3)\n"
17951 + "22: movl %%edx, 36(%3)\n"
17952 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17953 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17954 + "25: movl %%eax, 40(%3)\n"
17955 + "26: movl %%edx, 44(%3)\n"
17956 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17957 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17958 + "29: movl %%eax, 48(%3)\n"
17959 + "30: movl %%edx, 52(%3)\n"
17960 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17961 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17962 + "33: movl %%eax, 56(%3)\n"
17963 + "34: movl %%edx, 60(%3)\n"
17964 + " addl $-64, %0\n"
17965 + " addl $64, %4\n"
17966 + " addl $64, %3\n"
17967 + " cmpl $63, %0\n"
17969 + "35: movl %0, %%eax\n"
17971 + " andl $3, %%eax\n"
17973 + "99: rep; "__copyuser_seg" movsl\n"
17974 + "36: movl %%eax, %0\n"
17975 + "37: rep; "__copyuser_seg" movsb\n"
17977 ".section .fixup,\"ax\"\n"
17978 "101: lea 0(%%eax,%0,4),%0\n"
17980 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17982 __asm__ __volatile__(
17984 - "0: movl 32(%4), %%eax\n"
17985 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17988 - "1: movl 64(%4), %%eax\n"
17989 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17991 - "2: movl 0(%4), %%eax\n"
17992 - "21: movl 4(%4), %%edx\n"
17993 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17994 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17995 " movl %%eax, 0(%3)\n"
17996 " movl %%edx, 4(%3)\n"
17997 - "3: movl 8(%4), %%eax\n"
17998 - "31: movl 12(%4),%%edx\n"
17999 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18000 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18001 " movl %%eax, 8(%3)\n"
18002 " movl %%edx, 12(%3)\n"
18003 - "4: movl 16(%4), %%eax\n"
18004 - "41: movl 20(%4), %%edx\n"
18005 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18006 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18007 " movl %%eax, 16(%3)\n"
18008 " movl %%edx, 20(%3)\n"
18009 - "10: movl 24(%4), %%eax\n"
18010 - "51: movl 28(%4), %%edx\n"
18011 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18012 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18013 " movl %%eax, 24(%3)\n"
18014 " movl %%edx, 28(%3)\n"
18015 - "11: movl 32(%4), %%eax\n"
18016 - "61: movl 36(%4), %%edx\n"
18017 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18018 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18019 " movl %%eax, 32(%3)\n"
18020 " movl %%edx, 36(%3)\n"
18021 - "12: movl 40(%4), %%eax\n"
18022 - "71: movl 44(%4), %%edx\n"
18023 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18024 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18025 " movl %%eax, 40(%3)\n"
18026 " movl %%edx, 44(%3)\n"
18027 - "13: movl 48(%4), %%eax\n"
18028 - "81: movl 52(%4), %%edx\n"
18029 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18030 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18031 " movl %%eax, 48(%3)\n"
18032 " movl %%edx, 52(%3)\n"
18033 - "14: movl 56(%4), %%eax\n"
18034 - "91: movl 60(%4), %%edx\n"
18035 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18036 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18037 " movl %%eax, 56(%3)\n"
18038 " movl %%edx, 60(%3)\n"
18040 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18042 " andl $3, %%eax\n"
18044 - "6: rep; movsl\n"
18045 + "6: rep; "__copyuser_seg" movsl\n"
18047 - "7: rep; movsb\n"
18048 + "7: rep; "__copyuser_seg" movsb\n"
18050 ".section .fixup,\"ax\"\n"
18051 "9: lea 0(%%eax,%0,4),%0\n"
18052 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18054 __asm__ __volatile__(
18056 - "0: movl 32(%4), %%eax\n"
18057 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18060 - "1: movl 64(%4), %%eax\n"
18061 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18063 - "2: movl 0(%4), %%eax\n"
18064 - "21: movl 4(%4), %%edx\n"
18065 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18066 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18067 " movnti %%eax, 0(%3)\n"
18068 " movnti %%edx, 4(%3)\n"
18069 - "3: movl 8(%4), %%eax\n"
18070 - "31: movl 12(%4),%%edx\n"
18071 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18072 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18073 " movnti %%eax, 8(%3)\n"
18074 " movnti %%edx, 12(%3)\n"
18075 - "4: movl 16(%4), %%eax\n"
18076 - "41: movl 20(%4), %%edx\n"
18077 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18078 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18079 " movnti %%eax, 16(%3)\n"
18080 " movnti %%edx, 20(%3)\n"
18081 - "10: movl 24(%4), %%eax\n"
18082 - "51: movl 28(%4), %%edx\n"
18083 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18084 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18085 " movnti %%eax, 24(%3)\n"
18086 " movnti %%edx, 28(%3)\n"
18087 - "11: movl 32(%4), %%eax\n"
18088 - "61: movl 36(%4), %%edx\n"
18089 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18090 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18091 " movnti %%eax, 32(%3)\n"
18092 " movnti %%edx, 36(%3)\n"
18093 - "12: movl 40(%4), %%eax\n"
18094 - "71: movl 44(%4), %%edx\n"
18095 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18096 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18097 " movnti %%eax, 40(%3)\n"
18098 " movnti %%edx, 44(%3)\n"
18099 - "13: movl 48(%4), %%eax\n"
18100 - "81: movl 52(%4), %%edx\n"
18101 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18102 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18103 " movnti %%eax, 48(%3)\n"
18104 " movnti %%edx, 52(%3)\n"
18105 - "14: movl 56(%4), %%eax\n"
18106 - "91: movl 60(%4), %%edx\n"
18107 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18108 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18109 " movnti %%eax, 56(%3)\n"
18110 " movnti %%edx, 60(%3)\n"
18112 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18114 " andl $3, %%eax\n"
18116 - "6: rep; movsl\n"
18117 + "6: rep; "__copyuser_seg" movsl\n"
18119 - "7: rep; movsb\n"
18120 + "7: rep; "__copyuser_seg" movsb\n"
18122 ".section .fixup,\"ax\"\n"
18123 "9: lea 0(%%eax,%0,4),%0\n"
18124 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18126 __asm__ __volatile__(
18128 - "0: movl 32(%4), %%eax\n"
18129 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18132 - "1: movl 64(%4), %%eax\n"
18133 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18135 - "2: movl 0(%4), %%eax\n"
18136 - "21: movl 4(%4), %%edx\n"
18137 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18138 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18139 " movnti %%eax, 0(%3)\n"
18140 " movnti %%edx, 4(%3)\n"
18141 - "3: movl 8(%4), %%eax\n"
18142 - "31: movl 12(%4),%%edx\n"
18143 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18144 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18145 " movnti %%eax, 8(%3)\n"
18146 " movnti %%edx, 12(%3)\n"
18147 - "4: movl 16(%4), %%eax\n"
18148 - "41: movl 20(%4), %%edx\n"
18149 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18150 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18151 " movnti %%eax, 16(%3)\n"
18152 " movnti %%edx, 20(%3)\n"
18153 - "10: movl 24(%4), %%eax\n"
18154 - "51: movl 28(%4), %%edx\n"
18155 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18156 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18157 " movnti %%eax, 24(%3)\n"
18158 " movnti %%edx, 28(%3)\n"
18159 - "11: movl 32(%4), %%eax\n"
18160 - "61: movl 36(%4), %%edx\n"
18161 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18162 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18163 " movnti %%eax, 32(%3)\n"
18164 " movnti %%edx, 36(%3)\n"
18165 - "12: movl 40(%4), %%eax\n"
18166 - "71: movl 44(%4), %%edx\n"
18167 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18168 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18169 " movnti %%eax, 40(%3)\n"
18170 " movnti %%edx, 44(%3)\n"
18171 - "13: movl 48(%4), %%eax\n"
18172 - "81: movl 52(%4), %%edx\n"
18173 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18174 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18175 " movnti %%eax, 48(%3)\n"
18176 " movnti %%edx, 52(%3)\n"
18177 - "14: movl 56(%4), %%eax\n"
18178 - "91: movl 60(%4), %%edx\n"
18179 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18180 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18181 " movnti %%eax, 56(%3)\n"
18182 " movnti %%edx, 60(%3)\n"
18184 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18186 " andl $3, %%eax\n"
18188 - "6: rep; movsl\n"
18189 + "6: rep; "__copyuser_seg" movsl\n"
18191 - "7: rep; movsb\n"
18192 + "7: rep; "__copyuser_seg" movsb\n"
18194 ".section .fixup,\"ax\"\n"
18195 "9: lea 0(%%eax,%0,4),%0\n"
18196 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18198 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18199 unsigned long size);
18200 -unsigned long __copy_user_intel(void __user *to, const void *from,
18201 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18202 + unsigned long size);
18203 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18204 unsigned long size);
18205 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18206 const void __user *from, unsigned long size);
18207 #endif /* CONFIG_X86_INTEL_USERCOPY */
18209 /* Generic arbitrary sized copy. */
18210 -#define __copy_user(to, from, size) \
18211 +#define __copy_user(to, from, size, prefix, set, restore) \
18213 int __d0, __d1, __d2; \
18214 __asm__ __volatile__( \
18222 - "4: rep; movsb\n" \
18223 + "4: rep; "prefix"movsb\n" \
18227 " .align 2,0x90\n" \
18228 - "0: rep; movsl\n" \
18229 + "0: rep; "prefix"movsl\n" \
18231 - "1: rep; movsb\n" \
18232 + "1: rep; "prefix"movsb\n" \
18235 ".section .fixup,\"ax\"\n" \
18236 "5: addl %3,%0\n" \
18238 @@ -682,14 +799,14 @@ do { \
18242 - "4: rep; movsb\n" \
18243 + "4: rep; "__copyuser_seg"movsb\n" \
18247 " .align 2,0x90\n" \
18248 - "0: rep; movsl\n" \
18249 + "0: rep; "__copyuser_seg"movsl\n" \
18251 - "1: rep; movsb\n" \
18252 + "1: rep; "__copyuser_seg"movsb\n" \
18254 ".section .fixup,\"ax\"\n" \
18255 "5: addl %3,%0\n" \
18256 @@ -775,9 +892,9 @@ survive:
18259 if (movsl_is_ok(to, from, n))
18260 - __copy_user(to, from, n);
18261 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18263 - n = __copy_user_intel(to, from, n);
18264 + n = __generic_copy_to_user_intel(to, from, n);
18267 EXPORT_SYMBOL(__copy_to_user_ll);
18268 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18271 if (movsl_is_ok(to, from, n))
18272 - __copy_user(to, from, n);
18273 + __copy_user(to, from, n, __copyuser_seg, "", "");
18275 - n = __copy_user_intel((void __user *)to,
18276 - (const void *)from, n);
18277 + n = __generic_copy_from_user_intel(to, from, n);
18280 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18281 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18282 if (n > 64 && cpu_has_xmm2)
18283 n = __copy_user_intel_nocache(to, from, n);
18285 - __copy_user(to, from, n);
18286 + __copy_user(to, from, n, __copyuser_seg, "", "");
18288 - __copy_user(to, from, n);
18289 + __copy_user(to, from, n, __copyuser_seg, "", "");
18293 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18296 - * copy_to_user: - Copy a block of data into user space.
18297 - * @to: Destination address, in user space.
18298 - * @from: Source address, in kernel space.
18299 - * @n: Number of bytes to copy.
18301 - * Context: User context only. This function may sleep.
18303 - * Copy data from kernel space to user space.
18305 - * Returns number of bytes that could not be copied.
18306 - * On success, this will be zero.
18309 -copy_to_user(void __user *to, const void *from, unsigned long n)
18310 +void copy_from_user_overflow(void)
18312 - if (access_ok(VERIFY_WRITE, to, n))
18313 - n = __copy_to_user(to, from, n);
18315 + WARN(1, "Buffer overflow detected!\n");
18317 -EXPORT_SYMBOL(copy_to_user);
18318 +EXPORT_SYMBOL(copy_from_user_overflow);
18321 - * copy_from_user: - Copy a block of data from user space.
18322 - * @to: Destination address, in kernel space.
18323 - * @from: Source address, in user space.
18324 - * @n: Number of bytes to copy.
18326 - * Context: User context only. This function may sleep.
18328 - * Copy data from user space to kernel space.
18330 - * Returns number of bytes that could not be copied.
18331 - * On success, this will be zero.
18333 - * If some data could not be copied, this function will pad the copied
18334 - * data to the requested size using zero bytes.
18337 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18338 +void copy_to_user_overflow(void)
18340 - if (access_ok(VERIFY_READ, from, n))
18341 - n = __copy_from_user(to, from, n);
18343 - memset(to, 0, n);
18345 + WARN(1, "Buffer overflow detected!\n");
18347 -EXPORT_SYMBOL(_copy_from_user);
18348 +EXPORT_SYMBOL(copy_to_user_overflow);
18350 -void copy_from_user_overflow(void)
18351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18352 +void __set_fs(mm_segment_t x)
18354 - WARN(1, "Buffer overflow detected!\n");
18357 + loadsegment(gs, 0);
18359 + case TASK_SIZE_MAX:
18360 + loadsegment(gs, __USER_DS);
18363 + loadsegment(gs, __KERNEL_DS);
18370 -EXPORT_SYMBOL(copy_from_user_overflow);
18371 +EXPORT_SYMBOL(__set_fs);
18373 +void set_fs(mm_segment_t x)
18375 + current_thread_info()->addr_limit = x;
18378 +EXPORT_SYMBOL(set_fs);
18380 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18381 --- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18382 +++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18383 @@ -42,6 +42,12 @@ long
18384 __strncpy_from_user(char *dst, const char __user *src, long count)
18388 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18389 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18390 + src += PAX_USER_SHADOW_BASE;
18393 __do_strncpy_from_user(dst, src, count, res);
18396 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18401 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18402 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18403 + addr += PAX_USER_SHADOW_BASE;
18406 /* no memory constraint because it doesn't change any memory gcc knows
18409 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18411 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18413 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18414 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18417 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18418 + to += PAX_USER_SHADOW_BASE;
18419 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18420 + from += PAX_USER_SHADOW_BASE;
18423 return copy_user_generic((__force void *)to, (__force void *)from, len);
18429 EXPORT_SYMBOL(copy_in_user);
18431 diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18432 --- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18433 +++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18434 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18437 UTS_MACHINE := x86_64
18438 + biarch := $(call cc-option,-m64)
18439 CHECKFLAGS += -D__x86_64__ -m64
18441 KBUILD_AFLAGS += -m64
18442 @@ -195,3 +196,12 @@ define archhelp
18443 echo ' FDARGS="..." arguments for the booted kernel'
18444 echo ' FDINITRD=file initrd for the booted kernel'
18449 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18450 +*** Please upgrade your binutils to 2.18 or newer
18454 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18455 diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18456 --- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18457 +++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18459 #include <linux/module.h>
18460 #include <linux/spinlock.h>
18461 +#include <linux/sort.h>
18462 #include <asm/uaccess.h>
18463 +#include <asm/pgtable.h>
18466 + * The exception table needs to be sorted so that the binary
18467 + * search that we use to find entries in it works properly.
18468 + * This is used both for the kernel exception table and for
18469 + * the exception tables of modules that get loaded.
18471 +static int cmp_ex(const void *a, const void *b)
18473 + const struct exception_table_entry *x = a, *y = b;
18475 + /* avoid overflow */
18476 + if (x->insn > y->insn)
18478 + if (x->insn < y->insn)
18483 +static void swap_ex(void *a, void *b, int size)
18485 + struct exception_table_entry t, *x = a, *y = b;
18489 + pax_open_kernel();
18492 + pax_close_kernel();
18495 +void sort_extable(struct exception_table_entry *start,
18496 + struct exception_table_entry *finish)
18498 + sort(start, finish - start, sizeof(struct exception_table_entry),
18499 + cmp_ex, swap_ex);
18502 +#ifdef CONFIG_MODULES
18504 + * If the exception table is sorted, any referring to the module init
18505 + * will be at the beginning or the end.
18507 +void trim_init_extable(struct module *m)
18509 + /*trim the beginning*/
18510 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18512 + m->num_exentries--;
18515 + while (m->num_exentries &&
18516 + within_module_init(m->extable[m->num_exentries-1].insn, m))
18517 + m->num_exentries--;
18519 +#endif /* CONFIG_MODULES */
18521 int fixup_exception(struct pt_regs *regs)
18523 const struct exception_table_entry *fixup;
18525 #ifdef CONFIG_PNPBIOS
18526 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18527 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18528 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18529 extern u32 pnp_bios_is_utter_crap;
18530 pnp_bios_is_utter_crap = 1;
18531 diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18532 --- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18533 +++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-05 19:44:35.000000000 -0400
18534 @@ -12,10 +12,18 @@
18535 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18536 #include <linux/perf_event.h> /* perf_sw_event */
18537 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18538 +#include <linux/unistd.h>
18539 +#include <linux/compiler.h>
18541 #include <asm/traps.h> /* dotraplinkage, ... */
18542 #include <asm/pgalloc.h> /* pgd_*(), ... */
18543 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18544 +#include <asm/vsyscall.h>
18545 +#include <asm/tlbflush.h>
18547 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18548 +#include <asm/stacktrace.h>
18552 * Page fault error code bits:
18553 @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18556 /* kprobe_running() needs smp_processor_id() */
18557 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18558 + if (kprobes_built_in() && !user_mode(regs)) {
18560 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18562 @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18563 return !instr_lo || (instr_lo>>1) == 1;
18565 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18566 - if (probe_kernel_address(instr, opcode))
18567 + if (user_mode(regs)) {
18568 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18570 + } else if (probe_kernel_address(instr, opcode))
18573 *prefetch = (instr_lo == 0xF) &&
18574 @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18575 while (instr < max_instr) {
18576 unsigned char opcode;
18578 - if (probe_kernel_address(instr, opcode))
18579 + if (user_mode(regs)) {
18580 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18582 + } else if (probe_kernel_address(instr, opcode))
18586 @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18587 force_sig_info(si_signo, &info, tsk);
18590 +#ifdef CONFIG_PAX_EMUTRAMP
18591 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18594 +#ifdef CONFIG_PAX_PAGEEXEC
18595 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18601 + pgd = pgd_offset(mm, address);
18602 + if (!pgd_present(*pgd))
18604 + pud = pud_offset(pgd, address);
18605 + if (!pud_present(*pud))
18607 + pmd = pmd_offset(pud, address);
18608 + if (!pmd_present(*pmd))
18614 DEFINE_SPINLOCK(pgd_lock);
18615 LIST_HEAD(pgd_list);
18617 @@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18618 for (address = VMALLOC_START & PMD_MASK;
18619 address >= TASK_SIZE && address < FIXADDR_TOP;
18620 address += PMD_SIZE) {
18622 +#ifdef CONFIG_PAX_PER_CPU_PGD
18623 + unsigned long cpu;
18628 spin_lock(&pgd_lock);
18630 +#ifdef CONFIG_PAX_PER_CPU_PGD
18631 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18632 + pgd_t *pgd = get_cpu_pgd(cpu);
18635 list_for_each_entry(page, &pgd_list, lru) {
18636 + pgd_t *pgd = page_address(page);
18637 spinlock_t *pgt_lock;
18640 @@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18641 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18643 spin_lock(pgt_lock);
18644 - ret = vmalloc_sync_one(page_address(page), address);
18647 + ret = vmalloc_sync_one(pgd, address);
18649 +#ifndef CONFIG_PAX_PER_CPU_PGD
18650 spin_unlock(pgt_lock);
18655 @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18656 * an interrupt in the middle of a task switch..
18658 pgd_paddr = read_cr3();
18660 +#ifdef CONFIG_PAX_PER_CPU_PGD
18661 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18664 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18667 @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18668 * happen within a race in page table update. In the later
18672 +#ifdef CONFIG_PAX_PER_CPU_PGD
18673 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18674 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18676 pgd = pgd_offset(current->active_mm, address);
18679 pgd_ref = pgd_offset_k(address);
18680 if (pgd_none(*pgd_ref))
18682 @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18683 static int is_errata100(struct pt_regs *regs, unsigned long address)
18685 #ifdef CONFIG_X86_64
18686 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18687 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18691 @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18694 static const char nx_warning[] = KERN_CRIT
18695 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18696 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18699 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18700 @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18701 if (!oops_may_print())
18704 - if (error_code & PF_INSTR) {
18705 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18706 unsigned int level;
18708 pte_t *pte = lookup_address(address, &level);
18710 if (pte && pte_present(*pte) && !pte_exec(*pte))
18711 - printk(nx_warning, current_uid());
18712 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18715 +#ifdef CONFIG_PAX_KERNEXEC
18716 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18717 + if (current->signal->curr_ip)
18718 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18719 + ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18721 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18722 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18726 printk(KERN_ALERT "BUG: unable to handle kernel ");
18727 if (address < PAGE_SIZE)
18728 printk(KERN_CONT "NULL pointer dereference");
18729 @@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r
18730 unsigned long address, int si_code)
18732 struct task_struct *tsk = current;
18733 + struct mm_struct *mm = tsk->mm;
18735 +#ifdef CONFIG_X86_64
18736 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18737 + if (regs->ip == (unsigned long)vgettimeofday) {
18738 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18740 + } else if (regs->ip == (unsigned long)vtime) {
18741 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18743 + } else if (regs->ip == (unsigned long)vgetcpu) {
18744 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18750 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18751 + if (mm && (error_code & PF_USER)) {
18752 + unsigned long ip = regs->ip;
18754 + if (v8086_mode(regs))
18755 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18758 + * It's possible to have interrupts off here:
18760 + local_irq_enable();
18762 +#ifdef CONFIG_PAX_PAGEEXEC
18763 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18764 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18766 +#ifdef CONFIG_PAX_EMUTRAMP
18767 + switch (pax_handle_fetch_fault(regs)) {
18773 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18774 + do_group_exit(SIGKILL);
18778 +#ifdef CONFIG_PAX_SEGMEXEC
18779 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18781 +#ifdef CONFIG_PAX_EMUTRAMP
18782 + switch (pax_handle_fetch_fault(regs)) {
18788 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18789 + do_group_exit(SIGKILL);
18796 /* User mode accesses just cause a SIGSEGV */
18797 if (error_code & PF_USER) {
18798 @@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned
18802 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18803 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18808 + unsigned char pte_mask;
18810 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18811 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18814 + /* PaX: it's our fault, let's handle it if we can */
18816 + /* PaX: take a look at read faults before acquiring any locks */
18817 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18818 + /* instruction fetch attempt from a protected page in user mode */
18819 + up_read(&mm->mmap_sem);
18821 +#ifdef CONFIG_PAX_EMUTRAMP
18822 + switch (pax_handle_fetch_fault(regs)) {
18828 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18829 + do_group_exit(SIGKILL);
18832 + pmd = pax_get_pmd(mm, address);
18833 + if (unlikely(!pmd))
18836 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18837 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18838 + pte_unmap_unlock(pte, ptl);
18842 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18843 + /* write attempt to a protected page in user mode */
18844 + pte_unmap_unlock(pte, ptl);
18849 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18851 + if (likely(address > get_limit(regs->cs)))
18854 + set_pte(pte, pte_mkread(*pte));
18855 + __flush_tlb_one(address);
18856 + pte_unmap_unlock(pte, ptl);
18857 + up_read(&mm->mmap_sem);
18861 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18864 + * PaX: fill DTLB with user rights and retry
18866 + __asm__ __volatile__ (
18868 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18870 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18871 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18872 + * page fault when examined during a TLB load attempt. this is true not only
18873 + * for PTEs holding a non-present entry but also present entries that will
18874 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18875 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18876 + * for our target pages since their PTEs are simply not in the TLBs at all.
18878 + * the best thing in omitting it is that we gain around 15-20% speed in the
18879 + * fast path of the page fault handler and can get rid of tracing since we
18880 + * can no longer flush unintended entries.
18884 + __copyuser_seg"testb $0,(%0)\n"
18887 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18888 + : "memory", "cc");
18889 + pte_unmap_unlock(pte, ptl);
18890 + up_read(&mm->mmap_sem);
18896 * Handle a spurious fault caused by a stale TLB entry.
18898 @@ -927,6 +1160,9 @@ int show_unhandled_signals = 1;
18900 access_error(unsigned long error_code, struct vm_area_struct *vma)
18902 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18905 if (error_code & PF_WRITE) {
18906 /* write, present and write, not present: */
18907 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18908 @@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi
18910 struct vm_area_struct *vma;
18911 struct task_struct *tsk;
18912 - unsigned long address;
18913 struct mm_struct *mm;
18915 int write = error_code & PF_WRITE;
18916 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18917 (write ? FAULT_FLAG_WRITE : 0);
18919 + /* Get the faulting address: */
18920 + unsigned long address = read_cr2();
18922 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18923 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18924 + if (!search_exception_tables(regs->ip)) {
18925 + bad_area_nosemaphore(regs, error_code, address);
18928 + if (address < PAX_USER_SHADOW_BASE) {
18929 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18930 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18931 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18933 + address -= PAX_USER_SHADOW_BASE;
18940 - /* Get the faulting address: */
18941 - address = read_cr2();
18944 * Detect and handle instructions that would cause a page fault for
18945 * both a tracked kernel page and a userspace page.
18946 @@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi
18947 * User-mode registers count as a user access even for any
18948 * potential system fault or CPU buglet:
18950 - if (user_mode_vm(regs)) {
18951 + if (user_mode(regs)) {
18952 local_irq_enable();
18953 error_code |= PF_USER;
18955 @@ -1087,6 +1337,11 @@ retry:
18959 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18960 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18964 vma = find_vma(mm, address);
18965 if (unlikely(!vma)) {
18966 bad_area(regs, error_code, address);
18967 @@ -1098,18 +1353,24 @@ retry:
18968 bad_area(regs, error_code, address);
18971 - if (error_code & PF_USER) {
18973 - * Accessing the stack below %sp is always a bug.
18974 - * The large cushion allows instructions like enter
18975 - * and pusha to work. ("enter $65535, $31" pushes
18976 - * 32 pointers and then decrements %sp by 65535.)
18978 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18979 - bad_area(regs, error_code, address);
18983 + * Accessing the stack below %sp is always a bug.
18984 + * The large cushion allows instructions like enter
18985 + * and pusha to work. ("enter $65535, $31" pushes
18986 + * 32 pointers and then decrements %sp by 65535.)
18988 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18989 + bad_area(regs, error_code, address);
18993 +#ifdef CONFIG_PAX_SEGMEXEC
18994 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18995 + bad_area(regs, error_code, address);
19000 if (unlikely(expand_stack(vma, address))) {
19001 bad_area(regs, error_code, address);
19003 @@ -1164,3 +1425,199 @@ good_area:
19005 up_read(&mm->mmap_sem);
19008 +#ifdef CONFIG_PAX_EMUTRAMP
19009 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19013 + do { /* PaX: gcc trampoline emulation #1 */
19014 + unsigned char mov1, mov2;
19015 + unsigned short jmp;
19016 + unsigned int addr1, addr2;
19018 +#ifdef CONFIG_X86_64
19019 + if ((regs->ip + 11) >> 32)
19023 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19024 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19025 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19026 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19027 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19032 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19033 + regs->cx = addr1;
19034 + regs->ax = addr2;
19035 + regs->ip = addr2;
19040 + do { /* PaX: gcc trampoline emulation #2 */
19041 + unsigned char mov, jmp;
19042 + unsigned int addr1, addr2;
19044 +#ifdef CONFIG_X86_64
19045 + if ((regs->ip + 9) >> 32)
19049 + err = get_user(mov, (unsigned char __user *)regs->ip);
19050 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19051 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19052 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19057 + if (mov == 0xB9 && jmp == 0xE9) {
19058 + regs->cx = addr1;
19059 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19064 + return 1; /* PaX in action */
19067 +#ifdef CONFIG_X86_64
19068 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19072 + do { /* PaX: gcc trampoline emulation #1 */
19073 + unsigned short mov1, mov2, jmp1;
19074 + unsigned char jmp2;
19075 + unsigned int addr1;
19076 + unsigned long addr2;
19078 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19079 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19080 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19081 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19082 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19083 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19088 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19089 + regs->r11 = addr1;
19090 + regs->r10 = addr2;
19091 + regs->ip = addr1;
19096 + do { /* PaX: gcc trampoline emulation #2 */
19097 + unsigned short mov1, mov2, jmp1;
19098 + unsigned char jmp2;
19099 + unsigned long addr1, addr2;
19101 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19102 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19103 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19104 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19105 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19106 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19111 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19112 + regs->r11 = addr1;
19113 + regs->r10 = addr2;
19114 + regs->ip = addr1;
19119 + return 1; /* PaX in action */
19124 + * PaX: decide what to do with offenders (regs->ip = fault address)
19126 + * returns 1 when task should be killed
19127 + * 2 when gcc trampoline was detected
19129 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19131 + if (v8086_mode(regs))
19134 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19137 +#ifdef CONFIG_X86_32
19138 + return pax_handle_fetch_fault_32(regs);
19140 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19141 + return pax_handle_fetch_fault_32(regs);
19143 + return pax_handle_fetch_fault_64(regs);
19148 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19149 +void pax_report_insns(void *pc, void *sp)
19153 + printk(KERN_ERR "PAX: bytes at PC: ");
19154 + for (i = 0; i < 20; i++) {
19156 + if (get_user(c, (__force unsigned char __user *)pc+i))
19157 + printk(KERN_CONT "?? ");
19159 + printk(KERN_CONT "%02x ", c);
19163 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19164 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19166 + if (get_user(c, (__force unsigned long __user *)sp+i))
19167 +#ifdef CONFIG_X86_32
19168 + printk(KERN_CONT "???????? ");
19170 + printk(KERN_CONT "???????????????? ");
19173 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19180 + * probe_kernel_write(): safely attempt to write to a location
19181 + * @dst: address to write to
19182 + * @src: pointer to the data that shall be written
19183 + * @size: size of the data chunk
19185 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19186 + * happens, handle that and return -EFAULT.
19188 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19191 + mm_segment_t old_fs = get_fs();
19193 + set_fs(KERNEL_DS);
19194 + pagefault_disable();
19195 + pax_open_kernel();
19196 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19197 + pax_close_kernel();
19198 + pagefault_enable();
19201 + return ret ? -EFAULT : 0;
19203 diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19204 --- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19205 +++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19206 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19208 len = (unsigned long) nr_pages << PAGE_SHIFT;
19210 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19211 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19212 (void __user *)start, len)))
19215 diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19216 --- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19217 +++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19218 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19219 idx = type + KM_TYPE_NR*smp_processor_id();
19220 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19221 BUG_ON(!pte_none(*(kmap_pte-idx)));
19223 + pax_open_kernel();
19224 set_pte(kmap_pte-idx, mk_pte(page, prot));
19225 + pax_close_kernel();
19227 return (void *)vaddr;
19229 diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19230 --- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19231 +++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19232 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19233 struct hstate *h = hstate_file(file);
19234 struct mm_struct *mm = current->mm;
19235 struct vm_area_struct *vma;
19236 - unsigned long start_addr;
19237 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19239 +#ifdef CONFIG_PAX_SEGMEXEC
19240 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19241 + pax_task_size = SEGMEXEC_TASK_SIZE;
19244 + pax_task_size -= PAGE_SIZE;
19246 if (len > mm->cached_hole_size) {
19247 - start_addr = mm->free_area_cache;
19248 + start_addr = mm->free_area_cache;
19250 - start_addr = TASK_UNMAPPED_BASE;
19251 - mm->cached_hole_size = 0;
19252 + start_addr = mm->mmap_base;
19253 + mm->cached_hole_size = 0;
19257 @@ -280,26 +287,27 @@ full_search:
19259 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19260 /* At this point: (!vma || addr < vma->vm_end). */
19261 - if (TASK_SIZE - len < addr) {
19262 + if (pax_task_size - len < addr) {
19264 * Start a new search - just in case we missed
19267 - if (start_addr != TASK_UNMAPPED_BASE) {
19268 - start_addr = TASK_UNMAPPED_BASE;
19269 + if (start_addr != mm->mmap_base) {
19270 + start_addr = mm->mmap_base;
19271 mm->cached_hole_size = 0;
19276 - if (!vma || addr + len <= vma->vm_start) {
19277 - mm->free_area_cache = addr + len;
19280 + if (check_heap_stack_gap(vma, addr, len))
19282 if (addr + mm->cached_hole_size < vma->vm_start)
19283 mm->cached_hole_size = vma->vm_start - addr;
19284 addr = ALIGN(vma->vm_end, huge_page_size(h));
19287 + mm->free_area_cache = addr + len;
19291 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19292 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19294 struct hstate *h = hstate_file(file);
19295 struct mm_struct *mm = current->mm;
19296 - struct vm_area_struct *vma, *prev_vma;
19297 - unsigned long base = mm->mmap_base, addr = addr0;
19298 + struct vm_area_struct *vma;
19299 + unsigned long base = mm->mmap_base, addr;
19300 unsigned long largest_hole = mm->cached_hole_size;
19301 - int first_time = 1;
19303 /* don't allow allocations above current base */
19304 if (mm->free_area_cache > base)
19305 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19307 mm->free_area_cache = base;
19311 /* make sure it can fit in the remaining address space */
19312 if (mm->free_area_cache < len)
19315 /* either no address requested or can't fit in requested address hole */
19316 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19317 + addr = (mm->free_area_cache - len);
19319 + addr &= huge_page_mask(h);
19320 + vma = find_vma(mm, addr);
19322 * Lookup failure means no vma is above this address,
19323 * i.e. return with success:
19325 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19329 * new region fits between prev_vma->vm_end and
19330 * vma->vm_start, use it:
19332 - if (addr + len <= vma->vm_start &&
19333 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19334 + if (check_heap_stack_gap(vma, addr, len)) {
19335 /* remember the address as a hint for next time */
19336 - mm->cached_hole_size = largest_hole;
19337 - return (mm->free_area_cache = addr);
19339 - /* pull free_area_cache down to the first hole */
19340 - if (mm->free_area_cache == vma->vm_end) {
19341 - mm->free_area_cache = vma->vm_start;
19342 - mm->cached_hole_size = largest_hole;
19344 + mm->cached_hole_size = largest_hole;
19345 + return (mm->free_area_cache = addr);
19347 + /* pull free_area_cache down to the first hole */
19348 + if (mm->free_area_cache == vma->vm_end) {
19349 + mm->free_area_cache = vma->vm_start;
19350 + mm->cached_hole_size = largest_hole;
19353 /* remember the largest hole we saw so far */
19354 if (addr + largest_hole < vma->vm_start)
19355 - largest_hole = vma->vm_start - addr;
19356 + largest_hole = vma->vm_start - addr;
19358 /* try just below the current vma->vm_start */
19359 - addr = (vma->vm_start - len) & huge_page_mask(h);
19360 - } while (len <= vma->vm_start);
19361 + addr = skip_heap_stack_gap(vma, len);
19362 + } while (!IS_ERR_VALUE(addr));
19366 - * if hint left us with no space for the requested
19367 - * mapping then try again:
19369 - if (first_time) {
19370 - mm->free_area_cache = base;
19371 - largest_hole = 0;
19376 * A failed mmap() very likely causes application failure,
19377 * so fall back to the bottom-up function here. This scenario
19378 * can happen with large stack limits and large mmap()
19381 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19383 +#ifdef CONFIG_PAX_SEGMEXEC
19384 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19385 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19389 + mm->mmap_base = TASK_UNMAPPED_BASE;
19391 +#ifdef CONFIG_PAX_RANDMMAP
19392 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19393 + mm->mmap_base += mm->delta_mmap;
19396 + mm->free_area_cache = mm->mmap_base;
19397 mm->cached_hole_size = ~0UL;
19398 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19399 len, pgoff, flags);
19400 @@ -386,6 +392,7 @@ fail:
19402 * Restore the topdown base:
19404 + mm->mmap_base = base;
19405 mm->free_area_cache = base;
19406 mm->cached_hole_size = ~0UL;
19408 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19409 struct hstate *h = hstate_file(file);
19410 struct mm_struct *mm = current->mm;
19411 struct vm_area_struct *vma;
19412 + unsigned long pax_task_size = TASK_SIZE;
19414 if (len & ~huge_page_mask(h))
19416 - if (len > TASK_SIZE)
19418 +#ifdef CONFIG_PAX_SEGMEXEC
19419 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19420 + pax_task_size = SEGMEXEC_TASK_SIZE;
19423 + pax_task_size -= PAGE_SIZE;
19425 + if (len > pax_task_size)
19428 if (flags & MAP_FIXED) {
19429 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19431 addr = ALIGN(addr, huge_page_size(h));
19432 vma = find_vma(mm, addr);
19433 - if (TASK_SIZE - len >= addr &&
19434 - (!vma || addr + len <= vma->vm_start))
19435 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19438 if (mm->get_unmapped_area == arch_get_unmapped_area)
19439 diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19440 --- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19441 +++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19442 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19446 - * Creates a middle page table and puts a pointer to it in the
19447 - * given global directory entry. This only returns the gd entry
19448 - * in non-PAE compilation mode, since the middle layer is folded.
19450 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19453 - pmd_t *pmd_table;
19455 -#ifdef CONFIG_X86_PAE
19456 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19457 - if (after_bootmem)
19458 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19460 - pmd_table = (pmd_t *)alloc_low_page();
19461 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19462 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19463 - pud = pud_offset(pgd, 0);
19464 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19466 - return pmd_table;
19469 - pud = pud_offset(pgd, 0);
19470 - pmd_table = pmd_offset(pud, 0);
19472 - return pmd_table;
19476 * Create a page table and place a pointer to it in a middle page
19479 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19480 page_table = (pte_t *)alloc_low_page();
19482 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19483 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19484 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19486 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19488 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19491 return pte_offset_kernel(pmd, 0);
19494 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19497 + pmd_t *pmd_table;
19499 + pud = pud_offset(pgd, 0);
19500 + pmd_table = pmd_offset(pud, 0);
19502 + return pmd_table;
19505 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19507 int pgd_idx = pgd_index(vaddr);
19508 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19509 int pgd_idx, pmd_idx;
19510 unsigned long vaddr;
19516 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19517 pgd = pgd_base + pgd_idx;
19519 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19520 - pmd = one_md_table_init(pgd);
19521 - pmd = pmd + pmd_index(vaddr);
19522 + pud = pud_offset(pgd, vaddr);
19523 + pmd = pmd_offset(pud, vaddr);
19525 +#ifdef CONFIG_X86_PAE
19526 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19529 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19530 pmd++, pmd_idx++) {
19531 pte = page_table_kmap_check(one_page_table_init(pmd),
19532 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19536 -static inline int is_kernel_text(unsigned long addr)
19537 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19539 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19542 + if ((start > ktla_ktva((unsigned long)_etext) ||
19543 + end <= ktla_ktva((unsigned long)_stext)) &&
19544 + (start > ktla_ktva((unsigned long)_einittext) ||
19545 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19547 +#ifdef CONFIG_ACPI_SLEEP
19548 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19551 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19557 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19558 unsigned long last_map_addr = end;
19559 unsigned long start_pfn, end_pfn;
19560 pgd_t *pgd_base = swapper_pg_dir;
19561 - int pgd_idx, pmd_idx, pte_ofs;
19562 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19568 unsigned pages_2m, pages_4k;
19569 @@ -281,8 +282,13 @@ repeat:
19571 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19572 pgd = pgd_base + pgd_idx;
19573 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19574 - pmd = one_md_table_init(pgd);
19575 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19576 + pud = pud_offset(pgd, 0);
19577 + pmd = pmd_offset(pud, 0);
19579 +#ifdef CONFIG_X86_PAE
19580 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19583 if (pfn >= end_pfn)
19585 @@ -294,14 +300,13 @@ repeat:
19587 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19588 pmd++, pmd_idx++) {
19589 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19590 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19593 * Map with big pages if possible, otherwise
19594 * create normal page tables:
19597 - unsigned int addr2;
19598 pgprot_t prot = PAGE_KERNEL_LARGE;
19600 * first pass will use the same initial
19601 @@ -311,11 +316,7 @@ repeat:
19602 __pgprot(PTE_IDENT_ATTR |
19605 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19606 - PAGE_OFFSET + PAGE_SIZE-1;
19608 - if (is_kernel_text(addr) ||
19609 - is_kernel_text(addr2))
19610 + if (is_kernel_text(address, address + PMD_SIZE))
19611 prot = PAGE_KERNEL_LARGE_EXEC;
19614 @@ -332,7 +333,7 @@ repeat:
19615 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19617 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19618 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19619 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19620 pgprot_t prot = PAGE_KERNEL;
19622 * first pass will use the same initial
19623 @@ -340,7 +341,7 @@ repeat:
19625 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19627 - if (is_kernel_text(addr))
19628 + if (is_kernel_text(address, address + PAGE_SIZE))
19629 prot = PAGE_KERNEL_EXEC;
19632 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19634 pud = pud_offset(pgd, va);
19635 pmd = pmd_offset(pud, va);
19636 - if (!pmd_present(*pmd))
19637 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19640 pte = pte_offset_kernel(pmd, va);
19641 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19643 static void __init pagetable_init(void)
19645 - pgd_t *pgd_base = swapper_pg_dir;
19647 - permanent_kmaps_init(pgd_base);
19648 + permanent_kmaps_init(swapper_pg_dir);
19651 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19652 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19653 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19655 /* user-defined highmem size */
19656 @@ -754,6 +753,12 @@ void __init mem_init(void)
19660 +#ifdef CONFIG_PAX_PER_CPU_PGD
19661 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19662 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19663 + KERNEL_PGD_PTRS);
19666 #ifdef CONFIG_FLATMEM
19669 @@ -771,7 +776,7 @@ void __init mem_init(void)
19670 set_highmem_pages_init();
19672 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19673 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19674 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19675 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19677 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19678 @@ -812,10 +817,10 @@ void __init mem_init(void)
19679 ((unsigned long)&__init_end -
19680 (unsigned long)&__init_begin) >> 10,
19682 - (unsigned long)&_etext, (unsigned long)&_edata,
19683 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19684 + (unsigned long)&_sdata, (unsigned long)&_edata,
19685 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19687 - (unsigned long)&_text, (unsigned long)&_etext,
19688 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19689 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19692 @@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19693 if (!kernel_set_to_readonly)
19696 + start = ktla_ktva(start);
19697 pr_debug("Set kernel text: %lx - %lx for read write\n",
19698 start, start+size);
19700 @@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19701 if (!kernel_set_to_readonly)
19704 + start = ktla_ktva(start);
19705 pr_debug("Set kernel text: %lx - %lx for read only\n",
19706 start, start+size);
19708 @@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19709 unsigned long start = PFN_ALIGN(_text);
19710 unsigned long size = PFN_ALIGN(_etext) - start;
19712 + start = ktla_ktva(start);
19713 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19714 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19716 diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19717 --- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19718 +++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19719 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19720 * around without checking the pgd every time.
19723 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19724 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19725 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19727 int force_personality32;
19728 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19730 for (address = start; address <= end; address += PGDIR_SIZE) {
19731 const pgd_t *pgd_ref = pgd_offset_k(address);
19733 +#ifdef CONFIG_PAX_PER_CPU_PGD
19734 + unsigned long cpu;
19739 if (pgd_none(*pgd_ref))
19742 spin_lock(&pgd_lock);
19744 +#ifdef CONFIG_PAX_PER_CPU_PGD
19745 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19746 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19748 list_for_each_entry(page, &pgd_list, lru) {
19750 spinlock_t *pgt_lock;
19751 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19752 /* the pgt_lock only for Xen */
19753 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19754 spin_lock(pgt_lock);
19757 if (pgd_none(*pgd))
19758 set_pgd(pgd, *pgd_ref);
19759 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19760 BUG_ON(pgd_page_vaddr(*pgd)
19761 != pgd_page_vaddr(*pgd_ref));
19763 +#ifndef CONFIG_PAX_PER_CPU_PGD
19764 spin_unlock(pgt_lock);
19768 spin_unlock(&pgd_lock);
19770 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19771 pmd = fill_pmd(pud, vaddr);
19772 pte = fill_pte(pmd, vaddr);
19774 + pax_open_kernel();
19775 set_pte(pte, new_pte);
19776 + pax_close_kernel();
19779 * It's enough to flush this one mapping.
19780 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19781 pgd = pgd_offset_k((unsigned long)__va(phys));
19782 if (pgd_none(*pgd)) {
19783 pud = (pud_t *) spp_getpage();
19784 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19786 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19788 pud = pud_offset(pgd, (unsigned long)__va(phys));
19789 if (pud_none(*pud)) {
19790 pmd = (pmd_t *) spp_getpage();
19791 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19793 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19795 pmd = pmd_offset(pud, phys);
19796 BUG_ON(!pmd_none(*pmd));
19797 @@ -698,6 +712,12 @@ void __init mem_init(void)
19801 +#ifdef CONFIG_PAX_PER_CPU_PGD
19802 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19803 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19804 + KERNEL_PGD_PTRS);
19807 /* clear_bss() already clear the empty_zero_page */
19810 @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19811 static struct vm_area_struct gate_vma = {
19812 .vm_start = VSYSCALL_START,
19813 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19814 - .vm_page_prot = PAGE_READONLY_EXEC,
19815 - .vm_flags = VM_READ | VM_EXEC
19816 + .vm_page_prot = PAGE_READONLY,
19817 + .vm_flags = VM_READ
19820 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19821 @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19823 const char *arch_vma_name(struct vm_area_struct *vma)
19825 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19826 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19828 if (vma == &gate_vma)
19829 return "[vsyscall]";
19830 diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19831 --- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19832 +++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19833 @@ -33,7 +33,7 @@ int direct_gbpages
19834 static void __init find_early_table_space(unsigned long end, int use_pse,
19837 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19838 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19841 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19842 @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19844 int devmem_is_allowed(unsigned long pagenr)
19846 - if (pagenr <= 256)
19847 +#ifdef CONFIG_GRKERNSEC_KMEM
19852 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19857 +#ifdef CONFIG_VM86
19858 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19863 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19865 +#ifdef CONFIG_GRKERNSEC_KMEM
19866 + /* throw out everything else below 1MB */
19867 + if (pagenr <= 256)
19870 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19872 if (!page_is_ram(pagenr))
19878 @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19880 void free_initmem(void)
19883 +#ifdef CONFIG_PAX_KERNEXEC
19884 +#ifdef CONFIG_X86_32
19885 + /* PaX: limit KERNEL_CS to actual size */
19886 + unsigned long addr, limit;
19887 + struct desc_struct d;
19890 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19891 + limit = (limit - 1UL) >> PAGE_SHIFT;
19893 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19894 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19895 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19896 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19899 + /* PaX: make KERNEL_CS read-only */
19900 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19901 + if (!paravirt_enabled())
19902 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19904 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19905 + pgd = pgd_offset_k(addr);
19906 + pud = pud_offset(pgd, addr);
19907 + pmd = pmd_offset(pud, addr);
19908 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19911 +#ifdef CONFIG_X86_PAE
19912 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19914 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19915 + pgd = pgd_offset_k(addr);
19916 + pud = pud_offset(pgd, addr);
19917 + pmd = pmd_offset(pud, addr);
19918 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19923 +#ifdef CONFIG_MODULES
19924 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19931 + unsigned long addr, end;
19933 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19934 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19935 + pgd = pgd_offset_k(addr);
19936 + pud = pud_offset(pgd, addr);
19937 + pmd = pmd_offset(pud, addr);
19938 + if (!pmd_present(*pmd))
19940 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19941 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19943 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19946 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19947 + end = addr + KERNEL_IMAGE_SIZE;
19948 + for (; addr < end; addr += PMD_SIZE) {
19949 + pgd = pgd_offset_k(addr);
19950 + pud = pud_offset(pgd, addr);
19951 + pmd = pmd_offset(pud, addr);
19952 + if (!pmd_present(*pmd))
19954 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19955 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19962 free_init_pages("unused kernel memory",
19963 (unsigned long)(&__init_begin),
19964 (unsigned long)(&__init_end));
19965 diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
19966 --- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
19967 +++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
19968 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19969 type = kmap_atomic_idx_push();
19970 idx = type + KM_TYPE_NR * smp_processor_id();
19971 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19973 + pax_open_kernel();
19974 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19975 + pax_close_kernel();
19977 arch_flush_lazy_mmu_mode();
19979 return (void *)vaddr;
19980 diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
19981 --- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
19982 +++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
19983 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
19984 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19985 int is_ram = page_is_ram(pfn);
19987 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19988 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19990 WARN_ON_ONCE(is_ram);
19992 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19993 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19995 static __initdata int after_paging_init;
19996 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19997 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19999 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20001 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20002 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20004 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20005 - memset(bm_pte, 0, sizeof(bm_pte));
20006 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20007 + pmd_populate_user(&init_mm, pmd, bm_pte);
20010 * The boot-ioremap range spans multiple pmds, for which
20011 diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20012 --- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20013 +++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20014 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20015 * memory (e.g. tracked pages)? For now, we need this to avoid
20016 * invoking kmemcheck for PnP BIOS calls.
20018 - if (regs->flags & X86_VM_MASK)
20019 + if (v8086_mode(regs))
20021 - if (regs->cs != __KERNEL_CS)
20022 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20025 pte = kmemcheck_pte_lookup(address);
20026 diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20027 --- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20028 +++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20029 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20030 * Leave an at least ~128 MB hole with possible stack randomization.
20032 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20033 -#define MAX_GAP (TASK_SIZE/6*5)
20034 +#define MAX_GAP (pax_task_size/6*5)
20037 * True on X86_32 or when emulating IA32 on X86_64
20038 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20039 return rnd << PAGE_SHIFT;
20042 -static unsigned long mmap_base(void)
20043 +static unsigned long mmap_base(struct mm_struct *mm)
20045 unsigned long gap = rlimit(RLIMIT_STACK);
20046 + unsigned long pax_task_size = TASK_SIZE;
20048 +#ifdef CONFIG_PAX_SEGMEXEC
20049 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20050 + pax_task_size = SEGMEXEC_TASK_SIZE;
20055 else if (gap > MAX_GAP)
20058 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20059 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20063 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20064 * does, but not when emulating X86_32
20066 -static unsigned long mmap_legacy_base(void)
20067 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20069 - if (mmap_is_ia32())
20070 + if (mmap_is_ia32()) {
20072 +#ifdef CONFIG_PAX_SEGMEXEC
20073 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20074 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20078 return TASK_UNMAPPED_BASE;
20081 return TASK_UNMAPPED_BASE + mmap_rnd();
20084 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20085 void arch_pick_mmap_layout(struct mm_struct *mm)
20087 if (mmap_is_legacy()) {
20088 - mm->mmap_base = mmap_legacy_base();
20089 + mm->mmap_base = mmap_legacy_base(mm);
20091 +#ifdef CONFIG_PAX_RANDMMAP
20092 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20093 + mm->mmap_base += mm->delta_mmap;
20096 mm->get_unmapped_area = arch_get_unmapped_area;
20097 mm->unmap_area = arch_unmap_area;
20099 - mm->mmap_base = mmap_base();
20100 + mm->mmap_base = mmap_base(mm);
20102 +#ifdef CONFIG_PAX_RANDMMAP
20103 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20104 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20107 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20108 mm->unmap_area = arch_unmap_area_topdown;
20110 diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20111 --- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20112 +++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20113 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20117 - unsigned char *ip = (unsigned char *)instptr;
20118 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20119 my_trace->opcode = MMIO_UNKNOWN_OP;
20120 my_trace->width = 0;
20121 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20122 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20123 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20124 void __iomem *addr)
20126 - static atomic_t next_id;
20127 + static atomic_unchecked_t next_id;
20128 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20129 /* These are page-unaligned. */
20130 struct mmiotrace_map map = {
20131 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20135 - .id = atomic_inc_return(&next_id)
20136 + .id = atomic_inc_return_unchecked(&next_id)
20138 map.map_id = trace->id;
20140 diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20141 --- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20142 +++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20143 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20147 -extern unsigned long find_max_low_pfn(void);
20148 extern unsigned long highend_pfn, highstart_pfn;
20150 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20151 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20152 --- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20153 +++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20154 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20156 #ifdef CONFIG_PCI_BIOS
20157 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20158 - pgprot_val(forbidden) |= _PAGE_NX;
20159 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20163 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20164 * Does not cover __inittext since that is gone later on. On
20165 * 64bit we do not enforce !NX on the low mapping
20167 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20168 - pgprot_val(forbidden) |= _PAGE_NX;
20169 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20170 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20172 +#ifdef CONFIG_DEBUG_RODATA
20174 * The .rodata section needs to be read-only. Using the pfn
20175 * catches all aliases.
20176 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20177 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20178 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20179 pgprot_val(forbidden) |= _PAGE_RW;
20182 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20184 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20188 +#ifdef CONFIG_PAX_KERNEXEC
20189 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20190 + pgprot_val(forbidden) |= _PAGE_RW;
20191 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20195 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20198 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20199 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20201 /* change init_mm */
20202 + pax_open_kernel();
20203 set_pte_atomic(kpte, pte);
20205 #ifdef CONFIG_X86_32
20206 if (!SHARED_KERNEL_PMD) {
20208 +#ifdef CONFIG_PAX_PER_CPU_PGD
20209 + unsigned long cpu;
20214 +#ifdef CONFIG_PAX_PER_CPU_PGD
20215 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20216 + pgd_t *pgd = get_cpu_pgd(cpu);
20218 list_for_each_entry(page, &pgd_list, lru) {
20220 + pgd_t *pgd = (pgd_t *)page_address(page);
20226 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20227 + pgd += pgd_index(address);
20228 pud = pud_offset(pgd, address);
20229 pmd = pmd_offset(pud, address);
20230 set_pte_atomic((pte_t *)pmd, pte);
20234 + pax_close_kernel();
20238 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20239 --- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20240 +++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20241 @@ -36,7 +36,7 @@ enum {
20243 static int pte_testbit(pte_t pte)
20245 - return pte_flags(pte) & _PAGE_UNUSED1;
20246 + return pte_flags(pte) & _PAGE_CPA_TEST;
20249 struct split_state {
20250 diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20251 --- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20252 +++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20253 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20256 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20257 - current->comm, current->pid, start, end);
20258 + current->comm, task_pid_nr(current), start, end);
20262 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20263 while (cursor < to) {
20264 if (!devmem_is_allowed(pfn)) {
20266 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20267 - current->comm, from, to);
20268 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20269 + current->comm, from, to, cursor);
20272 cursor += PAGE_SIZE;
20273 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20275 "%s:%d ioremap_change_attr failed %s "
20277 - current->comm, current->pid,
20278 + current->comm, task_pid_nr(current),
20280 base, (unsigned long long)(base + size));
20282 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20283 if (want_flags != flags) {
20284 printk(KERN_WARNING
20285 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20286 - current->comm, current->pid,
20287 + current->comm, task_pid_nr(current),
20288 cattr_name(want_flags),
20289 (unsigned long long)paddr,
20290 (unsigned long long)(paddr + size),
20291 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20292 free_memtype(paddr, paddr + size);
20293 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20294 " for %Lx-%Lx, got %s\n",
20295 - current->comm, current->pid,
20296 + current->comm, task_pid_nr(current),
20297 cattr_name(want_flags),
20298 (unsigned long long)paddr,
20299 (unsigned long long)(paddr + size),
20300 diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20301 --- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20302 +++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20303 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20305 enum reason_type rv = OTHERS;
20307 - p = (unsigned char *)ins_addr;
20308 + p = (unsigned char *)ktla_ktva(ins_addr);
20309 p += skip_prefix(p, &prf);
20310 p += get_opcode(p, &opcode);
20312 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20313 struct prefix_bits prf;
20316 - p = (unsigned char *)ins_addr;
20317 + p = (unsigned char *)ktla_ktva(ins_addr);
20318 p += skip_prefix(p, &prf);
20319 p += get_opcode(p, &opcode);
20321 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20322 struct prefix_bits prf;
20325 - p = (unsigned char *)ins_addr;
20326 + p = (unsigned char *)ktla_ktva(ins_addr);
20327 p += skip_prefix(p, &prf);
20328 p += get_opcode(p, &opcode);
20330 @@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20334 - p = (unsigned char *)ins_addr;
20335 + p = (unsigned char *)ktla_ktva(ins_addr);
20336 p += skip_prefix(p, &prf);
20337 p += get_opcode(p, &opcode);
20338 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20339 @@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20343 - p = (unsigned char *)ins_addr;
20344 + p = (unsigned char *)ktla_ktva(ins_addr);
20345 p += skip_prefix(p, &prf);
20346 p += get_opcode(p, &opcode);
20347 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20348 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20349 --- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20350 +++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20351 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20354 pte = pte_offset_kernel(pmd, vaddr);
20356 + pax_open_kernel();
20357 if (pte_val(pteval))
20358 set_pte_at(&init_mm, vaddr, pte, pteval);
20360 pte_clear(&init_mm, vaddr, pte);
20361 + pax_close_kernel();
20364 * It's enough to flush this one mapping.
20365 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20366 --- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20367 +++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20368 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20369 list_del(&page->lru);
20372 -#define UNSHARED_PTRS_PER_PGD \
20373 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20374 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20375 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20377 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20380 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20384 +#ifdef CONFIG_PAX_PER_CPU_PGD
20385 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20389 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20390 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20398 +#ifdef CONFIG_X86_64
20399 +#define pxd_t pud_t
20400 +#define pyd_t pgd_t
20401 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20402 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20403 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20404 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20405 +#define PYD_SIZE PGDIR_SIZE
20407 +#define pxd_t pmd_t
20408 +#define pyd_t pud_t
20409 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20410 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20411 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20412 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20413 +#define PYD_SIZE PUD_SIZE
20416 +#ifdef CONFIG_PAX_PER_CPU_PGD
20417 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20418 +static inline void pgd_dtor(pgd_t *pgd) {}
20420 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20422 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20423 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20425 spin_unlock(&pgd_lock);
20430 * List of all pgd's needed for non-PAE so it can invalidate entries
20431 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20435 -#ifdef CONFIG_X86_PAE
20436 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20438 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20439 * updating the top-level pagetable entries to guarantee the
20440 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20441 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20442 * and initialize the kernel pmds here.
20444 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20445 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20447 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20449 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20453 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20454 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20455 #else /* !CONFIG_X86_PAE */
20457 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20458 -#define PREALLOCATED_PMDS 0
20459 +#define PREALLOCATED_PXDS 0
20461 #endif /* CONFIG_X86_PAE */
20463 -static void free_pmds(pmd_t *pmds[])
20464 +static void free_pxds(pxd_t *pxds[])
20468 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20470 - free_page((unsigned long)pmds[i]);
20471 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20473 + free_page((unsigned long)pxds[i]);
20476 -static int preallocate_pmds(pmd_t *pmds[])
20477 +static int preallocate_pxds(pxd_t *pxds[])
20480 bool failed = false;
20482 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20483 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20485 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20486 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20499 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20500 * preallocate which never got a corresponding vma will need to be
20503 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20504 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20508 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20509 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20510 pgd_t pgd = pgdp[i];
20512 if (pgd_val(pgd) != 0) {
20513 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20514 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20516 - pgdp[i] = native_make_pgd(0);
20517 + set_pgd(pgdp + i, native_make_pgd(0));
20519 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20520 - pmd_free(mm, pmd);
20521 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20522 + pxd_free(mm, pxd);
20527 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20528 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20532 unsigned long addr;
20535 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20536 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20539 - pud = pud_offset(pgd, 0);
20540 +#ifdef CONFIG_X86_64
20541 + pyd = pyd_offset(mm, 0L);
20543 + pyd = pyd_offset(pgd, 0L);
20546 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20547 - i++, pud++, addr += PUD_SIZE) {
20548 - pmd_t *pmd = pmds[i];
20549 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20550 + i++, pyd++, addr += PYD_SIZE) {
20551 + pxd_t *pxd = pxds[i];
20553 if (i >= KERNEL_PGD_BOUNDARY)
20554 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20555 - sizeof(pmd_t) * PTRS_PER_PMD);
20556 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20557 + sizeof(pxd_t) * PTRS_PER_PMD);
20559 - pud_populate(mm, pud, pmd);
20560 + pyd_populate(mm, pyd, pxd);
20564 pgd_t *pgd_alloc(struct mm_struct *mm)
20567 - pmd_t *pmds[PREALLOCATED_PMDS];
20568 + pxd_t *pxds[PREALLOCATED_PXDS];
20570 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20572 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20576 - if (preallocate_pmds(pmds) != 0)
20577 + if (preallocate_pxds(pxds) != 0)
20580 if (paravirt_pgd_alloc(mm) != 0)
20581 - goto out_free_pmds;
20582 + goto out_free_pxds;
20585 * Make sure that pre-populating the pmds is atomic with
20586 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20587 spin_lock(&pgd_lock);
20590 - pgd_prepopulate_pmd(mm, pgd, pmds);
20591 + pgd_prepopulate_pxd(mm, pgd, pxds);
20593 spin_unlock(&pgd_lock);
20602 free_page((unsigned long)pgd);
20604 @@ -295,7 +344,7 @@ out:
20606 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20608 - pgd_mop_up_pmds(mm, pgd);
20609 + pgd_mop_up_pxds(mm, pgd);
20611 paravirt_pgd_free(mm, pgd);
20612 free_page((unsigned long)pgd);
20613 diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20614 --- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20615 +++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20617 #include <asm/pgtable.h>
20618 #include <asm/proto.h>
20620 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20621 static int disable_nx __cpuinitdata;
20623 +#ifndef CONFIG_PAX_PAGEEXEC
20627 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20630 early_param("noexec", noexec_setup);
20635 void __cpuinit x86_configure_nx(void)
20637 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20638 if (cpu_has_nx && !disable_nx)
20639 __supported_pte_mask |= _PAGE_NX;
20642 __supported_pte_mask &= ~_PAGE_NX;
20645 diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20646 --- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20647 +++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20648 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20650 cpumask_clear_cpu(cpu,
20651 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20653 +#ifndef CONFIG_PAX_PER_CPU_PGD
20654 load_cr3(swapper_pg_dir);
20658 EXPORT_SYMBOL_GPL(leave_mm);
20660 diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20661 --- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20662 +++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20663 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20664 struct stack_frame_ia32 *fp;
20666 /* Also check accessibility of one struct frame_head beyond */
20667 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20668 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20670 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20672 @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20674 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20676 - if (!user_mode_vm(regs)) {
20677 + if (!user_mode(regs)) {
20678 unsigned long stack = kernel_stack_pointer(regs);
20680 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20681 diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20682 --- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20683 +++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20684 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20685 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20686 pci_mmcfg_late_init();
20687 pcibios_enable_irq = mrst_pci_irq_enable;
20688 - pci_root_ops = pci_mrst_ops;
20689 + pax_open_kernel();
20690 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20691 + pax_close_kernel();
20692 /* Continue with standard init */
20695 diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20696 --- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20697 +++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20698 @@ -79,50 +79,93 @@ union bios32 {
20700 unsigned long address;
20701 unsigned short segment;
20702 -} bios32_indirect = { 0, __KERNEL_CS };
20703 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20706 * Returns the entry point for the given service, NULL on error
20709 -static unsigned long bios32_service(unsigned long service)
20710 +static unsigned long __devinit bios32_service(unsigned long service)
20712 unsigned char return_code; /* %al */
20713 unsigned long address; /* %ebx */
20714 unsigned long length; /* %ecx */
20715 unsigned long entry; /* %edx */
20716 unsigned long flags;
20717 + struct desc_struct d, *gdt;
20719 local_irq_save(flags);
20720 - __asm__("lcall *(%%edi); cld"
20722 + gdt = get_cpu_gdt_table(smp_processor_id());
20724 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20725 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20726 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20727 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20729 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20730 : "=a" (return_code),
20736 - "D" (&bios32_indirect));
20737 + "D" (&bios32_indirect),
20738 + "r"(__PCIBIOS_DS)
20741 + pax_open_kernel();
20742 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20743 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20744 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20745 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20746 + pax_close_kernel();
20748 local_irq_restore(flags);
20750 switch (return_code) {
20752 - return address + entry;
20753 - case 0x80: /* Not present */
20754 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20756 - default: /* Shouldn't happen */
20757 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20758 - service, return_code);
20761 + unsigned char flags;
20763 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20764 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20765 + printk(KERN_WARNING "bios32_service: not valid\n");
20768 + address = address + PAGE_OFFSET;
20769 + length += 16UL; /* some BIOSs underreport this... */
20771 + if (length >= 64*1024*1024) {
20772 + length >>= PAGE_SHIFT;
20776 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20777 + gdt = get_cpu_gdt_table(cpu);
20778 + pack_descriptor(&d, address, length, 0x9b, flags);
20779 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20780 + pack_descriptor(&d, address, length, 0x93, flags);
20781 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20785 + case 0x80: /* Not present */
20786 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20788 + default: /* Shouldn't happen */
20789 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20790 + service, return_code);
20796 unsigned long address;
20797 unsigned short segment;
20798 -} pci_indirect = { 0, __KERNEL_CS };
20799 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20801 -static int pci_bios_present;
20802 +static int pci_bios_present __read_only;
20804 static int __devinit check_pcibios(void)
20806 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20807 unsigned long flags, pcibios_entry;
20809 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20810 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20811 + pci_indirect.address = pcibios_entry;
20813 local_irq_save(flags);
20815 - "lcall *(%%edi); cld\n\t"
20816 + __asm__("movw %w6, %%ds\n\t"
20817 + "lcall *%%ss:(%%edi); cld\n\t"
20823 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20826 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20827 - "D" (&pci_indirect)
20828 + "D" (&pci_indirect),
20829 + "r" (__PCIBIOS_DS)
20831 local_irq_restore(flags);
20833 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20837 - __asm__("lcall *(%%esi); cld\n\t"
20838 + __asm__("movw %w6, %%ds\n\t"
20839 + "lcall *%%ss:(%%esi); cld\n\t"
20845 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20846 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20849 - "S" (&pci_indirect));
20850 + "S" (&pci_indirect),
20851 + "r" (__PCIBIOS_DS));
20853 * Zero-extend the result beyond 8 bits, do not trust the
20854 * BIOS having done it:
20855 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20859 - __asm__("lcall *(%%esi); cld\n\t"
20860 + __asm__("movw %w6, %%ds\n\t"
20861 + "lcall *%%ss:(%%esi); cld\n\t"
20867 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20868 : "1" (PCIBIOS_READ_CONFIG_WORD),
20871 - "S" (&pci_indirect));
20872 + "S" (&pci_indirect),
20873 + "r" (__PCIBIOS_DS));
20875 * Zero-extend the result beyond 16 bits, do not trust the
20876 * BIOS having done it:
20877 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20881 - __asm__("lcall *(%%esi); cld\n\t"
20882 + __asm__("movw %w6, %%ds\n\t"
20883 + "lcall *%%ss:(%%esi); cld\n\t"
20889 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20890 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20893 - "S" (&pci_indirect));
20894 + "S" (&pci_indirect),
20895 + "r" (__PCIBIOS_DS));
20899 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20903 - __asm__("lcall *(%%esi); cld\n\t"
20904 + __asm__("movw %w6, %%ds\n\t"
20905 + "lcall *%%ss:(%%esi); cld\n\t"
20911 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20915 - "S" (&pci_indirect));
20916 + "S" (&pci_indirect),
20917 + "r" (__PCIBIOS_DS));
20920 - __asm__("lcall *(%%esi); cld\n\t"
20921 + __asm__("movw %w6, %%ds\n\t"
20922 + "lcall *%%ss:(%%esi); cld\n\t"
20928 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20932 - "S" (&pci_indirect));
20933 + "S" (&pci_indirect),
20934 + "r" (__PCIBIOS_DS));
20937 - __asm__("lcall *(%%esi); cld\n\t"
20938 + __asm__("movw %w6, %%ds\n\t"
20939 + "lcall *%%ss:(%%esi); cld\n\t"
20945 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20949 - "S" (&pci_indirect));
20950 + "S" (&pci_indirect),
20951 + "r" (__PCIBIOS_DS));
20955 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20957 DBG("PCI: Fetching IRQ routing table... ");
20958 __asm__("push %%es\n\t"
20959 + "movw %w8, %%ds\n\t"
20962 - "lcall *(%%esi); cld\n\t"
20963 + "lcall *%%ss:(%%esi); cld\n\t"
20970 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20973 "S" (&pci_indirect),
20976 + "r" (__PCIBIOS_DS)
20978 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20980 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20984 - __asm__("lcall *(%%esi); cld\n\t"
20985 + __asm__("movw %w5, %%ds\n\t"
20986 + "lcall *%%ss:(%%esi); cld\n\t"
20992 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20993 : "0" (PCIBIOS_SET_PCI_HW_INT),
20994 "b" ((dev->bus->number << 8) | dev->devfn),
20995 "c" ((irq << 8) | (pin + 10)),
20996 - "S" (&pci_indirect));
20997 + "S" (&pci_indirect),
20998 + "r" (__PCIBIOS_DS));
20999 return !(ret & 0xff00);
21001 EXPORT_SYMBOL(pcibios_set_irq_routing);
21002 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21003 --- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21004 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21005 @@ -38,70 +38,37 @@
21008 static unsigned long efi_rt_eflags;
21009 -static pgd_t efi_bak_pg_dir_pointer[2];
21010 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21012 -void efi_call_phys_prelog(void)
21013 +void __init efi_call_phys_prelog(void)
21015 - unsigned long cr4;
21016 - unsigned long temp;
21017 struct desc_ptr gdt_descr;
21019 local_irq_save(efi_rt_eflags);
21022 - * If I don't have PAE, I should just duplicate two entries in page
21023 - * directory. If I have PAE, I just need to duplicate one entry in
21024 - * page directory.
21026 - cr4 = read_cr4_safe();
21028 - if (cr4 & X86_CR4_PAE) {
21029 - efi_bak_pg_dir_pointer[0].pgd =
21030 - swapper_pg_dir[pgd_index(0)].pgd;
21031 - swapper_pg_dir[0].pgd =
21032 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21034 - efi_bak_pg_dir_pointer[0].pgd =
21035 - swapper_pg_dir[pgd_index(0)].pgd;
21036 - efi_bak_pg_dir_pointer[1].pgd =
21037 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21038 - swapper_pg_dir[pgd_index(0)].pgd =
21039 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21040 - temp = PAGE_OFFSET + 0x400000;
21041 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21042 - swapper_pg_dir[pgd_index(temp)].pgd;
21044 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21045 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21046 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21049 * After the lock is released, the original page table is restored.
21053 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
21054 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21055 gdt_descr.size = GDT_SIZE - 1;
21056 load_gdt(&gdt_descr);
21059 -void efi_call_phys_epilog(void)
21060 +void __init efi_call_phys_epilog(void)
21062 - unsigned long cr4;
21063 struct desc_ptr gdt_descr;
21065 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21066 + gdt_descr.address = get_cpu_gdt_table(0);
21067 gdt_descr.size = GDT_SIZE - 1;
21068 load_gdt(&gdt_descr);
21070 - cr4 = read_cr4_safe();
21072 - if (cr4 & X86_CR4_PAE) {
21073 - swapper_pg_dir[pgd_index(0)].pgd =
21074 - efi_bak_pg_dir_pointer[0].pgd;
21076 - swapper_pg_dir[pgd_index(0)].pgd =
21077 - efi_bak_pg_dir_pointer[0].pgd;
21078 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21079 - efi_bak_pg_dir_pointer[1].pgd;
21081 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21084 * After the lock is released, the original page table is restored.
21085 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21086 --- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21087 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21091 #include <linux/linkage.h>
21092 +#include <linux/init.h>
21093 #include <asm/page_types.h>
21097 * service functions will comply with gcc calling convention, too.
21102 ENTRY(efi_call_phys)
21104 * 0. The function can only be called in Linux kernel. So CS has been
21105 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21106 * The mapping of lower virtual memory has been created in prelog and
21110 - subl $__PAGE_OFFSET, %edx
21112 + jmp 1f-__PAGE_OFFSET
21116 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21117 * parameter 2, ..., param n. To make things easy, we save the return
21118 * address of efi_call_phys in a global variable.
21121 - movl %edx, saved_return_addr
21122 - /* get the function pointer into ECX*/
21124 - movl %ecx, efi_rt_function_ptr
21126 - subl $__PAGE_OFFSET, %edx
21128 + popl (saved_return_addr)
21129 + popl (efi_rt_function_ptr)
21132 * 3. Clear PG bit in %CR0.
21133 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21135 * 5. Call the physical function.
21138 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21142 * 6. After EFI runtime service returns, control will return to
21143 * following instruction. We'd better readjust stack pointer first.
21144 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21146 orl $0x80000000, %edx
21152 * 8. Now restore the virtual mode from flat mode by
21153 * adding EIP with PAGE_OFFSET.
21157 + jmp 1f+__PAGE_OFFSET
21161 * 9. Balance the stack. And because EAX contain the return value,
21162 * we'd better not clobber it.
21164 - leal efi_rt_function_ptr, %edx
21165 - movl (%edx), %ecx
21167 + pushl (efi_rt_function_ptr)
21170 - * 10. Push the saved return address onto the stack and return.
21171 + * 10. Return to the saved return address.
21173 - leal saved_return_addr, %edx
21174 - movl (%edx), %ecx
21177 + jmpl *(saved_return_addr)
21178 ENDPROC(efi_call_phys)
21185 efi_rt_function_ptr:
21186 diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21187 --- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21188 +++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21189 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21192 /* Reboot and power off are handled by the SCU on a MID device */
21193 -static void mrst_power_off(void)
21194 +static __noreturn void mrst_power_off(void)
21196 intel_scu_ipc_simple_command(0xf1, 1);
21200 -static void mrst_reboot(void)
21201 +static __noreturn void mrst_reboot(void)
21203 intel_scu_ipc_simple_command(0xf1, 0);
21208 diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21209 --- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21210 +++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21211 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21213 struct reset_args reset_args;
21215 + pax_track_stack();
21217 reset_args.sender = sender;
21220 diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21221 --- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21222 +++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21223 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21224 static void fix_processor_context(void)
21226 int cpu = smp_processor_id();
21227 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21228 + struct tss_struct *t = init_tss + cpu;
21230 set_tss_desc(cpu, t); /*
21231 * This just modifies memory; should not be
21232 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21235 #ifdef CONFIG_X86_64
21236 + pax_open_kernel();
21237 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21238 + pax_close_kernel();
21240 syscall_init(); /* This sets MSR_*STAR and related */
21242 Binary files linux-2.6.39.4/arch/x86/tools/test_get_len and linux-2.6.39.4/arch/x86/tools/test_get_len differ
21243 diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21244 --- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21245 +++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21246 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21247 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21248 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21250 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21251 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21255 diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21256 --- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21257 +++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21258 @@ -22,24 +22,48 @@
21259 #include <asm/hpet.h>
21260 #include <asm/unistd.h>
21261 #include <asm/io.h>
21262 +#include <asm/fixmap.h>
21263 #include "vextern.h"
21265 #define gtod vdso_vsyscall_gtod_data
21267 +notrace noinline long __vdso_fallback_time(long *t)
21270 + asm volatile("syscall"
21272 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21276 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21279 asm("syscall" : "=a" (ret) :
21280 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21281 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21285 +notrace static inline cycle_t __vdso_vread_hpet(void)
21287 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21290 +notrace static inline cycle_t __vdso_vread_tsc(void)
21292 + cycle_t ret = (cycle_t)vget_cycles();
21294 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21297 notrace static inline long vgetns(void)
21300 - cycles_t (*vread)(void);
21301 - vread = gtod->clock.vread;
21302 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21303 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21304 + v = __vdso_vread_tsc();
21306 + v = __vdso_vread_hpet();
21307 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21308 return (v * gtod->clock.mult) >> gtod->clock.shift;
21311 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21313 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21315 - if (likely(gtod->sysctl_enabled))
21316 + if (likely(gtod->sysctl_enabled &&
21317 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21318 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21320 case CLOCK_REALTIME:
21321 if (likely(gtod->clock.vread))
21322 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21323 int clock_gettime(clockid_t, struct timespec *)
21324 __attribute__((weak, alias("__vdso_clock_gettime")));
21326 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21327 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21330 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21331 + asm("syscall" : "=a" (ret) :
21332 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21336 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21338 + if (likely(gtod->sysctl_enabled &&
21339 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21340 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21342 if (likely(tv != NULL)) {
21343 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21344 offsetof(struct timespec, tv_nsec) ||
21345 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21349 - asm("syscall" : "=a" (ret) :
21350 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21352 + return __vdso_fallback_gettimeofday(tv, tz);
21354 int gettimeofday(struct timeval *, struct timezone *)
21355 __attribute__((weak, alias("__vdso_gettimeofday")));
21356 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21357 --- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21358 +++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21360 #include <asm/tlbflush.h>
21361 #include <asm/vdso.h>
21362 #include <asm/proto.h>
21363 +#include <asm/mman.h>
21367 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21368 void enable_sep_cpu(void)
21370 int cpu = get_cpu();
21371 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21372 + struct tss_struct *tss = init_tss + cpu;
21374 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21376 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21377 gate_vma.vm_start = FIXADDR_USER_START;
21378 gate_vma.vm_end = FIXADDR_USER_END;
21379 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21380 - gate_vma.vm_page_prot = __P101;
21381 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21383 * Make sure the vDSO gets into every core dump.
21384 * Dumping its contents makes post-mortem fully interpretable later
21385 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21387 addr = VDSO_HIGH_BASE;
21389 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21390 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21391 if (IS_ERR_VALUE(addr)) {
21397 - current->mm->context.vdso = (void *)addr;
21398 + current->mm->context.vdso = addr;
21400 if (compat_uses_vma || !compat) {
21402 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21405 current_thread_info()->sysenter_return =
21406 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21407 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21411 - current->mm->context.vdso = NULL;
21412 + current->mm->context.vdso = 0;
21414 up_write(&mm->mmap_sem);
21416 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21418 const char *arch_vma_name(struct vm_area_struct *vma)
21420 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21421 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21424 +#ifdef CONFIG_PAX_SEGMEXEC
21425 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21432 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21433 * Check to see if the corresponding task was created in compat vdso
21436 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21437 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21441 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21442 --- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21443 +++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21444 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21445 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21446 #include "vextern.h"
21449 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21450 +VEXTERN(fallback_gettimeofday)
21451 +VEXTERN(fallback_time)
21454 diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21455 --- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21456 +++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21458 put into vextern.h and be referenced as a pointer with vdso prefix.
21459 The main kernel later fills in the values. */
21462 VEXTERN(vgetcpu_mode)
21463 VEXTERN(vsyscall_gtod_data)
21464 diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21465 --- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21466 +++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21467 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21471 - if (memcmp(vbase, "\177ELF", 4)) {
21472 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
21473 printk("VDSO: I'm broken; not ELF\n");
21476 @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21480 - current->mm->context.vdso = (void *)addr;
21481 + current->mm->context.vdso = addr;
21483 ret = install_special_mapping(mm, addr, vdso_size,
21485 @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21489 - current->mm->context.vdso = NULL;
21490 + current->mm->context.vdso = 0;
21494 @@ -134,10 +134,3 @@ up_fail:
21495 up_write(&mm->mmap_sem);
21499 -static __init int vdso_setup(char *s)
21501 - vdso_enabled = simple_strtoul(s, NULL, 0);
21504 -__setup("vdso=", vdso_setup);
21505 diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21506 --- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21507 +++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21508 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21510 struct shared_info xen_dummy_shared_info;
21512 -void *xen_initial_gdt;
21514 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21515 __read_mostly int xen_have_vector_callback;
21516 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21517 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21521 -static void xen_reboot(int reason)
21522 +static __noreturn void xen_reboot(int reason)
21524 struct sched_shutdown r = { .reason = reason };
21526 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21530 -static void xen_restart(char *msg)
21531 +static __noreturn void xen_restart(char *msg)
21533 xen_reboot(SHUTDOWN_reboot);
21536 -static void xen_emergency_restart(void)
21537 +static __noreturn void xen_emergency_restart(void)
21539 xen_reboot(SHUTDOWN_reboot);
21542 -static void xen_machine_halt(void)
21543 +static __noreturn void xen_machine_halt(void)
21545 xen_reboot(SHUTDOWN_poweroff);
21547 @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21548 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21550 /* Work out if we support NX */
21551 - x86_configure_nx();
21552 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21553 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21554 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21557 + __supported_pte_mask |= _PAGE_NX;
21558 + rdmsr(MSR_EFER, l, h);
21560 + wrmsr(MSR_EFER, l, h);
21564 xen_setup_features();
21566 @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21568 machine_ops = xen_machine_ops;
21571 - * The only reliable way to retain the initial address of the
21572 - * percpu gdt_page is to remember it here, so we can go and
21573 - * mark it RW later, when the initial percpu area is freed.
21575 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21579 #ifdef CONFIG_ACPI_NUMA
21580 diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21581 --- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21582 +++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21583 @@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21584 convert_pfn_mfn(init_level4_pgt);
21585 convert_pfn_mfn(level3_ident_pgt);
21586 convert_pfn_mfn(level3_kernel_pgt);
21587 + convert_pfn_mfn(level3_vmalloc_pgt);
21588 + convert_pfn_mfn(level3_vmemmap_pgt);
21590 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21591 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21592 @@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21593 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21594 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21595 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21596 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21597 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21598 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21599 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21600 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21601 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21603 diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21604 --- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21605 +++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21606 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21608 BUG_ON(smp_processor_id() != 0);
21609 native_smp_prepare_boot_cpu();
21611 - /* We've switched to the "real" per-cpu gdt, so make sure the
21612 - old memory can be recycled */
21613 - make_lowmem_page_readwrite(xen_initial_gdt);
21615 xen_filter_cpu_maps();
21616 xen_setup_vcpu_info_placement();
21618 @@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21619 gdt = get_cpu_gdt_table(cpu);
21621 ctxt->flags = VGCF_IN_KERNEL;
21622 - ctxt->user_regs.ds = __USER_DS;
21623 - ctxt->user_regs.es = __USER_DS;
21624 + ctxt->user_regs.ds = __KERNEL_DS;
21625 + ctxt->user_regs.es = __KERNEL_DS;
21626 ctxt->user_regs.ss = __KERNEL_DS;
21627 #ifdef CONFIG_X86_32
21628 ctxt->user_regs.fs = __KERNEL_PERCPU;
21629 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21630 + savesegment(gs, ctxt->user_regs.gs);
21632 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21634 @@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21637 per_cpu(current_task, cpu) = idle;
21638 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21639 #ifdef CONFIG_X86_32
21642 clear_tsk_thread_flag(idle, TIF_FORK);
21643 - per_cpu(kernel_stack, cpu) =
21644 - (unsigned long)task_stack_page(idle) -
21645 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21646 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21648 xen_setup_runstate_info(cpu);
21649 xen_setup_timer(cpu);
21650 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21651 --- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21652 +++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21653 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21654 ESP_OFFSET=4 # bytes pushed onto stack
21657 - * Store vcpu_info pointer for easy access. Do it this way to
21658 - * avoid having to reload %fs
21659 + * Store vcpu_info pointer for easy access.
21662 - GET_THREAD_INFO(%eax)
21663 - movl TI_cpu(%eax), %eax
21664 - movl __per_cpu_offset(,%eax,4), %eax
21665 - mov xen_vcpu(%eax), %eax
21667 + mov $(__KERNEL_PERCPU), %eax
21669 + mov PER_CPU_VAR(xen_vcpu), %eax
21672 movl xen_vcpu, %eax
21674 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21675 --- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21676 +++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21677 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21678 #ifdef CONFIG_X86_32
21679 mov %esi,xen_start_info
21680 mov $init_thread_union+THREAD_SIZE,%esp
21682 + movl $cpu_gdt_table,%edi
21683 + movl $__per_cpu_load,%eax
21684 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21686 + movb %al,__KERNEL_PERCPU + 4(%edi)
21687 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21688 + movl $__per_cpu_end - 1,%eax
21689 + subl $__per_cpu_start,%eax
21690 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21693 mov %rsi,xen_start_info
21694 mov $init_thread_union+THREAD_SIZE,%rsp
21695 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21696 --- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21697 +++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21699 extern const char xen_hypervisor_callback[];
21700 extern const char xen_failsafe_callback[];
21702 -extern void *xen_initial_gdt;
21705 void xen_copy_trap_info(struct trap_info *traps);
21707 diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21708 --- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21709 +++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21710 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21712 EXPORT_SYMBOL(blk_iopoll_complete);
21714 -static void blk_iopoll_softirq(struct softirq_action *h)
21715 +static void blk_iopoll_softirq(void)
21717 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21718 int rearm = 0, budget = blk_iopoll_budget;
21719 diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21720 --- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21721 +++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21722 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21726 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21727 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21729 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21731 diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21732 --- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21733 +++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21734 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21735 * Softirq action handler - move entries to local list and loop over them
21736 * while passing them to the queue registered handler.
21738 -static void blk_done_softirq(struct softirq_action *h)
21739 +static void blk_done_softirq(void)
21741 struct list_head *cpu_list, local_list;
21743 diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21744 --- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21745 +++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21746 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21747 struct sg_io_v4 *hdr, struct bsg_device *bd,
21748 fmode_t has_write_perm)
21750 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21751 + unsigned char *cmdptr;
21753 if (hdr->request_len > BLK_MAX_CDB) {
21754 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21758 + cmdptr = rq->cmd;
21762 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21763 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21767 + if (cmdptr != rq->cmd)
21768 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21770 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21771 if (blk_verify_command(rq->cmd, has_write_perm))
21773 diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21774 --- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21775 +++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21776 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21777 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21778 struct sg_io_hdr *hdr, fmode_t mode)
21780 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21781 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21782 + unsigned char *cmdptr;
21784 + if (rq->cmd != rq->__cmd)
21785 + cmdptr = rq->cmd;
21789 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21792 + if (cmdptr != rq->cmd)
21793 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21795 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21798 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21800 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21801 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21802 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21803 + unsigned char *cmdptr;
21807 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21810 rq->cmd_len = cmdlen;
21811 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21813 + if (rq->cmd != rq->__cmd)
21814 + cmdptr = rq->cmd;
21818 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21821 + if (rq->cmd != cmdptr)
21822 + memcpy(rq->cmd, cmdptr, cmdlen);
21824 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21827 diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21828 --- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21829 +++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21830 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21832 struct cryptd_blkcipher_request_ctx {
21833 crypto_completion_t complete;
21837 struct cryptd_hash_ctx {
21838 struct crypto_shash *child;
21839 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21841 struct cryptd_aead_request_ctx {
21842 crypto_completion_t complete;
21846 static void cryptd_queue_worker(struct work_struct *work);
21848 diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21849 --- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21850 +++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21851 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21852 for (i = 0; i < 7; ++i)
21853 gf128mul_x_lle(&p[i + 1], &p[i]);
21855 - memset(r, 0, sizeof(r));
21856 + memset(r, 0, sizeof(*r));
21858 u8 ch = ((u8 *)b)[15 - i];
21860 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21861 for (i = 0; i < 7; ++i)
21862 gf128mul_x_bbe(&p[i + 1], &p[i]);
21864 - memset(r, 0, sizeof(r));
21865 + memset(r, 0, sizeof(*r));
21867 u8 ch = ((u8 *)b)[i];
21869 diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21870 --- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21871 +++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21872 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21873 u32 r0,r1,r2,r3,r4;
21876 + pax_track_stack();
21878 /* Copy key, add padding */
21880 for (i = 0; i < keylen; ++i)
21881 diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21882 --- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21883 +++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21912 @@ -49,11 +54,16 @@
21929 @@ -80,8 +90,11 @@ btfixupprep
21933 +capability_names.h
21941 @@ -106,16 +119,19 @@ fore200e_mkfirm
21956 initramfs_data.cpio
21957 +initramfs_data.cpio.bz2
21958 initramfs_data.cpio.gz
21961 @@ -125,7 +141,6 @@ int32.c
21969 @@ -149,7 +164,9 @@ mkboot
21979 @@ -165,6 +182,7 @@ parse.h
21987 @@ -180,7 +198,9 @@ r600_reg_safe.h
21997 @@ -189,6 +209,7 @@ setup
22005 @@ -213,13 +234,17 @@ version.h*
22023 diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22024 --- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22025 +++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22026 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22027 the specified number of seconds. This is to be used if
22028 your oopses keep scrolling off the screen.
22030 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22031 + virtualization environments that don't cope well with the
22032 + expand down segment used by UDEREF on X86-32 or the frequent
22033 + page table updates on X86-64.
22035 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22040 diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22041 --- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22042 +++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22043 @@ -38,12 +38,12 @@
22045 u64 cper_next_record_id(void)
22047 - static atomic64_t seq;
22048 + static atomic64_unchecked_t seq;
22050 - if (!atomic64_read(&seq))
22051 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22052 + if (!atomic64_read_unchecked(&seq))
22053 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22055 - return atomic64_inc_return(&seq);
22056 + return atomic64_inc_return_unchecked(&seq);
22058 EXPORT_SYMBOL_GPL(cper_next_record_id);
22060 diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22061 --- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22062 +++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22063 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22070 mutex_lock(&resource->lock);
22071 resource->trip[attr->index - 7] = temp;
22072 diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22073 --- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22074 +++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22075 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22076 size_t count, loff_t * ppos)
22078 struct list_head *node, *next;
22080 - char str[5] = "";
22081 - unsigned int len = count;
22087 + char strbuf[5] = {0};
22089 - if (copy_from_user(strbuf, buffer, len))
22092 + if (copy_from_user(strbuf, buffer, count))
22094 - strbuf[len] = '\0';
22095 - sscanf(strbuf, "%s", str);
22096 + strbuf[count] = '\0';
22098 mutex_lock(&acpi_device_lock);
22099 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22100 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22101 if (!dev->wakeup.flags.valid)
22104 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22105 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22106 if (device_can_wakeup(&dev->dev)) {
22107 bool enable = !device_may_wakeup(&dev->dev);
22108 device_set_wakeup_enable(&dev->dev, enable);
22109 diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22110 --- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22111 +++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22112 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22116 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22117 + BUG_ON(pr->id >= nr_cpu_ids);
22121 diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22122 --- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22123 +++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22124 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22125 struct ata_port *ap;
22128 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22129 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22133 @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22134 struct ata_port *ap;
22135 struct ata_link *link;
22137 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22138 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22139 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22141 link = qc->dev->link;
22142 @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22146 + pax_open_kernel();
22148 for (cur = ops->inherits; cur; cur = cur->inherits) {
22149 void **inherit = (void **)cur;
22150 @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22154 - ops->inherits = NULL;
22155 + *(struct ata_port_operations **)&ops->inherits = NULL;
22157 + pax_close_kernel();
22158 spin_unlock(&lock);
22161 diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22162 --- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22163 +++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22164 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22166 struct ata_link *link;
22168 + pax_track_stack();
22170 ata_for_each_link(link, ap, HOST_FIRST)
22171 ata_eh_link_report(link);
22173 diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22174 --- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22175 +++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22176 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22177 /* Handle platform specific quirks */
22178 if (pdata->quirk) {
22179 if (pdata->quirk & CF_BROKEN_PIO) {
22180 - ap->ops->set_piomode = NULL;
22181 + pax_open_kernel();
22182 + *(void **)&ap->ops->set_piomode = NULL;
22183 + pax_close_kernel();
22186 if (pdata->quirk & CF_BROKEN_MWDMA)
22187 diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22188 --- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22189 +++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22190 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22191 vcc->pop(vcc, skb);
22193 dev_kfree_skb_any(skb);
22194 - atomic_inc(&vcc->stats->tx);
22195 + atomic_inc_unchecked(&vcc->stats->tx);
22199 diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22200 --- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22201 +++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22202 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22203 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22206 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22207 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22209 // free the descriptor
22211 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22212 dump_skb ("<<<", vc, skb);
22215 - atomic_inc(&atm_vcc->stats->rx);
22216 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22217 __net_timestamp(skb);
22218 // end of our responsibility
22219 atm_vcc->push (atm_vcc, skb);
22220 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22222 PRINTK (KERN_INFO, "dropped over-size frame");
22223 // should we count this?
22224 - atomic_inc(&atm_vcc->stats->rx_drop);
22225 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22229 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22232 if (check_area (skb->data, skb->len)) {
22233 - atomic_inc(&atm_vcc->stats->tx_err);
22234 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22235 return -ENOMEM; // ?
22238 diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22239 --- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22240 +++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22241 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22242 if (vcc->pop) vcc->pop(vcc,skb);
22243 else dev_kfree_skb(skb);
22244 if (dev_data) return 0;
22245 - atomic_inc(&vcc->stats->tx_err);
22246 + atomic_inc_unchecked(&vcc->stats->tx_err);
22249 size = skb->len+sizeof(struct atmtcp_hdr);
22250 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22252 if (vcc->pop) vcc->pop(vcc,skb);
22253 else dev_kfree_skb(skb);
22254 - atomic_inc(&vcc->stats->tx_err);
22255 + atomic_inc_unchecked(&vcc->stats->tx_err);
22258 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22259 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22260 if (vcc->pop) vcc->pop(vcc,skb);
22261 else dev_kfree_skb(skb);
22262 out_vcc->push(out_vcc,new_skb);
22263 - atomic_inc(&vcc->stats->tx);
22264 - atomic_inc(&out_vcc->stats->rx);
22265 + atomic_inc_unchecked(&vcc->stats->tx);
22266 + atomic_inc_unchecked(&out_vcc->stats->rx);
22270 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22271 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22272 read_unlock(&vcc_sklist_lock);
22274 - atomic_inc(&vcc->stats->tx_err);
22275 + atomic_inc_unchecked(&vcc->stats->tx_err);
22278 skb_pull(skb,sizeof(struct atmtcp_hdr));
22279 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22280 __net_timestamp(new_skb);
22281 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22282 out_vcc->push(out_vcc,new_skb);
22283 - atomic_inc(&vcc->stats->tx);
22284 - atomic_inc(&out_vcc->stats->rx);
22285 + atomic_inc_unchecked(&vcc->stats->tx);
22286 + atomic_inc_unchecked(&out_vcc->stats->rx);
22288 if (vcc->pop) vcc->pop(vcc,skb);
22289 else dev_kfree_skb(skb);
22290 diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22291 --- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22292 +++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22293 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22294 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22297 - atomic_inc(&vcc->stats->rx_err);
22298 + atomic_inc_unchecked(&vcc->stats->rx_err);
22301 length = ATM_CELL_SIZE-1; /* no HEC */
22302 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22306 - atomic_inc(&vcc->stats->rx_err);
22307 + atomic_inc_unchecked(&vcc->stats->rx_err);
22310 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22311 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22312 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22313 vcc->dev->number,vcc->vci,length,size << 2,descr);
22315 - atomic_inc(&vcc->stats->rx_err);
22316 + atomic_inc_unchecked(&vcc->stats->rx_err);
22319 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22320 @@ -771,7 +771,7 @@ rx_dequeued++;
22321 vcc->push(vcc,skb);
22324 - atomic_inc(&vcc->stats->rx);
22325 + atomic_inc_unchecked(&vcc->stats->rx);
22327 wake_up(&eni_dev->rx_wait);
22329 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22331 if (vcc->pop) vcc->pop(vcc,skb);
22332 else dev_kfree_skb_irq(skb);
22333 - atomic_inc(&vcc->stats->tx);
22334 + atomic_inc_unchecked(&vcc->stats->tx);
22335 wake_up(&eni_dev->tx_wait);
22338 diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22339 --- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22340 +++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22341 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22345 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22346 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22348 fs_dprintk (FS_DEBUG_TXMEM, "i");
22349 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22350 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22352 skb_put (skb, qe->p1 & 0xffff);
22353 ATM_SKB(skb)->vcc = atm_vcc;
22354 - atomic_inc(&atm_vcc->stats->rx);
22355 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22356 __net_timestamp(skb);
22357 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22358 atm_vcc->push (atm_vcc, skb);
22359 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22363 - atomic_inc(&atm_vcc->stats->rx_drop);
22364 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22366 case 0x1f: /* Reassembly abort: no buffers. */
22367 /* Silently increment error counter. */
22369 - atomic_inc(&atm_vcc->stats->rx_drop);
22370 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22372 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22373 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22374 diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22375 --- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22376 +++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22377 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22379 /* check error condition */
22380 if (*entry->status & STATUS_ERROR)
22381 - atomic_inc(&vcc->stats->tx_err);
22382 + atomic_inc_unchecked(&vcc->stats->tx_err);
22384 - atomic_inc(&vcc->stats->tx);
22385 + atomic_inc_unchecked(&vcc->stats->tx);
22389 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22391 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22393 - atomic_inc(&vcc->stats->rx_drop);
22394 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22398 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22400 dev_kfree_skb_any(skb);
22402 - atomic_inc(&vcc->stats->rx_drop);
22403 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22407 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22409 vcc->push(vcc, skb);
22410 - atomic_inc(&vcc->stats->rx);
22411 + atomic_inc_unchecked(&vcc->stats->rx);
22413 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22415 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22416 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22417 fore200e->atm_dev->number,
22418 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22419 - atomic_inc(&vcc->stats->rx_err);
22420 + atomic_inc_unchecked(&vcc->stats->rx_err);
22424 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22428 - atomic_inc(&vcc->stats->tx_err);
22429 + atomic_inc_unchecked(&vcc->stats->tx_err);
22431 fore200e->tx_sat++;
22432 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22433 diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22434 --- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22435 +++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22436 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22438 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22439 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22440 - atomic_inc(&vcc->stats->rx_drop);
22441 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22442 goto return_host_buffers;
22445 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22446 RBRQ_LEN_ERR(he_dev->rbrq_head)
22448 vcc->vpi, vcc->vci);
22449 - atomic_inc(&vcc->stats->rx_err);
22450 + atomic_inc_unchecked(&vcc->stats->rx_err);
22451 goto return_host_buffers;
22454 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22455 vcc->push(vcc, skb);
22456 spin_lock(&he_dev->global_lock);
22458 - atomic_inc(&vcc->stats->rx);
22459 + atomic_inc_unchecked(&vcc->stats->rx);
22461 return_host_buffers:
22463 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22464 tpd->vcc->pop(tpd->vcc, tpd->skb);
22466 dev_kfree_skb_any(tpd->skb);
22467 - atomic_inc(&tpd->vcc->stats->tx_err);
22468 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22470 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22472 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22473 vcc->pop(vcc, skb);
22475 dev_kfree_skb_any(skb);
22476 - atomic_inc(&vcc->stats->tx_err);
22477 + atomic_inc_unchecked(&vcc->stats->tx_err);
22481 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22482 vcc->pop(vcc, skb);
22484 dev_kfree_skb_any(skb);
22485 - atomic_inc(&vcc->stats->tx_err);
22486 + atomic_inc_unchecked(&vcc->stats->tx_err);
22490 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22491 vcc->pop(vcc, skb);
22493 dev_kfree_skb_any(skb);
22494 - atomic_inc(&vcc->stats->tx_err);
22495 + atomic_inc_unchecked(&vcc->stats->tx_err);
22496 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22499 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22500 vcc->pop(vcc, skb);
22502 dev_kfree_skb_any(skb);
22503 - atomic_inc(&vcc->stats->tx_err);
22504 + atomic_inc_unchecked(&vcc->stats->tx_err);
22505 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22508 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22509 __enqueue_tpd(he_dev, tpd, cid);
22510 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22512 - atomic_inc(&vcc->stats->tx);
22513 + atomic_inc_unchecked(&vcc->stats->tx);
22517 diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22518 --- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22519 +++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22520 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22522 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22524 - atomic_inc(&vcc->stats->rx);
22525 + atomic_inc_unchecked(&vcc->stats->rx);
22526 __net_timestamp(skb);
22527 // end of our responsibility
22528 vcc->push (vcc, skb);
22529 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22530 dev->tx_iovec = NULL;
22533 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22534 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22537 hrz_kfree_skb (skb);
22538 diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22539 --- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22540 +++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22541 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22543 dev_kfree_skb(skb);
22545 - atomic_inc(&vcc->stats->tx);
22546 + atomic_inc_unchecked(&vcc->stats->tx);
22549 atomic_dec(&scq->used);
22550 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22551 if ((sb = dev_alloc_skb(64)) == NULL) {
22552 printk("%s: Can't allocate buffers for aal0.\n",
22554 - atomic_add(i, &vcc->stats->rx_drop);
22555 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22558 if (!atm_charge(vcc, sb->truesize)) {
22559 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22561 - atomic_add(i - 1, &vcc->stats->rx_drop);
22562 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22566 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22567 ATM_SKB(sb)->vcc = vcc;
22568 __net_timestamp(sb);
22569 vcc->push(vcc, sb);
22570 - atomic_inc(&vcc->stats->rx);
22571 + atomic_inc_unchecked(&vcc->stats->rx);
22573 cell += ATM_CELL_PAYLOAD;
22575 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22577 card->name, len, rpp->len, readl(SAR_REG_CDC));
22578 recycle_rx_pool_skb(card, rpp);
22579 - atomic_inc(&vcc->stats->rx_err);
22580 + atomic_inc_unchecked(&vcc->stats->rx_err);
22583 if (stat & SAR_RSQE_CRC) {
22584 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22585 recycle_rx_pool_skb(card, rpp);
22586 - atomic_inc(&vcc->stats->rx_err);
22587 + atomic_inc_unchecked(&vcc->stats->rx_err);
22590 if (skb_queue_len(&rpp->queue) > 1) {
22591 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22592 RXPRINTK("%s: Can't alloc RX skb.\n",
22594 recycle_rx_pool_skb(card, rpp);
22595 - atomic_inc(&vcc->stats->rx_err);
22596 + atomic_inc_unchecked(&vcc->stats->rx_err);
22599 if (!atm_charge(vcc, skb->truesize)) {
22600 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22601 __net_timestamp(skb);
22603 vcc->push(vcc, skb);
22604 - atomic_inc(&vcc->stats->rx);
22605 + atomic_inc_unchecked(&vcc->stats->rx);
22609 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22610 __net_timestamp(skb);
22612 vcc->push(vcc, skb);
22613 - atomic_inc(&vcc->stats->rx);
22614 + atomic_inc_unchecked(&vcc->stats->rx);
22616 if (skb->truesize > SAR_FB_SIZE_3)
22617 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22618 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22619 if (vcc->qos.aal != ATM_AAL0) {
22620 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22621 card->name, vpi, vci);
22622 - atomic_inc(&vcc->stats->rx_drop);
22623 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22627 if ((sb = dev_alloc_skb(64)) == NULL) {
22628 printk("%s: Can't allocate buffers for AAL0.\n",
22630 - atomic_inc(&vcc->stats->rx_err);
22631 + atomic_inc_unchecked(&vcc->stats->rx_err);
22635 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22636 ATM_SKB(sb)->vcc = vcc;
22637 __net_timestamp(sb);
22638 vcc->push(vcc, sb);
22639 - atomic_inc(&vcc->stats->rx);
22640 + atomic_inc_unchecked(&vcc->stats->rx);
22643 skb_pull(queue, 64);
22644 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22647 printk("%s: NULL connection in send().\n", card->name);
22648 - atomic_inc(&vcc->stats->tx_err);
22649 + atomic_inc_unchecked(&vcc->stats->tx_err);
22650 dev_kfree_skb(skb);
22653 if (!test_bit(VCF_TX, &vc->flags)) {
22654 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22655 - atomic_inc(&vcc->stats->tx_err);
22656 + atomic_inc_unchecked(&vcc->stats->tx_err);
22657 dev_kfree_skb(skb);
22660 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22663 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22664 - atomic_inc(&vcc->stats->tx_err);
22665 + atomic_inc_unchecked(&vcc->stats->tx_err);
22666 dev_kfree_skb(skb);
22670 if (skb_shinfo(skb)->nr_frags != 0) {
22671 printk("%s: No scatter-gather yet.\n", card->name);
22672 - atomic_inc(&vcc->stats->tx_err);
22673 + atomic_inc_unchecked(&vcc->stats->tx_err);
22674 dev_kfree_skb(skb);
22677 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22679 err = queue_skb(card, vc, skb, oam);
22681 - atomic_inc(&vcc->stats->tx_err);
22682 + atomic_inc_unchecked(&vcc->stats->tx_err);
22683 dev_kfree_skb(skb);
22686 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22687 skb = dev_alloc_skb(64);
22689 printk("%s: Out of memory in send_oam().\n", card->name);
22690 - atomic_inc(&vcc->stats->tx_err);
22691 + atomic_inc_unchecked(&vcc->stats->tx_err);
22694 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22695 diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22696 --- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22697 +++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22698 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22699 status = (u_short) (buf_desc_ptr->desc_mode);
22700 if (status & (RX_CER | RX_PTE | RX_OFL))
22702 - atomic_inc(&vcc->stats->rx_err);
22703 + atomic_inc_unchecked(&vcc->stats->rx_err);
22704 IF_ERR(printk("IA: bad packet, dropping it");)
22705 if (status & RX_CER) {
22706 IF_ERR(printk(" cause: packet CRC error\n");)
22707 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22708 len = dma_addr - buf_addr;
22709 if (len > iadev->rx_buf_sz) {
22710 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22711 - atomic_inc(&vcc->stats->rx_err);
22712 + atomic_inc_unchecked(&vcc->stats->rx_err);
22713 goto out_free_desc;
22716 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22717 ia_vcc = INPH_IA_VCC(vcc);
22718 if (ia_vcc == NULL)
22720 - atomic_inc(&vcc->stats->rx_err);
22721 + atomic_inc_unchecked(&vcc->stats->rx_err);
22722 dev_kfree_skb_any(skb);
22723 atm_return(vcc, atm_guess_pdu2truesize(len));
22725 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22726 if ((length > iadev->rx_buf_sz) || (length >
22727 (skb->len - sizeof(struct cpcs_trailer))))
22729 - atomic_inc(&vcc->stats->rx_err);
22730 + atomic_inc_unchecked(&vcc->stats->rx_err);
22731 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22732 length, skb->len);)
22733 dev_kfree_skb_any(skb);
22734 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22736 IF_RX(printk("rx_dle_intr: skb push");)
22737 vcc->push(vcc,skb);
22738 - atomic_inc(&vcc->stats->rx);
22739 + atomic_inc_unchecked(&vcc->stats->rx);
22740 iadev->rx_pkt_cnt++;
22743 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22745 struct k_sonet_stats *stats;
22746 stats = &PRIV(_ia_dev[board])->sonet_stats;
22747 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22748 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22749 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22750 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22751 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22752 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22753 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22754 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22755 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22756 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22757 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22758 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22759 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22760 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22761 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22762 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22763 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22764 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22766 ia_cmds.status = 0;
22768 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22769 if ((desc == 0) || (desc > iadev->num_tx_desc))
22771 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22772 - atomic_inc(&vcc->stats->tx);
22773 + atomic_inc_unchecked(&vcc->stats->tx);
22775 vcc->pop(vcc, skb);
22777 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22778 ATM_DESC(skb) = vcc->vci;
22779 skb_queue_tail(&iadev->tx_dma_q, skb);
22781 - atomic_inc(&vcc->stats->tx);
22782 + atomic_inc_unchecked(&vcc->stats->tx);
22783 iadev->tx_pkt_cnt++;
22784 /* Increment transaction counter */
22785 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22788 /* add flow control logic */
22789 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22790 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22791 if (iavcc->vc_desc_cnt > 10) {
22792 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22793 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22794 diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22795 --- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22796 +++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22797 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22798 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22799 lanai_endtx(lanai, lvcc);
22800 lanai_free_skb(lvcc->tx.atmvcc, skb);
22801 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22802 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22805 /* Try to fill the buffer - don't call unless there is backlog */
22806 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22807 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22808 __net_timestamp(skb);
22809 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22810 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22811 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22813 lvcc->rx.buf.ptr = end;
22814 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22815 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22816 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22817 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22818 lanai->stats.service_rxnotaal5++;
22819 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22820 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22823 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22824 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22826 read_unlock(&vcc_sklist_lock);
22827 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22828 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22829 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22830 lvcc->stats.x.aal5.service_trash++;
22831 bytes = (SERVICE_GET_END(s) * 16) -
22832 (((unsigned long) lvcc->rx.buf.ptr) -
22833 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22835 if (s & SERVICE_STREAM) {
22836 read_unlock(&vcc_sklist_lock);
22837 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22838 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22839 lvcc->stats.x.aal5.service_stream++;
22840 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22841 "PDU on VCI %d!\n", lanai->number, vci);
22842 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22845 DPRINTK("got rx crc error on vci %d\n", vci);
22846 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22847 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22848 lvcc->stats.x.aal5.service_rxcrc++;
22849 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22850 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22851 diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22852 --- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22853 +++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22854 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22855 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22856 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22858 - atomic_inc(&vcc->stats->tx_err);
22859 + atomic_inc_unchecked(&vcc->stats->tx_err);
22860 dev_kfree_skb_any(skb);
22863 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22865 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22867 - atomic_inc(&vcc->stats->tx_err);
22868 + atomic_inc_unchecked(&vcc->stats->tx_err);
22869 dev_kfree_skb_any(skb);
22872 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22873 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22874 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22876 - atomic_inc(&vcc->stats->tx_err);
22877 + atomic_inc_unchecked(&vcc->stats->tx_err);
22878 dev_kfree_skb_any(skb);
22882 if (skb_shinfo(skb)->nr_frags != 0) {
22883 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22884 - atomic_inc(&vcc->stats->tx_err);
22885 + atomic_inc_unchecked(&vcc->stats->tx_err);
22886 dev_kfree_skb_any(skb);
22889 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22892 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22893 - atomic_inc(&vcc->stats->tx_err);
22894 + atomic_inc_unchecked(&vcc->stats->tx_err);
22895 dev_kfree_skb_any(skb);
22898 - atomic_inc(&vcc->stats->tx);
22899 + atomic_inc_unchecked(&vcc->stats->tx);
22903 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22905 ("nicstar%d: Can't allocate buffers for aal0.\n",
22907 - atomic_add(i, &vcc->stats->rx_drop);
22908 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22911 if (!atm_charge(vcc, sb->truesize)) {
22913 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22915 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22916 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22917 dev_kfree_skb_any(sb);
22920 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22921 ATM_SKB(sb)->vcc = vcc;
22922 __net_timestamp(sb);
22923 vcc->push(vcc, sb);
22924 - atomic_inc(&vcc->stats->rx);
22925 + atomic_inc_unchecked(&vcc->stats->rx);
22926 cell += ATM_CELL_PAYLOAD;
22929 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22930 if (iovb == NULL) {
22931 printk("nicstar%d: Out of iovec buffers.\n",
22933 - atomic_inc(&vcc->stats->rx_drop);
22934 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22935 recycle_rx_buf(card, skb);
22938 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22939 small or large buffer itself. */
22940 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22941 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22942 - atomic_inc(&vcc->stats->rx_err);
22943 + atomic_inc_unchecked(&vcc->stats->rx_err);
22944 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22946 NS_PRV_IOVCNT(iovb) = 0;
22947 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22948 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22950 which_list(card, skb);
22951 - atomic_inc(&vcc->stats->rx_err);
22952 + atomic_inc_unchecked(&vcc->stats->rx_err);
22953 recycle_rx_buf(card, skb);
22955 recycle_iov_buf(card, iovb);
22956 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22957 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22959 which_list(card, skb);
22960 - atomic_inc(&vcc->stats->rx_err);
22961 + atomic_inc_unchecked(&vcc->stats->rx_err);
22962 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22963 NS_PRV_IOVCNT(iovb));
22965 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22966 printk(" - PDU size mismatch.\n");
22969 - atomic_inc(&vcc->stats->rx_err);
22970 + atomic_inc_unchecked(&vcc->stats->rx_err);
22971 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22972 NS_PRV_IOVCNT(iovb));
22974 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22975 /* skb points to a small buffer */
22976 if (!atm_charge(vcc, skb->truesize)) {
22977 push_rxbufs(card, skb);
22978 - atomic_inc(&vcc->stats->rx_drop);
22979 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22982 dequeue_sm_buf(card, skb);
22983 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22984 ATM_SKB(skb)->vcc = vcc;
22985 __net_timestamp(skb);
22986 vcc->push(vcc, skb);
22987 - atomic_inc(&vcc->stats->rx);
22988 + atomic_inc_unchecked(&vcc->stats->rx);
22990 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22991 struct sk_buff *sb;
22992 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22993 if (len <= NS_SMBUFSIZE) {
22994 if (!atm_charge(vcc, sb->truesize)) {
22995 push_rxbufs(card, sb);
22996 - atomic_inc(&vcc->stats->rx_drop);
22997 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23000 dequeue_sm_buf(card, sb);
23001 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23002 ATM_SKB(sb)->vcc = vcc;
23003 __net_timestamp(sb);
23004 vcc->push(vcc, sb);
23005 - atomic_inc(&vcc->stats->rx);
23006 + atomic_inc_unchecked(&vcc->stats->rx);
23009 push_rxbufs(card, skb);
23010 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23012 if (!atm_charge(vcc, skb->truesize)) {
23013 push_rxbufs(card, skb);
23014 - atomic_inc(&vcc->stats->rx_drop);
23015 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23017 dequeue_lg_buf(card, skb);
23018 #ifdef NS_USE_DESTRUCTORS
23019 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23020 ATM_SKB(skb)->vcc = vcc;
23021 __net_timestamp(skb);
23022 vcc->push(vcc, skb);
23023 - atomic_inc(&vcc->stats->rx);
23024 + atomic_inc_unchecked(&vcc->stats->rx);
23027 push_rxbufs(card, sb);
23028 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23030 ("nicstar%d: Out of huge buffers.\n",
23032 - atomic_inc(&vcc->stats->rx_drop);
23033 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23034 recycle_iovec_rx_bufs(card,
23037 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23038 card->hbpool.count++;
23040 dev_kfree_skb_any(hb);
23041 - atomic_inc(&vcc->stats->rx_drop);
23042 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23044 /* Copy the small buffer to the huge buffer */
23045 sb = (struct sk_buff *)iov->iov_base;
23046 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23047 #endif /* NS_USE_DESTRUCTORS */
23048 __net_timestamp(hb);
23049 vcc->push(vcc, hb);
23050 - atomic_inc(&vcc->stats->rx);
23051 + atomic_inc_unchecked(&vcc->stats->rx);
23055 diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23056 --- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23057 +++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23058 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23060 atm_charge(vcc, skb->truesize);
23061 vcc->push(vcc, skb);
23062 - atomic_inc(&vcc->stats->rx);
23063 + atomic_inc_unchecked(&vcc->stats->rx);
23067 @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23071 + pax_track_stack();
23074 for (i = 0; i < len; i++){
23076 @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23077 vcc = SKB_CB(oldskb)->vcc;
23080 - atomic_inc(&vcc->stats->tx);
23081 + atomic_inc_unchecked(&vcc->stats->tx);
23082 solos_pop(vcc, oldskb);
23084 dev_kfree_skb_irq(oldskb);
23085 diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23086 --- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23087 +++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23088 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23091 #define ADD_LIMITED(s,v) \
23092 - atomic_add((v),&stats->s); \
23093 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23094 + atomic_add_unchecked((v),&stats->s); \
23095 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23098 static void suni_hz(unsigned long from_timer)
23099 diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23100 --- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23101 +++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23102 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23103 struct sonet_stats tmp;
23106 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23107 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23108 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23109 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23110 if (zero && !error) {
23111 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23114 #define ADD_LIMITED(s,v) \
23115 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23116 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23117 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23118 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23119 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23120 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23123 static void stat_event(struct atm_dev *dev)
23124 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23125 if (reason & uPD98402_INT_PFM) stat_event(dev);
23126 if (reason & uPD98402_INT_PCO) {
23127 (void) GET(PCOCR); /* clear interrupt cause */
23128 - atomic_add(GET(HECCT),
23129 + atomic_add_unchecked(GET(HECCT),
23130 &PRIV(dev)->sonet_stats.uncorr_hcs);
23132 if ((reason & uPD98402_INT_RFO) &&
23133 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23134 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23135 uPD98402_INT_LOS),PIMR); /* enable them */
23136 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23137 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23138 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23139 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23140 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23141 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23142 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23146 diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23147 --- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23148 +++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23149 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23152 dev_kfree_skb_irq(skb);
23153 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23154 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23157 if (!atm_charge(vcc,skb->truesize)) {
23158 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23160 ATM_SKB(skb)->vcc = vcc;
23161 vcc->push(vcc,skb);
23162 - atomic_inc(&vcc->stats->rx);
23163 + atomic_inc_unchecked(&vcc->stats->rx);
23165 zout(pos & 0xffff,MTA(mbx));
23166 #if 0 /* probably a stupid idea */
23167 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23168 skb_queue_head(&zatm_vcc->backlog,skb);
23171 - atomic_inc(&vcc->stats->tx);
23172 + atomic_inc_unchecked(&vcc->stats->tx);
23173 wake_up(&zatm_vcc->tx_wait);
23176 diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23177 --- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23178 +++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23179 @@ -29,14 +29,14 @@ bool events_check_enabled;
23180 * They need to be modified together atomically, so it's better to use one
23181 * atomic variable to hold them both.
23183 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23184 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23186 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23187 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23189 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23191 - unsigned int comb = atomic_read(&combined_event_count);
23192 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23194 *cnt = (comb >> IN_PROGRESS_BITS);
23195 *inpr = comb & MAX_IN_PROGRESS;
23196 @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23197 ws->last_time = ktime_get();
23199 /* Increment the counter of events in progress. */
23200 - atomic_inc(&combined_event_count);
23201 + atomic_inc_unchecked(&combined_event_count);
23205 @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23206 * Increment the counter of registered wakeup events and decrement the
23207 * couter of wakeup events in progress simultaneously.
23209 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23210 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23214 diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23215 --- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23216 +++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23217 @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23221 + memset(&arg64, 0, sizeof(arg64));
23225 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23226 @@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23227 while (!list_empty(&h->reqQ)) {
23228 c = list_entry(h->reqQ.next, CommandList_struct, list);
23229 /* can't do anything if fifo is full */
23230 - if ((h->access.fifo_full(h))) {
23231 + if ((h->access->fifo_full(h))) {
23232 dev_warn(&h->pdev->dev, "fifo full\n");
23235 @@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23238 /* Tell the controller execute command */
23239 - h->access.submit_command(h, c);
23240 + h->access->submit_command(h, c);
23242 /* Put job onto the completed Q */
23244 @@ -3369,17 +3371,17 @@ startio:
23246 static inline unsigned long get_next_completion(ctlr_info_t *h)
23248 - return h->access.command_completed(h);
23249 + return h->access->command_completed(h);
23252 static inline int interrupt_pending(ctlr_info_t *h)
23254 - return h->access.intr_pending(h);
23255 + return h->access->intr_pending(h);
23258 static inline long interrupt_not_for_us(ctlr_info_t *h)
23260 - return ((h->access.intr_pending(h) == 0) ||
23261 + return ((h->access->intr_pending(h) == 0) ||
23262 (h->interrupts_enabled == 0));
23265 @@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23268 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23269 - return h->access.command_completed(h);
23270 + return h->access->command_completed(h);
23272 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23273 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23274 @@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23275 trans_support & CFGTBL_Trans_use_short_tags);
23277 /* Change the access methods to the performant access methods */
23278 - h->access = SA5_performant_access;
23279 + h->access = &SA5_performant_access;
23280 h->transMethod = CFGTBL_Trans_Performant;
23283 @@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23284 if (prod_index < 0)
23286 h->product_name = products[prod_index].product_name;
23287 - h->access = *(products[prod_index].access);
23288 + h->access = products[prod_index].access;
23290 if (cciss_board_disabled(h)) {
23291 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23292 @@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23295 /* make sure the board interrupts are off */
23296 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23297 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23298 if (h->msi_vector || h->msix_vector) {
23299 if (request_irq(h->intr[PERF_MODE_INT],
23300 do_cciss_msix_intr,
23301 @@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23302 cciss_scsi_setup(h);
23304 /* Turn the interrupts on so we can service requests */
23305 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23306 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23308 /* Get the firmware version */
23309 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23310 @@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23312 if (return_code != IO_OK)
23313 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23314 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23315 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23316 free_irq(h->intr[PERF_MODE_INT], h);
23319 diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23320 --- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23321 +++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23322 @@ -100,7 +100,7 @@ struct ctlr_info
23323 /* information about each logical volume */
23324 drive_info_struct *drv[CISS_MAX_LUN];
23326 - struct access_method access;
23327 + struct access_method *access;
23329 /* queue and queue Info */
23330 struct list_head reqQ;
23331 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23332 --- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23333 +++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23334 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23335 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23338 - hba[i]->access.set_intr_mask(hba[i], 0);
23339 + hba[i]->access->set_intr_mask(hba[i], 0);
23340 if (request_irq(hba[i]->intr, do_ida_intr,
23341 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23343 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23344 add_timer(&hba[i]->timer);
23346 /* Enable IRQ now that spinlock and rate limit timer are set up */
23347 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23348 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23350 for(j=0; j<NWD; j++) {
23351 struct gendisk *disk = ida_gendisk[i][j];
23352 @@ -694,7 +694,7 @@ DBGINFO(
23353 for(i=0; i<NR_PRODUCTS; i++) {
23354 if (board_id == products[i].board_id) {
23355 c->product_name = products[i].product_name;
23356 - c->access = *(products[i].access);
23357 + c->access = products[i].access;
23361 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23362 hba[ctlr]->intr = intr;
23363 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23364 hba[ctlr]->product_name = products[j].product_name;
23365 - hba[ctlr]->access = *(products[j].access);
23366 + hba[ctlr]->access = products[j].access;
23367 hba[ctlr]->ctlr = ctlr;
23368 hba[ctlr]->board_id = board_id;
23369 hba[ctlr]->pci_dev = NULL; /* not PCI */
23370 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23371 struct scatterlist tmp_sg[SG_MAX];
23374 + pax_track_stack();
23377 creq = blk_peek_request(q);
23379 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23381 while((c = h->reqQ) != NULL) {
23382 /* Can't do anything if we're busy */
23383 - if (h->access.fifo_full(h) == 0)
23384 + if (h->access->fifo_full(h) == 0)
23387 /* Get the first entry from the request Q */
23388 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23391 /* Tell the controller to do our bidding */
23392 - h->access.submit_command(h, c);
23393 + h->access->submit_command(h, c);
23395 /* Get onto the completion Q */
23397 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23398 unsigned long flags;
23401 - istat = h->access.intr_pending(h);
23402 + istat = h->access->intr_pending(h);
23403 /* Is this interrupt for us? */
23406 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23408 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23409 if (istat & FIFO_NOT_EMPTY) {
23410 - while((a = h->access.command_completed(h))) {
23411 + while((a = h->access->command_completed(h))) {
23413 if ((c = h->cmpQ) == NULL)
23415 @@ -1449,11 +1451,11 @@ static int sendcmd(
23417 * Disable interrupt
23419 - info_p->access.set_intr_mask(info_p, 0);
23420 + info_p->access->set_intr_mask(info_p, 0);
23421 /* Make sure there is room in the command FIFO */
23422 /* Actually it should be completely empty at this time. */
23423 for (i = 200000; i > 0; i--) {
23424 - temp = info_p->access.fifo_full(info_p);
23425 + temp = info_p->access->fifo_full(info_p);
23429 @@ -1466,7 +1468,7 @@ DBG(
23433 - info_p->access.submit_command(info_p, c);
23434 + info_p->access->submit_command(info_p, c);
23435 complete = pollcomplete(ctlr);
23437 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23438 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23439 * we check the new geometry. Then turn interrupts back on when
23442 - host->access.set_intr_mask(host, 0);
23443 + host->access->set_intr_mask(host, 0);
23445 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23446 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23448 for(i=0; i<NWD; i++) {
23449 struct gendisk *disk = ida_gendisk[ctlr][i];
23450 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23451 /* Wait (up to 2 seconds) for a command to complete */
23453 for (i = 200000; i > 0; i--) {
23454 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23455 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23457 udelay(10); /* a short fixed delay */
23459 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23460 --- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23461 +++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23462 @@ -99,7 +99,7 @@ struct ctlr_info {
23463 drv_info_t drv[NWD];
23464 struct proc_dir_entry *proc;
23466 - struct access_method access;
23467 + struct access_method *access;
23471 diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23472 --- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23473 +++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23474 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23475 unsigned long flags;
23476 int Channel, TargetID;
23478 + pax_track_stack();
23480 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23481 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23482 sizeof(DAC960_SCSI_Inquiry_T) +
23483 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23484 --- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23485 +++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23486 @@ -736,7 +736,7 @@ struct drbd_request;
23487 struct drbd_epoch {
23488 struct list_head list;
23489 unsigned int barrier_nr;
23490 - atomic_t epoch_size; /* increased on every request added. */
23491 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23492 atomic_t active; /* increased on every req. added, and dec on every finished. */
23493 unsigned long flags;
23495 @@ -1108,7 +1108,7 @@ struct drbd_conf {
23498 wait_queue_head_t seq_wait;
23499 - atomic_t packet_seq;
23500 + atomic_unchecked_t packet_seq;
23501 unsigned int peer_seq;
23502 spinlock_t peer_seq_lock;
23503 unsigned int minor;
23504 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23505 --- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23506 +++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23507 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23509 p.block_id = block_id;
23510 p.blksize = blksize;
23511 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23512 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23514 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23516 @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23517 p.sector = cpu_to_be64(req->sector);
23518 p.block_id = (unsigned long)req;
23519 p.seq_num = cpu_to_be32(req->seq_num =
23520 - atomic_add_return(1, &mdev->packet_seq));
23521 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23523 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23525 @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23526 atomic_set(&mdev->unacked_cnt, 0);
23527 atomic_set(&mdev->local_cnt, 0);
23528 atomic_set(&mdev->net_cnt, 0);
23529 - atomic_set(&mdev->packet_seq, 0);
23530 + atomic_set_unchecked(&mdev->packet_seq, 0);
23531 atomic_set(&mdev->pp_in_use, 0);
23532 atomic_set(&mdev->pp_in_use_by_net, 0);
23533 atomic_set(&mdev->rs_sect_in, 0);
23534 @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23535 mdev->receiver.t_state);
23537 /* no need to lock it, I'm the only thread alive */
23538 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23539 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23540 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23541 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23542 mdev->al_writ_cnt =
23543 mdev->bm_writ_cnt =
23545 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23546 --- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23547 +++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23548 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23549 module_put(THIS_MODULE);
23552 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23553 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23555 static unsigned short *
23556 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23557 @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23558 cn_reply->id.idx = CN_IDX_DRBD;
23559 cn_reply->id.val = CN_VAL_DRBD;
23561 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23562 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23563 cn_reply->ack = 0; /* not used here. */
23564 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23565 (int)((char *)tl - (char *)reply->tag_list);
23566 @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23567 cn_reply->id.idx = CN_IDX_DRBD;
23568 cn_reply->id.val = CN_VAL_DRBD;
23570 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23571 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23572 cn_reply->ack = 0; /* not used here. */
23573 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23574 (int)((char *)tl - (char *)reply->tag_list);
23575 @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23576 cn_reply->id.idx = CN_IDX_DRBD;
23577 cn_reply->id.val = CN_VAL_DRBD;
23579 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23580 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23581 cn_reply->ack = 0; // not used here.
23582 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23583 (int)((char*)tl - (char*)reply->tag_list);
23584 @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23585 cn_reply->id.idx = CN_IDX_DRBD;
23586 cn_reply->id.val = CN_VAL_DRBD;
23588 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23589 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23590 cn_reply->ack = 0; /* not used here. */
23591 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23592 (int)((char *)tl - (char *)reply->tag_list);
23593 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23594 --- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23595 +++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23596 @@ -894,7 +894,7 @@ retry:
23597 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23598 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23600 - atomic_set(&mdev->packet_seq, 0);
23601 + atomic_set_unchecked(&mdev->packet_seq, 0);
23602 mdev->peer_seq = 0;
23604 drbd_thread_start(&mdev->asender);
23605 @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23609 - epoch_size = atomic_read(&epoch->epoch_size);
23610 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23612 switch (ev & ~EV_CLEANUP) {
23614 @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23618 - atomic_set(&epoch->epoch_size, 0);
23619 + atomic_set_unchecked(&epoch->epoch_size, 0);
23620 /* atomic_set(&epoch->active, 0); is already zero */
23621 if (rv == FE_STILL_LIVE)
23623 @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23624 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23627 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23628 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23629 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23634 epoch = mdev->current_epoch;
23635 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23636 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23638 D_ASSERT(atomic_read(&epoch->active) == 0);
23639 D_ASSERT(epoch->flags == 0);
23640 @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23644 - atomic_set(&epoch->epoch_size, 0);
23645 + atomic_set_unchecked(&epoch->epoch_size, 0);
23646 atomic_set(&epoch->active, 0);
23648 spin_lock(&mdev->epoch_lock);
23649 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23650 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23651 list_add(&epoch->list, &mdev->current_epoch->list);
23652 mdev->current_epoch = epoch;
23654 @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23655 spin_unlock(&mdev->peer_seq_lock);
23657 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23658 - atomic_inc(&mdev->current_epoch->epoch_size);
23659 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23660 return drbd_drain_block(mdev, data_size);
23663 @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23665 spin_lock(&mdev->epoch_lock);
23666 e->epoch = mdev->current_epoch;
23667 - atomic_inc(&e->epoch->epoch_size);
23668 + atomic_inc_unchecked(&e->epoch->epoch_size);
23669 atomic_inc(&e->epoch->active);
23670 spin_unlock(&mdev->epoch_lock);
23672 @@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23673 D_ASSERT(list_empty(&mdev->done_ee));
23675 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23676 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23677 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23678 D_ASSERT(list_empty(&mdev->current_epoch->list));
23681 diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23682 --- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23683 +++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23684 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23686 sigset_t blocked, oldset;
23688 + pax_track_stack();
23690 if (unlikely(!sock)) {
23691 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23692 lo->disk->disk_name, (send ? "send" : "recv"));
23693 @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23694 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23695 unsigned int cmd, unsigned long arg)
23697 + pax_track_stack();
23700 case NBD_DISCONNECT: {
23701 struct request sreq;
23702 diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23703 --- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23704 +++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23705 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23706 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23709 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23710 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23713 client = agp_find_client_by_pid(reserve.pid);
23714 diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23715 --- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23716 +++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23718 #include <linux/types.h>
23719 #include <linux/errno.h>
23720 #include <linux/tty.h>
23721 +#include <linux/mutex.h>
23722 #include <linux/timer.h>
23723 #include <linux/kernel.h>
23724 #include <linux/wait.h>
23725 @@ -34,6 +35,7 @@ static int vfd_is_open;
23726 static unsigned char vfd[40];
23727 static int vfd_cursor;
23728 static unsigned char ledpb, led;
23729 +static DEFINE_MUTEX(vfd_mutex);
23731 static void update_vfd(void)
23733 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23737 + mutex_lock(&vfd_mutex);
23742 - if (get_user(c, buf))
23743 + if (get_user(c, buf)) {
23744 + mutex_unlock(&vfd_mutex);
23750 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23754 + mutex_unlock(&vfd_mutex);
23758 diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23759 --- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23760 +++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23761 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23765 + memset(&pll, 0, sizeof(pll));
23766 if (get_rtc_pll(&pll))
23769 diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23770 --- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23771 +++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23772 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23776 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23777 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23778 struct hpet_info *info)
23780 struct hpet_timer __iomem *timer;
23781 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23782 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23783 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23784 @@ -414,7 +414,7 @@ struct ipmi_smi {
23785 struct proc_dir_entry *proc_dir;
23786 char proc_dir_name[10];
23788 - atomic_t stats[IPMI_NUM_STATS];
23789 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23792 * run_to_completion duplicate of smb_info, smi_info
23793 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23796 #define ipmi_inc_stat(intf, stat) \
23797 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23798 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23799 #define ipmi_get_stat(intf, stat) \
23800 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23801 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23803 static int is_lan_addr(struct ipmi_addr *addr)
23805 @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23806 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23807 init_waitqueue_head(&intf->waitq);
23808 for (i = 0; i < IPMI_NUM_STATS; i++)
23809 - atomic_set(&intf->stats[i], 0);
23810 + atomic_set_unchecked(&intf->stats[i], 0);
23812 intf->proc_dir = NULL;
23814 @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23815 struct ipmi_smi_msg smi_msg;
23816 struct ipmi_recv_msg recv_msg;
23818 + pax_track_stack();
23820 si = (struct ipmi_system_interface_addr *) &addr;
23821 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23822 si->channel = IPMI_BMC_CHANNEL;
23823 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23824 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23825 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23826 @@ -276,7 +276,7 @@ struct smi_info {
23827 unsigned char slave_addr;
23829 /* Counters and things for the proc filesystem. */
23830 - atomic_t stats[SI_NUM_STATS];
23831 + atomic_unchecked_t stats[SI_NUM_STATS];
23833 struct task_struct *thread;
23835 @@ -285,9 +285,9 @@ struct smi_info {
23838 #define smi_inc_stat(smi, stat) \
23839 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23840 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23841 #define smi_get_stat(smi, stat) \
23842 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23843 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23845 #define SI_MAX_PARMS 4
23847 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23848 atomic_set(&new_smi->req_events, 0);
23849 new_smi->run_to_completion = 0;
23850 for (i = 0; i < SI_NUM_STATS; i++)
23851 - atomic_set(&new_smi->stats[i], 0);
23852 + atomic_set_unchecked(&new_smi->stats[i], 0);
23854 new_smi->interrupt_disabled = 1;
23855 atomic_set(&new_smi->stop_operation, 0);
23856 diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23857 --- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23858 +++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23859 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23862 bool "/dev/kmem virtual device support"
23865 + depends on !GRKERNSEC_KMEM
23867 Say Y here if you want to support the /dev/kmem device. The
23868 /dev/kmem device is rarely used, but can be used for certain
23869 @@ -596,6 +597,7 @@ config DEVPORT
23872 depends on ISA || PCI
23873 + depends on !GRKERNSEC_KMEM
23876 source "drivers/s390/char/Kconfig"
23877 diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23878 --- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23879 +++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23881 #include <linux/raw.h>
23882 #include <linux/tty.h>
23883 #include <linux/capability.h>
23884 +#include <linux/security.h>
23885 #include <linux/ptrace.h>
23886 #include <linux/device.h>
23887 #include <linux/highmem.h>
23889 # include <linux/efi.h>
23892 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23893 +extern struct file_operations grsec_fops;
23896 static inline unsigned long size_inside_page(unsigned long start,
23897 unsigned long size)
23899 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23901 while (cursor < to) {
23902 if (!devmem_is_allowed(pfn)) {
23903 +#ifdef CONFIG_GRKERNSEC_KMEM
23904 + gr_handle_mem_readwrite(from, to);
23907 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23908 current->comm, from, to);
23912 cursor += PAGE_SIZE;
23913 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23917 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23918 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23923 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23925 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23927 while (count > 0) {
23928 unsigned long remaining;
23931 sz = size_inside_page(p, count);
23933 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23937 - remaining = copy_to_user(buf, ptr, sz);
23938 +#ifdef CONFIG_PAX_USERCOPY
23939 + temp = kmalloc(sz, GFP_KERNEL);
23941 + unxlate_dev_mem_ptr(p, ptr);
23944 + memcpy(temp, ptr, sz);
23949 + remaining = copy_to_user(buf, temp, sz);
23951 +#ifdef CONFIG_PAX_USERCOPY
23955 unxlate_dev_mem_ptr(p, ptr);
23958 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23959 size_t count, loff_t *ppos)
23961 unsigned long p = *ppos;
23962 - ssize_t low_count, read, sz;
23963 + ssize_t low_count, read, sz, err = 0;
23964 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23968 if (p < (unsigned long) high_memory) {
23969 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23972 while (low_count > 0) {
23975 sz = size_inside_page(p, low_count);
23978 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23980 kbuf = xlate_dev_kmem_ptr((char *)p);
23982 - if (copy_to_user(buf, kbuf, sz))
23983 +#ifdef CONFIG_PAX_USERCOPY
23984 + temp = kmalloc(sz, GFP_KERNEL);
23987 + memcpy(temp, kbuf, sz);
23992 + err = copy_to_user(buf, temp, sz);
23994 +#ifdef CONFIG_PAX_USERCOPY
24002 @@ -854,6 +901,9 @@ static const struct memdev {
24003 #ifdef CONFIG_CRASH_DUMP
24004 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24006 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24007 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24011 static int memory_open(struct inode *inode, struct file *filp)
24012 diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24013 --- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24014 +++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24015 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24017 spin_unlock_irq(&rtc_lock);
24019 - if (copy_to_user(buf, contents, tmp - contents))
24020 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24024 diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24025 --- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24026 +++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24027 @@ -261,8 +261,13 @@
24029 * Configuration information
24031 +#ifdef CONFIG_GRKERNSEC_RANDNET
24032 +#define INPUT_POOL_WORDS 512
24033 +#define OUTPUT_POOL_WORDS 128
24035 #define INPUT_POOL_WORDS 128
24036 #define OUTPUT_POOL_WORDS 32
24038 #define SEC_XFER_SIZE 512
24039 #define EXTRACT_SIZE 10
24041 @@ -300,10 +305,17 @@ static struct poolinfo {
24043 int tap1, tap2, tap3, tap4, tap5;
24044 } poolinfo_table[] = {
24045 +#ifdef CONFIG_GRKERNSEC_RANDNET
24046 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24047 + { 512, 411, 308, 208, 104, 1 },
24048 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24049 + { 128, 103, 76, 51, 25, 1 },
24051 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24052 { 128, 103, 76, 51, 25, 1 },
24053 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24054 { 32, 26, 20, 14, 7, 1 },
24057 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24058 { 2048, 1638, 1231, 819, 411, 1 },
24059 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24061 extract_buf(r, tmp);
24062 i = min_t(int, nbytes, EXTRACT_SIZE);
24063 - if (copy_to_user(buf, tmp, i)) {
24064 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24068 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24069 #include <linux/sysctl.h>
24071 static int min_read_thresh = 8, min_write_thresh;
24072 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24073 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24074 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24075 static char sysctl_bootid[16];
24077 diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24078 --- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24079 +++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24081 #include <asm/uaccess.h>
24082 #include <asm/io.h>
24083 #include <asm/system.h>
24084 +#include <asm/local.h>
24086 #include <linux/sonypi.h>
24088 @@ -491,7 +492,7 @@ static struct sonypi_device {
24089 spinlock_t fifo_lock;
24090 wait_queue_head_t fifo_proc_list;
24091 struct fasync_struct *fifo_async;
24093 + local_t open_count;
24095 struct input_dev *input_jog_dev;
24096 struct input_dev *input_key_dev;
24097 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24098 static int sonypi_misc_release(struct inode *inode, struct file *file)
24100 mutex_lock(&sonypi_device.lock);
24101 - sonypi_device.open_count--;
24102 + local_dec(&sonypi_device.open_count);
24103 mutex_unlock(&sonypi_device.lock);
24106 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24108 mutex_lock(&sonypi_device.lock);
24109 /* Flush input queue on first open */
24110 - if (!sonypi_device.open_count)
24111 + if (!local_read(&sonypi_device.open_count))
24112 kfifo_reset(&sonypi_device.fifo);
24113 - sonypi_device.open_count++;
24114 + local_inc(&sonypi_device.open_count);
24115 mutex_unlock(&sonypi_device.lock);
24118 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24119 --- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24120 +++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24121 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24124 if ((event->event_type == 0 && event->event_size == 0) ||
24125 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24126 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24130 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24133 if ((event->event_type == 0 && event->event_size == 0) ||
24134 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24135 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24139 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24142 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24143 - seq_putc(m, data[i]);
24144 + if (!seq_putc(m, data[i]))
24149 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24150 log->bios_event_log_end = log->bios_event_log + len;
24152 virt = acpi_os_map_memory(start, len);
24154 + kfree(log->bios_event_log);
24155 + log->bios_event_log = NULL;
24159 memcpy(log->bios_event_log, virt, len);
24161 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24162 --- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24163 +++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24164 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24165 chip->vendor.req_complete_val)
24168 - if ((status == chip->vendor.req_canceled)) {
24169 + if (status == chip->vendor.req_canceled) {
24170 dev_err(chip->dev, "Operation Canceled\n");
24173 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24175 struct tpm_chip *chip = dev_get_drvdata(dev);
24177 + pax_track_stack();
24179 tpm_cmd.header.in = tpm_readpubek_header;
24180 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24181 "attempting to read the PUBEK");
24182 diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24183 --- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24184 +++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24185 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24186 0xCA, 0x34, 0x2B, 0x2E};
24187 struct scatterlist sg;
24189 + pax_track_stack();
24191 memset(src, 0, sizeof(src));
24192 memset(ctx.key, 0, sizeof(ctx.key));
24194 diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24195 --- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24196 +++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24197 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24198 struct crypto_aes_ctx gen_aes;
24201 + pax_track_stack();
24204 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24206 diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24207 --- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24208 +++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24209 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24210 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24211 static int edac_pci_poll_msec = 1000; /* one second workq period */
24213 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24214 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24215 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24216 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24218 static struct kobject *edac_pci_top_main_kobj;
24219 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24220 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24221 edac_printk(KERN_CRIT, EDAC_PCI,
24222 "Signaled System Error on %s\n",
24224 - atomic_inc(&pci_nonparity_count);
24225 + atomic_inc_unchecked(&pci_nonparity_count);
24228 if (status & (PCI_STATUS_PARITY)) {
24229 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24230 "Master Data Parity Error on %s\n",
24233 - atomic_inc(&pci_parity_count);
24234 + atomic_inc_unchecked(&pci_parity_count);
24237 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24238 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24239 "Detected Parity Error on %s\n",
24242 - atomic_inc(&pci_parity_count);
24243 + atomic_inc_unchecked(&pci_parity_count);
24247 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24248 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24249 "Signaled System Error on %s\n",
24251 - atomic_inc(&pci_nonparity_count);
24252 + atomic_inc_unchecked(&pci_nonparity_count);
24255 if (status & (PCI_STATUS_PARITY)) {
24256 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24257 "Master Data Parity Error on "
24258 "%s\n", pci_name(dev));
24260 - atomic_inc(&pci_parity_count);
24261 + atomic_inc_unchecked(&pci_parity_count);
24264 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24265 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24266 "Detected Parity Error on %s\n",
24269 - atomic_inc(&pci_parity_count);
24270 + atomic_inc_unchecked(&pci_parity_count);
24274 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24275 if (!check_pci_errors)
24278 - before_count = atomic_read(&pci_parity_count);
24279 + before_count = atomic_read_unchecked(&pci_parity_count);
24281 /* scan all PCI devices looking for a Parity Error on devices and
24283 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24284 /* Only if operator has selected panic on PCI Error */
24285 if (edac_pci_get_panic_on_pe()) {
24286 /* If the count is different 'after' from 'before' */
24287 - if (before_count != atomic_read(&pci_parity_count))
24288 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24289 panic("EDAC: PCI Parity Error");
24292 diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24293 --- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24294 +++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24295 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24296 char *type, *optype, *err, *msg;
24297 unsigned long error = m->status & 0x1ff0000l;
24298 u32 optypenum = (m->status >> 4) & 0x07;
24299 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24300 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24301 u32 dimm = (m->misc >> 16) & 0x3;
24302 u32 channel = (m->misc >> 18) & 0x3;
24303 u32 syndrome = m->misc >> 32;
24304 diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24305 --- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24306 +++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24307 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24308 bool (*dc_mce)(u16, u8);
24309 bool (*ic_mce)(u16, u8);
24310 bool (*nb_mce)(u16, u8);
24314 void amd_report_gart_errors(bool);
24315 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24316 diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24317 --- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24318 +++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24319 @@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24321 void fw_core_remove_card(struct fw_card *card)
24323 - struct fw_card_driver dummy_driver = dummy_driver_template;
24324 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24326 card->driver->update_phy_reg(card, 4,
24327 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24328 diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24329 --- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24330 +++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24331 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24334 if ((request->channels == 0 && request->bandwidth == 0) ||
24335 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24336 - request->bandwidth < 0)
24337 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24340 r = kmalloc(sizeof(*r), GFP_KERNEL);
24341 diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24342 --- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24343 +++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24344 @@ -99,6 +99,7 @@ struct fw_card_driver {
24346 int (*stop_iso)(struct fw_iso_context *ctx);
24348 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24350 void fw_card_initialize(struct fw_card *card,
24351 const struct fw_card_driver *driver, struct device *device);
24352 diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24353 --- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24354 +++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24356 #include <linux/string.h>
24357 #include <linux/timer.h>
24358 #include <linux/types.h>
24359 +#include <linux/sched.h>
24361 #include <asm/byteorder.h>
24363 @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24364 struct transaction_callback_data d;
24365 struct fw_transaction t;
24367 + pax_track_stack();
24369 init_timer_on_stack(&t.split_timeout_timer);
24370 init_completion(&d.done);
24371 d.payload = payload;
24372 diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24373 --- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24374 +++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24375 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24380 - * no iounmap() for that ioremap(); it would be a no-op, but
24381 - * it's so early in setup that sucker gets confused into doing
24382 - * what it shouldn't if we actually call it.
24384 p = dmi_ioremap(0xF0000, 0x10000);
24387 diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24388 --- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24389 +++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24390 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24391 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24392 maskl, pendl, maskh, pendh);
24394 - atomic_inc(&irq_err_count);
24395 + atomic_inc_unchecked(&irq_err_count);
24399 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24400 --- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24401 +++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24402 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24403 struct drm_crtc *tmp;
24406 - WARN(!crtc, "checking null crtc?\n");
24411 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24412 struct drm_encoder *encoder;
24415 + pax_track_stack();
24417 crtc->enabled = drm_helper_crtc_in_use(crtc);
24418 if (!crtc->enabled)
24420 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24421 --- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24422 +++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24423 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24425 dev = file_priv->minor->dev;
24426 atomic_inc(&dev->ioctl_count);
24427 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24428 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24429 ++file_priv->ioctl_count;
24431 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24432 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24433 --- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24434 +++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24435 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24438 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24439 - atomic_set(&dev->counts[i], 0);
24440 + atomic_set_unchecked(&dev->counts[i], 0);
24442 dev->sigdata.lock = NULL;
24444 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24446 retcode = drm_open_helper(inode, filp, dev);
24448 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24449 - if (!dev->open_count++)
24450 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24451 + if (local_inc_return(&dev->open_count) == 1)
24452 retcode = drm_setup(dev);
24455 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24457 mutex_lock(&drm_global_mutex);
24459 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24460 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24462 if (dev->driver->preclose)
24463 dev->driver->preclose(dev, file_priv);
24464 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24465 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24466 task_pid_nr(current),
24467 (long)old_encode_dev(file_priv->minor->device),
24468 - dev->open_count);
24469 + local_read(&dev->open_count));
24471 /* if the master has gone away we can't do anything with the lock */
24472 if (file_priv->minor->master)
24473 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24474 * End inline drm_release
24477 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24478 - if (!--dev->open_count) {
24479 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24480 + if (local_dec_and_test(&dev->open_count)) {
24481 if (atomic_read(&dev->ioctl_count)) {
24482 DRM_ERROR("Device busy: %d\n",
24483 atomic_read(&dev->ioctl_count));
24484 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24485 --- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24486 +++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24488 struct drm_global_item {
24489 struct mutex mutex;
24492 + atomic_t refcount;
24495 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24496 @@ -49,7 +49,7 @@ void drm_global_init(void)
24497 struct drm_global_item *item = &glob[i];
24498 mutex_init(&item->mutex);
24499 item->object = NULL;
24500 - item->refcount = 0;
24501 + atomic_set(&item->refcount, 0);
24505 @@ -59,7 +59,7 @@ void drm_global_release(void)
24506 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24507 struct drm_global_item *item = &glob[i];
24508 BUG_ON(item->object != NULL);
24509 - BUG_ON(item->refcount != 0);
24510 + BUG_ON(atomic_read(&item->refcount) != 0);
24514 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24517 mutex_lock(&item->mutex);
24518 - if (item->refcount == 0) {
24519 + if (atomic_read(&item->refcount) == 0) {
24520 item->object = kzalloc(ref->size, GFP_KERNEL);
24521 if (unlikely(item->object == NULL)) {
24523 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24527 - ++item->refcount;
24528 + atomic_inc(&item->refcount);
24529 ref->object = item->object;
24530 object = item->object;
24531 mutex_unlock(&item->mutex);
24532 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24533 struct drm_global_item *item = &glob[ref->global_type];
24535 mutex_lock(&item->mutex);
24536 - BUG_ON(item->refcount == 0);
24537 + BUG_ON(atomic_read(&item->refcount) == 0);
24538 BUG_ON(ref->object != item->object);
24539 - if (--item->refcount == 0) {
24540 + if (atomic_dec_and_test(&item->refcount)) {
24542 item->object = NULL;
24544 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24545 --- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24546 +++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24547 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24548 struct drm_local_map *map;
24549 struct drm_map_list *r_list;
24551 - /* Hardcoded from _DRM_FRAME_BUFFER,
24552 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24553 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24554 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24555 + static const char * const types[] = {
24556 + [_DRM_FRAME_BUFFER] = "FB",
24557 + [_DRM_REGISTERS] = "REG",
24558 + [_DRM_SHM] = "SHM",
24559 + [_DRM_AGP] = "AGP",
24560 + [_DRM_SCATTER_GATHER] = "SG",
24561 + [_DRM_CONSISTENT] = "PCI",
24562 + [_DRM_GEM] = "GEM" };
24566 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24570 - if (map->type < 0 || map->type > 5)
24571 + if (map->type >= ARRAY_SIZE(types))
24574 type = types[map->type];
24575 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24576 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24577 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24578 vma->vm_flags & VM_IO ? 'i' : '-',
24579 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24585 #if defined(__i386__)
24586 pgprot = pgprot_val(vma->vm_page_prot);
24587 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24588 --- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24589 +++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24590 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24591 stats->data[i].value =
24592 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24594 - stats->data[i].value = atomic_read(&dev->counts[i]);
24595 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24596 stats->data[i].type = dev->types[i];
24599 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24600 --- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24601 +++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24602 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24603 if (drm_lock_take(&master->lock, lock->context)) {
24604 master->lock.file_priv = file_priv;
24605 master->lock.lock_time = jiffies;
24606 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24607 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24608 break; /* Got lock */
24611 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24615 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24616 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24618 if (drm_lock_free(&master->lock, lock->context)) {
24619 /* FIXME: Should really bail out here. */
24620 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24621 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24622 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24623 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24624 dma->buflist[vertex->idx],
24625 vertex->discard, vertex->used);
24627 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24628 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24629 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24630 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24631 sarea_priv->last_enqueue = dev_priv->counter - 1;
24632 sarea_priv->last_dispatch = (int)hw_status[5];
24634 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24635 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24638 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24639 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24640 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24641 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24642 sarea_priv->last_enqueue = dev_priv->counter - 1;
24643 sarea_priv->last_dispatch = (int)hw_status[5];
24645 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24646 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24647 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24648 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24651 wait_queue_head_t irq_queue;
24652 - atomic_t irq_received;
24653 - atomic_t irq_emitted;
24654 + atomic_unchecked_t irq_received;
24655 + atomic_unchecked_t irq_emitted;
24658 } drm_i810_private_t;
24659 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24660 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24661 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24662 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24665 seq_printf(m, "Interrupts received: %d\n",
24666 - atomic_read(&dev_priv->irq_received));
24667 + atomic_read_unchecked(&dev_priv->irq_received));
24668 for (i = 0; i < I915_NUM_RINGS; i++) {
24669 if (IS_GEN6(dev)) {
24670 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24671 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24672 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24673 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24674 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24677 spin_lock(&dev->count_lock);
24678 - can_switch = (dev->open_count == 0);
24679 + can_switch = (local_read(&dev->open_count) == 0);
24680 spin_unlock(&dev->count_lock);
24683 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24684 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24685 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24686 @@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24687 /* display clock increase/decrease */
24688 /* pll clock increase/decrease */
24689 /* clock gating init */
24693 struct intel_device_info {
24695 @@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24699 - atomic_t irq_received;
24700 + atomic_unchecked_t irq_received;
24702 /* protects the irq masks */
24703 spinlock_t irq_lock;
24704 @@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24705 * will be page flipped away on the next vblank. When it
24706 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24708 - atomic_t pending_flip;
24709 + atomic_unchecked_t pending_flip;
24712 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24713 @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24714 extern void intel_teardown_gmbus(struct drm_device *dev);
24715 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24716 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24717 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24718 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24720 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24722 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24723 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24724 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24725 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24726 i915_gem_release_mmap(obj);
24728 if (obj->base.pending_write_domain)
24729 - cd->flips |= atomic_read(&obj->pending_flip);
24730 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24732 /* The actual obj->write_domain will be updated with
24733 * pending_write_domain after we emit the accumulated flush for all
24734 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24735 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24736 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24737 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24738 int ret = IRQ_NONE, pipe;
24739 bool blc_event = false;
24741 - atomic_inc(&dev_priv->irq_received);
24742 + atomic_inc_unchecked(&dev_priv->irq_received);
24744 if (HAS_PCH_SPLIT(dev))
24745 return ironlake_irq_handler(dev);
24746 @@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24747 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24750 - atomic_set(&dev_priv->irq_received, 0);
24751 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24753 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24754 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24755 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24756 --- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24757 +++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24758 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24760 wait_event(dev_priv->pending_flip_queue,
24761 atomic_read(&dev_priv->mm.wedged) ||
24762 - atomic_read(&obj->pending_flip) == 0);
24763 + atomic_read_unchecked(&obj->pending_flip) == 0);
24765 /* Big Hammer, we also need to ensure that any pending
24766 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24767 @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24768 obj = to_intel_framebuffer(crtc->fb)->obj;
24769 dev_priv = crtc->dev->dev_private;
24770 wait_event(dev_priv->pending_flip_queue,
24771 - atomic_read(&obj->pending_flip) == 0);
24772 + atomic_read_unchecked(&obj->pending_flip) == 0);
24775 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24776 @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24778 atomic_clear_mask(1 << intel_crtc->plane,
24779 &obj->pending_flip.counter);
24780 - if (atomic_read(&obj->pending_flip) == 0)
24781 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24782 wake_up(&dev_priv->pending_flip_queue);
24784 schedule_work(&work->work);
24785 @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24786 /* Block clients from rendering to the new back buffer until
24787 * the flip occurs and the object is no longer visible.
24789 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24790 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24792 switch (INTEL_INFO(dev)->gen) {
24794 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24795 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24796 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24797 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24801 - atomic_t vbl_received; /**< Number of vblanks received. */
24802 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24803 wait_queue_head_t fence_queue;
24804 - atomic_t last_fence_retired;
24805 + atomic_unchecked_t last_fence_retired;
24806 u32 next_fence_to_post;
24808 unsigned int fb_cpp;
24809 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24810 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24811 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24812 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24816 - return atomic_read(&dev_priv->vbl_received);
24817 + return atomic_read_unchecked(&dev_priv->vbl_received);
24821 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24822 /* VBLANK interrupt */
24823 if (status & MGA_VLINEPEN) {
24824 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24825 - atomic_inc(&dev_priv->vbl_received);
24826 + atomic_inc_unchecked(&dev_priv->vbl_received);
24827 drm_handle_vblank(dev, 0);
24830 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24831 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24832 MGA_WRITE(MGA_PRIMEND, prim_end);
24834 - atomic_inc(&dev_priv->last_fence_retired);
24835 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24836 DRM_WAKEUP(&dev_priv->fence_queue);
24839 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24842 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24843 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24844 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24845 - *sequence) <= (1 << 23)));
24847 *sequence = cur_fence;
24848 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24849 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24850 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24851 @@ -228,7 +228,7 @@ struct nouveau_channel {
24852 struct list_head pending;
24854 uint32_t sequence_ack;
24855 - atomic_t last_sequence_irq;
24856 + atomic_unchecked_t last_sequence_irq;
24859 /* DMA push buffer */
24860 @@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24861 struct nouveau_mc_engine {
24862 int (*init)(struct drm_device *dev);
24863 void (*takedown)(struct drm_device *dev);
24867 struct nouveau_timer_engine {
24868 int (*init)(struct drm_device *dev);
24869 void (*takedown)(struct drm_device *dev);
24870 uint64_t (*read)(struct drm_device *dev);
24874 struct nouveau_fb_engine {
24876 @@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24877 void (*put)(struct drm_device *, struct nouveau_mem **);
24879 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24883 struct nouveau_engine {
24884 struct nouveau_instmem_engine instmem;
24885 @@ -662,7 +662,7 @@ struct drm_nouveau_private {
24886 struct drm_global_reference mem_global_ref;
24887 struct ttm_bo_global_ref bo_global_ref;
24888 struct ttm_bo_device bdev;
24889 - atomic_t validate_sequence;
24890 + atomic_unchecked_t validate_sequence;
24894 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24895 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24896 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24897 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24898 if (USE_REFCNT(dev))
24899 sequence = nvchan_rd32(chan, 0x48);
24901 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24902 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24904 if (chan->fence.sequence_ack == sequence)
24906 @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24908 INIT_LIST_HEAD(&chan->fence.pending);
24909 spin_lock_init(&chan->fence.lock);
24910 - atomic_set(&chan->fence.last_sequence_irq, 0);
24911 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24915 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24916 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24917 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24918 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24922 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24923 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24925 if (++trycnt > 100000) {
24926 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24927 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24928 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24929 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24930 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24933 spin_lock(&dev->count_lock);
24934 - can_switch = (dev->open_count == 0);
24935 + can_switch = (local_read(&dev->open_count) == 0);
24936 spin_unlock(&dev->count_lock);
24939 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24940 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24941 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24942 @@ -552,7 +552,7 @@ static int
24943 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24944 u32 class, u32 mthd, u32 data)
24946 - atomic_set(&chan->fence.last_sequence_irq, data);
24947 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24951 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24952 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24953 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24954 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24956 /* GH: Simple idle check.
24958 - atomic_set(&dev_priv->idle_count, 0);
24959 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24961 /* We don't support anything other than bus-mastering ring mode,
24962 * but the ring can be in either AGP or PCI space for the ring
24963 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
24964 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
24965 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
24966 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24968 unsigned long cce_buffers_offset;
24970 - atomic_t idle_count;
24971 + atomic_unchecked_t idle_count;
24976 u32 crtc_offset_cntl;
24978 - atomic_t vbl_received;
24979 + atomic_unchecked_t vbl_received;
24982 unsigned int front_offset;
24983 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
24984 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
24985 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
24986 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24990 - return atomic_read(&dev_priv->vbl_received);
24991 + return atomic_read_unchecked(&dev_priv->vbl_received);
24994 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24995 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24996 /* VBLANK interrupt */
24997 if (status & R128_CRTC_VBLANK_INT) {
24998 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24999 - atomic_inc(&dev_priv->vbl_received);
25000 + atomic_inc_unchecked(&dev_priv->vbl_received);
25001 drm_handle_vblank(dev, 0);
25002 return IRQ_HANDLED;
25004 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25005 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25006 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25007 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25009 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25011 - if (atomic_read(&dev_priv->idle_count) == 0)
25012 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25013 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25015 - atomic_set(&dev_priv->idle_count, 0);
25016 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25020 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25021 --- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25022 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25023 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25027 + pax_track_stack();
25032 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25033 --- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25034 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25035 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25037 regmatch_t match[4];
25045 struct offset *offset;
25046 char last_reg_s[10];
25048 + unsigned long last_reg;
25051 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25052 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25053 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25054 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25055 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25056 struct radeon_gpio_rec gpio;
25057 struct radeon_hpd hpd;
25059 + pax_track_stack();
25061 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25064 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25065 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25066 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25067 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25070 spin_lock(&dev->count_lock);
25071 - can_switch = (dev->open_count == 0);
25072 + can_switch = (local_read(&dev->open_count) == 0);
25073 spin_unlock(&dev->count_lock);
25076 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25077 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25078 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25079 @@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25081 u32 pll_out_min, pll_out_max;
25083 + pax_track_stack();
25085 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25086 freq = freq * 1000;
25088 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25089 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25090 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25091 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25094 wait_queue_head_t swi_queue;
25095 - atomic_t swi_emitted;
25096 + atomic_unchecked_t swi_emitted;
25098 uint32_t irq_enable_reg;
25099 uint32_t r500_disp_irq_reg;
25100 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25101 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25102 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25103 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25104 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25107 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25108 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25109 if (!rdev->cp.ready) {
25110 /* FIXME: cp is not running assume everythings is done right
25112 @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25115 WREG32(rdev->fence_drv.scratch_reg, 0);
25116 - atomic_set(&rdev->fence_drv.seq, 0);
25117 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25118 INIT_LIST_HEAD(&rdev->fence_drv.created);
25119 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25120 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25121 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25122 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25123 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25124 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25126 struct radeon_fence_driver {
25127 uint32_t scratch_reg;
25129 + atomic_unchecked_t seq;
25131 unsigned long last_jiffies;
25132 unsigned long last_timeout;
25133 @@ -958,7 +958,7 @@ struct radeon_asic {
25134 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25135 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25136 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25142 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25143 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25144 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25145 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25146 request = compat_alloc_user_space(sizeof(*request));
25147 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25148 || __put_user(req32.param, &request->param)
25149 - || __put_user((void __user *)(unsigned long)req32.value,
25150 + || __put_user((unsigned long)req32.value,
25154 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25155 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25156 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25157 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25161 - atomic_inc(&dev_priv->swi_emitted);
25162 - ret = atomic_read(&dev_priv->swi_emitted);
25163 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25164 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25167 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25168 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25169 drm_radeon_private_t *dev_priv =
25170 (drm_radeon_private_t *) dev->dev_private;
25172 - atomic_set(&dev_priv->swi_emitted, 0);
25173 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25174 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25176 dev->max_vblank_count = 0x001fffff;
25177 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25178 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25179 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25180 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25181 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25182 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25184 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25185 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25186 sarea_priv->nbox * sizeof(depth_boxes[0])))
25189 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25191 drm_radeon_private_t *dev_priv = dev->dev_private;
25192 drm_radeon_getparam_t *param = data;
25196 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25198 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25199 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25200 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25201 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25203 if (unlikely(ttm_vm_ops == NULL)) {
25204 ttm_vm_ops = vma->vm_ops;
25205 - radeon_ttm_vm_ops = *ttm_vm_ops;
25206 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25207 + pax_open_kernel();
25208 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25209 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25210 + pax_close_kernel();
25212 vma->vm_ops = &radeon_ttm_vm_ops;
25214 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25215 --- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25216 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25217 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25218 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25219 rdev->pm.sideport_bandwidth.full)
25220 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25221 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25222 + read_delay_latency.full = dfixed_const(800 * 1000);
25223 read_delay_latency.full = dfixed_div(read_delay_latency,
25224 rdev->pm.igp_sideport_mclk);
25225 + a.full = dfixed_const(370);
25226 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25228 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25229 rdev->pm.k8_bandwidth.full)
25230 diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25231 --- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25232 +++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25233 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25235 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25237 - static atomic_t start_pool = ATOMIC_INIT(0);
25238 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25240 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25241 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25242 struct ttm_page_pool *pool;
25244 pool_offset = pool_offset % NUM_POOLS;
25245 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25246 --- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25247 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25248 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25249 typedef uint32_t maskarray_t[5];
25251 typedef struct drm_via_irq {
25252 - atomic_t irq_received;
25253 + atomic_unchecked_t irq_received;
25254 uint32_t pending_mask;
25255 uint32_t enable_mask;
25256 wait_queue_head_t irq_queue;
25257 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25258 struct timeval last_vblank;
25259 int last_vblank_valid;
25260 unsigned usec_per_vblank;
25261 - atomic_t vbl_received;
25262 + atomic_unchecked_t vbl_received;
25263 drm_via_state_t hc_state;
25264 char pci_buf[VIA_PCI_BUF_SIZE];
25265 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25266 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25267 --- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25268 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25269 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25273 - return atomic_read(&dev_priv->vbl_received);
25274 + return atomic_read_unchecked(&dev_priv->vbl_received);
25277 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25278 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25280 status = VIA_READ(VIA_REG_INTERRUPT);
25281 if (status & VIA_IRQ_VBLANK_PENDING) {
25282 - atomic_inc(&dev_priv->vbl_received);
25283 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25284 + atomic_inc_unchecked(&dev_priv->vbl_received);
25285 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25286 do_gettimeofday(&cur_vblank);
25287 if (dev_priv->last_vblank_valid) {
25288 dev_priv->usec_per_vblank =
25289 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25290 dev_priv->last_vblank = cur_vblank;
25291 dev_priv->last_vblank_valid = 1;
25293 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25294 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25295 DRM_DEBUG("US per vblank is: %u\n",
25296 dev_priv->usec_per_vblank);
25298 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25300 for (i = 0; i < dev_priv->num_irqs; ++i) {
25301 if (status & cur_irq->pending_mask) {
25302 - atomic_inc(&cur_irq->irq_received);
25303 + atomic_inc_unchecked(&cur_irq->irq_received);
25304 DRM_WAKEUP(&cur_irq->irq_queue);
25306 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25307 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25308 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25309 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25311 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25312 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25314 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25315 (((cur_irq_sequence =
25316 - atomic_read(&cur_irq->irq_received)) -
25317 + atomic_read_unchecked(&cur_irq->irq_received)) -
25318 *sequence) <= (1 << 23)));
25320 *sequence = cur_irq_sequence;
25321 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25324 for (i = 0; i < dev_priv->num_irqs; ++i) {
25325 - atomic_set(&cur_irq->irq_received, 0);
25326 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25327 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25328 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25329 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25330 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25331 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25332 case VIA_IRQ_RELATIVE:
25333 irqwait->request.sequence +=
25334 - atomic_read(&cur_irq->irq_received);
25335 + atomic_read_unchecked(&cur_irq->irq_received);
25336 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25337 case VIA_IRQ_ABSOLUTE:
25339 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25340 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25341 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25342 @@ -240,7 +240,7 @@ struct vmw_private {
25343 * Fencing and IRQs.
25346 - atomic_t fence_seq;
25347 + atomic_unchecked_t fence_seq;
25348 wait_queue_head_t fence_queue;
25349 wait_queue_head_t fifo_queue;
25350 atomic_t fence_queue_waiters;
25351 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25352 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25353 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25354 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25355 while (!vmw_lag_lt(queue, us)) {
25356 spin_lock(&queue->lock);
25357 if (list_empty(&queue->head))
25358 - sequence = atomic_read(&dev_priv->fence_seq);
25359 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25361 fence = list_first_entry(&queue->head,
25362 struct vmw_fence, head);
25363 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25364 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25365 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25366 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25367 (unsigned int) min,
25368 (unsigned int) fifo->capabilities);
25370 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25371 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25372 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25373 vmw_fence_queue_init(&fifo->fence_queue);
25374 return vmw_fifo_send_fence(dev_priv, &dummy);
25375 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25377 fm = vmw_fifo_reserve(dev_priv, bytes);
25378 if (unlikely(fm == NULL)) {
25379 - *sequence = atomic_read(&dev_priv->fence_seq);
25380 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25382 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25384 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25388 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25389 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25390 } while (*sequence == 0);
25392 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25393 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25394 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25395 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25396 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25397 * emitted. Then the fence is stale and signaled.
25400 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25401 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25405 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25408 down_read(&fifo_state->rwsem);
25409 - signal_seq = atomic_read(&dev_priv->fence_seq);
25410 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25414 diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25415 --- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25416 +++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25417 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25419 int hid_add_device(struct hid_device *hdev)
25421 - static atomic_t id = ATOMIC_INIT(0);
25422 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25425 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25426 @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25427 /* XXX hack, any other cleaner solution after the driver core
25428 * is converted to allow more than 20 bytes as the device name? */
25429 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25430 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25431 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25433 hid_debug_register(hdev, dev_name(&hdev->dev));
25434 ret = device_add(&hdev->dev);
25435 diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25436 --- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25437 +++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25438 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25441 case HIDIOCAPPLICATION:
25442 - if (arg < 0 || arg >= hid->maxapplication)
25443 + if (arg >= hid->maxapplication)
25446 for (i = 0; i < hid->maxcollection; i++)
25447 diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25448 --- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25449 +++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25450 @@ -113,7 +113,7 @@ struct sht15_data {
25452 int supply_uV_valid;
25453 struct work_struct update_supply_work;
25454 - atomic_t interrupt_handled;
25455 + atomic_unchecked_t interrupt_handled;
25459 @@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25462 gpio_direction_input(data->pdata->gpio_data);
25463 - atomic_set(&data->interrupt_handled, 0);
25464 + atomic_set_unchecked(&data->interrupt_handled, 0);
25466 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25467 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25468 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25469 /* Only relevant if the interrupt hasn't occurred. */
25470 - if (!atomic_read(&data->interrupt_handled))
25471 + if (!atomic_read_unchecked(&data->interrupt_handled))
25472 schedule_work(&data->read_work);
25474 ret = wait_event_timeout(data->wait_queue,
25475 @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25476 struct sht15_data *data = d;
25477 /* First disable the interrupt */
25478 disable_irq_nosync(irq);
25479 - atomic_inc(&data->interrupt_handled);
25480 + atomic_inc_unchecked(&data->interrupt_handled);
25481 /* Then schedule a reading work struct */
25482 if (data->flag != SHT15_READING_NOTHING)
25483 schedule_work(&data->read_work);
25484 @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25485 here as could have gone low in meantime so verify
25488 - atomic_set(&data->interrupt_handled, 0);
25489 + atomic_set_unchecked(&data->interrupt_handled, 0);
25490 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25491 /* If still not occurred or another handler has been scheduled */
25492 if (gpio_get_value(data->pdata->gpio_data)
25493 - || atomic_read(&data->interrupt_handled))
25494 + || atomic_read_unchecked(&data->interrupt_handled))
25497 /* Read the data back from the device */
25498 diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25499 --- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25500 +++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25501 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25502 struct i2c_board_info *info);
25503 static int w83791d_remove(struct i2c_client *client);
25505 -static int w83791d_read(struct i2c_client *client, u8 register);
25506 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25507 +static int w83791d_read(struct i2c_client *client, u8 reg);
25508 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25509 static struct w83791d_data *w83791d_update_device(struct device *dev);
25512 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25513 --- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25514 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25516 extern struct i2c_adapter amd756_smbus;
25518 static struct i2c_adapter *s4882_adapter;
25519 -static struct i2c_algorithm *s4882_algo;
25520 +static i2c_algorithm_no_const *s4882_algo;
25522 /* Wrapper access functions for multiplexed SMBus */
25523 static DEFINE_MUTEX(amd756_lock);
25524 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25525 --- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25526 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25528 extern struct i2c_adapter *nforce2_smbus;
25530 static struct i2c_adapter *s4985_adapter;
25531 -static struct i2c_algorithm *s4985_algo;
25532 +static i2c_algorithm_no_const *s4985_algo;
25534 /* Wrapper access functions for multiplexed SMBus */
25535 static DEFINE_MUTEX(nforce2_lock);
25536 diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25537 --- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25538 +++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25540 /* multiplexer per channel data */
25541 struct i2c_mux_priv {
25542 struct i2c_adapter adap;
25543 - struct i2c_algorithm algo;
25544 + i2c_algorithm_no_const algo;
25546 struct i2c_adapter *parent;
25547 void *mux_dev; /* the mux chip/device */
25548 diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25549 --- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25550 +++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25551 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25552 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25553 if ((unsigned long)buf & alignment
25554 || blk_rq_bytes(rq) & q->dma_pad_mask
25555 - || object_is_on_stack(buf))
25556 + || object_starts_on_stack(buf))
25560 diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25561 --- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25562 +++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25563 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25564 u8 pc_buf[256], header_len, desc_cnt;
25565 int i, rc = 1, blocks, length;
25567 + pax_track_stack();
25569 ide_debug_log(IDE_DBG_FUNC, "enter");
25571 drive->bios_cyl = 0;
25572 diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25573 --- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25574 +++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25575 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25576 int ret, i, n_ports = dev2 ? 4 : 2;
25577 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25579 + pax_track_stack();
25581 for (i = 0; i < n_ports / 2; i++) {
25582 ret = ide_setup_pci_controller(pdev[i], d, !i);
25584 diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25585 --- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25586 +++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25587 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25589 struct cm_counter_group {
25590 struct kobject obj;
25591 - atomic_long_t counter[CM_ATTR_COUNT];
25592 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25595 struct cm_counter_attribute {
25596 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25597 struct ib_mad_send_buf *msg = NULL;
25600 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25601 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25602 counter[CM_REQ_COUNTER]);
25604 /* Quick state check to discard duplicate REQs. */
25605 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25609 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25610 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25611 counter[CM_REP_COUNTER]);
25612 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25614 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25615 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25616 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25617 spin_unlock_irq(&cm_id_priv->lock);
25618 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25619 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25620 counter[CM_RTU_COUNTER]);
25623 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25624 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25625 dreq_msg->local_comm_id);
25627 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25628 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25629 counter[CM_DREQ_COUNTER]);
25630 cm_issue_drep(work->port, work->mad_recv_wc);
25632 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25633 case IB_CM_MRA_REP_RCVD:
25635 case IB_CM_TIMEWAIT:
25636 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25637 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25638 counter[CM_DREQ_COUNTER]);
25639 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25641 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25644 case IB_CM_DREQ_RCVD:
25645 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25646 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25647 counter[CM_DREQ_COUNTER]);
25650 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25651 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25652 cm_id_priv->msg, timeout)) {
25653 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25654 - atomic_long_inc(&work->port->
25655 + atomic_long_inc_unchecked(&work->port->
25656 counter_group[CM_RECV_DUPLICATES].
25657 counter[CM_MRA_COUNTER]);
25659 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25661 case IB_CM_MRA_REQ_RCVD:
25662 case IB_CM_MRA_REP_RCVD:
25663 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25664 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25665 counter[CM_MRA_COUNTER]);
25668 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25669 case IB_CM_LAP_IDLE:
25671 case IB_CM_MRA_LAP_SENT:
25672 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25673 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25674 counter[CM_LAP_COUNTER]);
25675 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25677 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25680 case IB_CM_LAP_RCVD:
25681 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25682 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25683 counter[CM_LAP_COUNTER]);
25686 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25687 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25688 if (cur_cm_id_priv) {
25689 spin_unlock_irq(&cm.lock);
25690 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25691 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25692 counter[CM_SIDR_REQ_COUNTER]);
25693 goto out; /* Duplicate message. */
25695 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25696 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25699 - atomic_long_add(1 + msg->retries,
25700 + atomic_long_add_unchecked(1 + msg->retries,
25701 &port->counter_group[CM_XMIT].counter[attr_index]);
25703 - atomic_long_add(msg->retries,
25704 + atomic_long_add_unchecked(msg->retries,
25705 &port->counter_group[CM_XMIT_RETRIES].
25706 counter[attr_index]);
25708 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25711 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25712 - atomic_long_inc(&port->counter_group[CM_RECV].
25713 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25714 counter[attr_id - CM_ATTR_ID_OFFSET]);
25716 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25717 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25718 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25720 return sprintf(buf, "%ld\n",
25721 - atomic_long_read(&group->counter[cm_attr->index]));
25722 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25725 static const struct sysfs_ops cm_counter_ops = {
25726 diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25727 --- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25728 +++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25729 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25731 struct task_struct *thread;
25733 - atomic_t req_ser;
25734 - atomic_t flush_ser;
25735 + atomic_unchecked_t req_ser;
25736 + atomic_unchecked_t flush_ser;
25738 wait_queue_head_t force_wait;
25740 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25741 struct ib_fmr_pool *pool = pool_ptr;
25744 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25745 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25746 ib_fmr_batch_release(pool);
25748 - atomic_inc(&pool->flush_ser);
25749 + atomic_inc_unchecked(&pool->flush_ser);
25750 wake_up_interruptible(&pool->force_wait);
25752 if (pool->flush_function)
25753 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25756 set_current_state(TASK_INTERRUPTIBLE);
25757 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25758 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25759 !kthread_should_stop())
25761 __set_current_state(TASK_RUNNING);
25762 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25763 pool->dirty_watermark = params->dirty_watermark;
25764 pool->dirty_len = 0;
25765 spin_lock_init(&pool->pool_lock);
25766 - atomic_set(&pool->req_ser, 0);
25767 - atomic_set(&pool->flush_ser, 0);
25768 + atomic_set_unchecked(&pool->req_ser, 0);
25769 + atomic_set_unchecked(&pool->flush_ser, 0);
25770 init_waitqueue_head(&pool->force_wait);
25772 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25773 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25775 spin_unlock_irq(&pool->pool_lock);
25777 - serial = atomic_inc_return(&pool->req_ser);
25778 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25779 wake_up_process(pool->thread);
25781 if (wait_event_interruptible(pool->force_wait,
25782 - atomic_read(&pool->flush_ser) - serial >= 0))
25783 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25787 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25789 list_add_tail(&fmr->list, &pool->dirty_list);
25790 if (++pool->dirty_len >= pool->dirty_watermark) {
25791 - atomic_inc(&pool->req_ser);
25792 + atomic_inc_unchecked(&pool->req_ser);
25793 wake_up_process(pool->thread);
25796 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25797 --- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25798 +++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25799 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25801 struct fw_ri_tpte tpt;
25803 - static atomic_t key;
25804 + static atomic_unchecked_t key;
25806 if (c4iw_fatal_error(rdev))
25808 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25809 &rdev->resource.tpt_fifo_lock);
25812 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25813 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25815 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25816 __func__, stag_state, type, pdid, stag_idx);
25817 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25818 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25819 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25820 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25821 struct infinipath_counters counters;
25822 struct ipath_devdata *dd;
25824 + pax_track_stack();
25826 dd = file->f_path.dentry->d_inode->i_private;
25827 dd->ipath_f_read_counters(dd, &counters);
25829 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25830 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25831 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25832 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25833 struct ib_atomic_eth *ateth;
25834 struct ipath_ack_entry *e;
25836 - atomic64_t *maddr;
25837 + atomic64_unchecked_t *maddr;
25841 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25842 IB_ACCESS_REMOTE_ATOMIC)))
25843 goto nack_acc_unlck;
25844 /* Perform atomic OP and save result. */
25845 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25846 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25847 sdata = be64_to_cpu(ateth->swap_data);
25848 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25849 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25850 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25851 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25852 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25853 be64_to_cpu(ateth->compare_data),
25855 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25856 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25857 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25858 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25859 unsigned long flags;
25862 - atomic64_t *maddr;
25863 + atomic64_unchecked_t *maddr;
25864 enum ib_wc_status send_status;
25867 @@ -382,11 +382,11 @@ again:
25868 IB_ACCESS_REMOTE_ATOMIC)))
25870 /* Perform atomic OP and save result. */
25871 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25872 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25873 sdata = wqe->wr.wr.atomic.compare_add;
25874 *(u64 *) sqp->s_sge.sge.vaddr =
25875 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25876 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25877 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25878 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25879 sdata, wqe->wr.wr.atomic.swap);
25881 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25882 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25883 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25884 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25885 LIST_HEAD(nes_adapter_list);
25886 static LIST_HEAD(nes_dev_list);
25888 -atomic_t qps_destroyed;
25889 +atomic_unchecked_t qps_destroyed;
25891 static unsigned int ee_flsh_adapter;
25892 static unsigned int sysfs_nonidx_addr;
25893 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25894 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25895 struct nes_adapter *nesadapter = nesdev->nesadapter;
25897 - atomic_inc(&qps_destroyed);
25898 + atomic_inc_unchecked(&qps_destroyed);
25900 /* Free the control structures */
25902 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25903 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25904 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25905 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25906 u32 cm_packets_retrans;
25907 u32 cm_packets_created;
25908 u32 cm_packets_received;
25909 -atomic_t cm_listens_created;
25910 -atomic_t cm_listens_destroyed;
25911 +atomic_unchecked_t cm_listens_created;
25912 +atomic_unchecked_t cm_listens_destroyed;
25913 u32 cm_backlog_drops;
25914 -atomic_t cm_loopbacks;
25915 -atomic_t cm_nodes_created;
25916 -atomic_t cm_nodes_destroyed;
25917 -atomic_t cm_accel_dropped_pkts;
25918 -atomic_t cm_resets_recvd;
25919 +atomic_unchecked_t cm_loopbacks;
25920 +atomic_unchecked_t cm_nodes_created;
25921 +atomic_unchecked_t cm_nodes_destroyed;
25922 +atomic_unchecked_t cm_accel_dropped_pkts;
25923 +atomic_unchecked_t cm_resets_recvd;
25925 static inline int mini_cm_accelerated(struct nes_cm_core *,
25926 struct nes_cm_node *);
25927 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25929 static struct nes_cm_core *g_cm_core;
25931 -atomic_t cm_connects;
25932 -atomic_t cm_accepts;
25933 -atomic_t cm_disconnects;
25934 -atomic_t cm_closes;
25935 -atomic_t cm_connecteds;
25936 -atomic_t cm_connect_reqs;
25937 -atomic_t cm_rejects;
25938 +atomic_unchecked_t cm_connects;
25939 +atomic_unchecked_t cm_accepts;
25940 +atomic_unchecked_t cm_disconnects;
25941 +atomic_unchecked_t cm_closes;
25942 +atomic_unchecked_t cm_connecteds;
25943 +atomic_unchecked_t cm_connect_reqs;
25944 +atomic_unchecked_t cm_rejects;
25948 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25952 - atomic_inc(&cm_listens_destroyed);
25953 + atomic_inc_unchecked(&cm_listens_destroyed);
25955 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25957 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25960 add_hte_node(cm_core, cm_node);
25961 - atomic_inc(&cm_nodes_created);
25962 + atomic_inc_unchecked(&cm_nodes_created);
25966 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25969 atomic_dec(&cm_core->node_cnt);
25970 - atomic_inc(&cm_nodes_destroyed);
25971 + atomic_inc_unchecked(&cm_nodes_destroyed);
25972 nesqp = cm_node->nesqp;
25974 nesqp->cm_node = NULL;
25975 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25977 static void drop_packet(struct sk_buff *skb)
25979 - atomic_inc(&cm_accel_dropped_pkts);
25980 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25981 dev_kfree_skb_any(skb);
25984 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25987 int reset = 0; /* whether to send reset in case of err.. */
25988 - atomic_inc(&cm_resets_recvd);
25989 + atomic_inc_unchecked(&cm_resets_recvd);
25990 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25991 " refcnt=%d\n", cm_node, cm_node->state,
25992 atomic_read(&cm_node->ref_count));
25993 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25994 rem_ref_cm_node(cm_node->cm_core, cm_node);
25997 - atomic_inc(&cm_loopbacks);
25998 + atomic_inc_unchecked(&cm_loopbacks);
25999 loopbackremotenode->loopbackpartner = cm_node;
26000 loopbackremotenode->tcp_cntxt.rcv_wscale =
26001 NES_CM_DEFAULT_RCV_WND_SCALE;
26002 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26003 add_ref_cm_node(cm_node);
26004 } else if (cm_node->state == NES_CM_STATE_TSA) {
26005 rem_ref_cm_node(cm_core, cm_node);
26006 - atomic_inc(&cm_accel_dropped_pkts);
26007 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26008 dev_kfree_skb_any(skb);
26011 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26013 if ((cm_id) && (cm_id->event_handler)) {
26014 if (issue_disconn) {
26015 - atomic_inc(&cm_disconnects);
26016 + atomic_inc_unchecked(&cm_disconnects);
26017 cm_event.event = IW_CM_EVENT_DISCONNECT;
26018 cm_event.status = disconn_status;
26019 cm_event.local_addr = cm_id->local_addr;
26020 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26024 - atomic_inc(&cm_closes);
26025 + atomic_inc_unchecked(&cm_closes);
26026 nes_disconnect(nesqp, 1);
26028 cm_id->provider_data = nesqp;
26029 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26031 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26032 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26033 - atomic_inc(&cm_accepts);
26034 + atomic_inc_unchecked(&cm_accepts);
26036 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26037 netdev_refcnt_read(nesvnic->netdev));
26038 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26040 struct nes_cm_core *cm_core;
26042 - atomic_inc(&cm_rejects);
26043 + atomic_inc_unchecked(&cm_rejects);
26044 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26045 loopback = cm_node->loopbackpartner;
26046 cm_core = cm_node->cm_core;
26047 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26048 ntohl(cm_id->local_addr.sin_addr.s_addr),
26049 ntohs(cm_id->local_addr.sin_port));
26051 - atomic_inc(&cm_connects);
26052 + atomic_inc_unchecked(&cm_connects);
26053 nesqp->active_conn = 1;
26055 /* cache the cm_id in the qp */
26056 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26057 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26060 - atomic_inc(&cm_listens_created);
26061 + atomic_inc_unchecked(&cm_listens_created);
26064 cm_id->add_ref(cm_id);
26065 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26066 if (nesqp->destroyed) {
26069 - atomic_inc(&cm_connecteds);
26070 + atomic_inc_unchecked(&cm_connecteds);
26071 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26072 " local port 0x%04X. jiffies = %lu.\n",
26074 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26076 cm_id->add_ref(cm_id);
26077 ret = cm_id->event_handler(cm_id, &cm_event);
26078 - atomic_inc(&cm_closes);
26079 + atomic_inc_unchecked(&cm_closes);
26080 cm_event.event = IW_CM_EVENT_CLOSE;
26081 cm_event.status = IW_CM_EVENT_STATUS_OK;
26082 cm_event.provider_data = cm_id->provider_data;
26083 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26085 cm_id = cm_node->cm_id;
26087 - atomic_inc(&cm_connect_reqs);
26088 + atomic_inc_unchecked(&cm_connect_reqs);
26089 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26090 cm_node, cm_id, jiffies);
26092 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26094 cm_id = cm_node->cm_id;
26096 - atomic_inc(&cm_connect_reqs);
26097 + atomic_inc_unchecked(&cm_connect_reqs);
26098 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26099 cm_node, cm_id, jiffies);
26101 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26102 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26103 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26104 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26105 extern unsigned int wqm_quanta;
26106 extern struct list_head nes_adapter_list;
26108 -extern atomic_t cm_connects;
26109 -extern atomic_t cm_accepts;
26110 -extern atomic_t cm_disconnects;
26111 -extern atomic_t cm_closes;
26112 -extern atomic_t cm_connecteds;
26113 -extern atomic_t cm_connect_reqs;
26114 -extern atomic_t cm_rejects;
26115 -extern atomic_t mod_qp_timouts;
26116 -extern atomic_t qps_created;
26117 -extern atomic_t qps_destroyed;
26118 -extern atomic_t sw_qps_destroyed;
26119 +extern atomic_unchecked_t cm_connects;
26120 +extern atomic_unchecked_t cm_accepts;
26121 +extern atomic_unchecked_t cm_disconnects;
26122 +extern atomic_unchecked_t cm_closes;
26123 +extern atomic_unchecked_t cm_connecteds;
26124 +extern atomic_unchecked_t cm_connect_reqs;
26125 +extern atomic_unchecked_t cm_rejects;
26126 +extern atomic_unchecked_t mod_qp_timouts;
26127 +extern atomic_unchecked_t qps_created;
26128 +extern atomic_unchecked_t qps_destroyed;
26129 +extern atomic_unchecked_t sw_qps_destroyed;
26130 extern u32 mh_detected;
26131 extern u32 mh_pauses_sent;
26132 extern u32 cm_packets_sent;
26133 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26134 extern u32 cm_packets_received;
26135 extern u32 cm_packets_dropped;
26136 extern u32 cm_packets_retrans;
26137 -extern atomic_t cm_listens_created;
26138 -extern atomic_t cm_listens_destroyed;
26139 +extern atomic_unchecked_t cm_listens_created;
26140 +extern atomic_unchecked_t cm_listens_destroyed;
26141 extern u32 cm_backlog_drops;
26142 -extern atomic_t cm_loopbacks;
26143 -extern atomic_t cm_nodes_created;
26144 -extern atomic_t cm_nodes_destroyed;
26145 -extern atomic_t cm_accel_dropped_pkts;
26146 -extern atomic_t cm_resets_recvd;
26147 +extern atomic_unchecked_t cm_loopbacks;
26148 +extern atomic_unchecked_t cm_nodes_created;
26149 +extern atomic_unchecked_t cm_nodes_destroyed;
26150 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26151 +extern atomic_unchecked_t cm_resets_recvd;
26153 extern u32 int_mod_timer_init;
26154 extern u32 int_mod_cq_depth_256;
26155 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26156 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26157 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26158 @@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26159 target_stat_values[++index] = mh_detected;
26160 target_stat_values[++index] = mh_pauses_sent;
26161 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26162 - target_stat_values[++index] = atomic_read(&cm_connects);
26163 - target_stat_values[++index] = atomic_read(&cm_accepts);
26164 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26165 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26166 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26167 - target_stat_values[++index] = atomic_read(&cm_rejects);
26168 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26169 - target_stat_values[++index] = atomic_read(&qps_created);
26170 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26171 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26172 - target_stat_values[++index] = atomic_read(&cm_closes);
26173 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26174 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26175 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26176 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26177 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26178 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26179 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26180 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26181 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26182 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26183 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26184 target_stat_values[++index] = cm_packets_sent;
26185 target_stat_values[++index] = cm_packets_bounced;
26186 target_stat_values[++index] = cm_packets_created;
26187 target_stat_values[++index] = cm_packets_received;
26188 target_stat_values[++index] = cm_packets_dropped;
26189 target_stat_values[++index] = cm_packets_retrans;
26190 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26191 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26192 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26193 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26194 target_stat_values[++index] = cm_backlog_drops;
26195 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26196 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26197 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26198 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26199 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26200 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26201 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26202 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26203 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26204 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26205 target_stat_values[++index] = nesadapter->free_4kpbl;
26206 target_stat_values[++index] = nesadapter->free_256pbl;
26207 target_stat_values[++index] = int_mod_timer_init;
26208 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26209 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26210 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26213 #include <rdma/ib_umem.h>
26215 -atomic_t mod_qp_timouts;
26216 -atomic_t qps_created;
26217 -atomic_t sw_qps_destroyed;
26218 +atomic_unchecked_t mod_qp_timouts;
26219 +atomic_unchecked_t qps_created;
26220 +atomic_unchecked_t sw_qps_destroyed;
26222 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26224 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26225 if (init_attr->create_flags)
26226 return ERR_PTR(-EINVAL);
26228 - atomic_inc(&qps_created);
26229 + atomic_inc_unchecked(&qps_created);
26230 switch (init_attr->qp_type) {
26232 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26233 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26234 struct iw_cm_event cm_event;
26237 - atomic_inc(&sw_qps_destroyed);
26238 + atomic_inc_unchecked(&sw_qps_destroyed);
26239 nesqp->destroyed = 1;
26241 /* Blow away the connection if it exists. */
26242 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26243 --- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26244 +++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26246 #include <linux/completion.h>
26247 #include <linux/kref.h>
26248 #include <linux/sched.h>
26249 +#include <linux/slab.h>
26251 #include "qib_common.h"
26252 #include "qib_verbs.h"
26253 diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26254 --- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26255 +++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26256 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26258 static void gameport_init_port(struct gameport *gameport)
26260 - static atomic_t gameport_no = ATOMIC_INIT(0);
26261 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26263 __module_get(THIS_MODULE);
26265 mutex_init(&gameport->drv_mutex);
26266 device_initialize(&gameport->dev);
26267 dev_set_name(&gameport->dev, "gameport%lu",
26268 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26269 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26270 gameport->dev.bus = &gameport_bus;
26271 gameport->dev.release = gameport_release_port;
26272 if (gameport->parent)
26273 diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26274 --- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26275 +++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26276 @@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26278 int input_register_device(struct input_dev *dev)
26280 - static atomic_t input_no = ATOMIC_INIT(0);
26281 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26282 struct input_handler *handler;
26285 @@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26286 dev->setkeycode = input_default_setkeycode;
26288 dev_set_name(&dev->dev, "input%ld",
26289 - (unsigned long) atomic_inc_return(&input_no) - 1);
26290 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26292 error = device_add(&dev->dev);
26294 diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26295 --- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26296 +++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26298 #include <linux/kernel.h>
26299 #include <linux/module.h>
26300 #include <linux/slab.h>
26301 +#include <linux/sched.h>
26302 #include <linux/init.h>
26303 #include <linux/input.h>
26304 #include <linux/gameport.h>
26305 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26306 unsigned char buf[SW_LENGTH];
26309 + pax_track_stack();
26311 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26313 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26314 diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26315 --- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26316 +++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26317 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26319 static int xpad_led_probe(struct usb_xpad *xpad)
26321 - static atomic_t led_seq = ATOMIC_INIT(0);
26322 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26324 struct xpad_led *led;
26325 struct led_classdev *led_cdev;
26326 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26330 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26331 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26333 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26335 diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26336 --- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26337 +++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26338 @@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26340 spin_unlock_irq(&client->packet_lock);
26342 - if (copy_to_user(buffer, data, count))
26343 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26347 diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26348 --- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26349 +++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26350 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26352 static void serio_init_port(struct serio *serio)
26354 - static atomic_t serio_no = ATOMIC_INIT(0);
26355 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26357 __module_get(THIS_MODULE);
26359 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26360 mutex_init(&serio->drv_mutex);
26361 device_initialize(&serio->dev);
26362 dev_set_name(&serio->dev, "serio%ld",
26363 - (long)atomic_inc_return(&serio_no) - 1);
26364 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26365 serio->dev.bus = &serio_bus;
26366 serio->dev.release = serio_release_port;
26367 serio->dev.groups = serio_device_attr_groups;
26368 diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26369 --- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26370 +++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26371 @@ -89,8 +89,8 @@ struct capiminor {
26373 struct capi20_appl *ap;
26375 - atomic_t datahandle;
26377 + atomic_unchecked_t datahandle;
26378 + atomic_unchecked_t msgid;
26380 struct tty_port port;
26382 @@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26383 capimsg_setu16(s, 2, mp->ap->applid);
26384 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26385 capimsg_setu8 (s, 5, CAPI_RESP);
26386 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26387 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26388 capimsg_setu32(s, 8, mp->ncci);
26389 capimsg_setu16(s, 12, datahandle);
26391 @@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26392 mp->outbytes -= len;
26393 spin_unlock_bh(&mp->outlock);
26395 - datahandle = atomic_inc_return(&mp->datahandle);
26396 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26397 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26398 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26399 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26400 capimsg_setu16(skb->data, 2, mp->ap->applid);
26401 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26402 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26403 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26404 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26405 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26406 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26407 capimsg_setu16(skb->data, 16, len); /* Data length */
26408 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26409 --- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26410 +++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26411 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26412 cs->commands_pending = 0;
26413 cs->cur_at_seq = 0;
26415 - cs->open_count = 0;
26416 + local_set(&cs->open_count, 0);
26419 cs->tty_dev = NULL;
26420 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26421 --- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26422 +++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26424 #include <linux/tty_driver.h>
26425 #include <linux/list.h>
26426 #include <asm/atomic.h>
26427 +#include <asm/local.h>
26429 #define GIG_VERSION {0, 5, 0, 0}
26430 #define GIG_COMPAT {0, 4, 0, 0}
26431 @@ -433,7 +434,7 @@ struct cardstate {
26432 spinlock_t cmdlock;
26433 unsigned curlen, cmdbytes;
26435 - unsigned open_count;
26436 + local_t open_count;
26437 struct tty_struct *tty;
26438 struct tasklet_struct if_wake_tasklet;
26439 unsigned control_state;
26440 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26441 --- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26442 +++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26443 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26444 return -ERESTARTSYS;
26445 tty->driver_data = cs;
26447 - ++cs->open_count;
26449 - if (cs->open_count == 1) {
26450 + if (local_inc_return(&cs->open_count) == 1) {
26451 spin_lock_irqsave(&cs->lock, flags);
26453 spin_unlock_irqrestore(&cs->lock, flags);
26454 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26456 if (!cs->connected)
26457 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26458 - else if (!cs->open_count)
26459 + else if (!local_read(&cs->open_count))
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26462 - if (!--cs->open_count) {
26463 + if (!local_dec_return(&cs->open_count)) {
26464 spin_lock_irqsave(&cs->lock, flags);
26466 spin_unlock_irqrestore(&cs->lock, flags);
26467 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26468 if (!cs->connected) {
26469 gig_dbg(DEBUG_IF, "not connected");
26471 - } else if (!cs->open_count)
26472 + } else if (!local_read(&cs->open_count))
26473 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26476 @@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26480 - if (!cs->open_count) {
26481 + if (!local_read(&cs->open_count)) {
26482 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26485 @@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26486 if (!cs->connected) {
26487 gig_dbg(DEBUG_IF, "not connected");
26489 - } else if (!cs->open_count)
26490 + } else if (!local_read(&cs->open_count))
26491 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26492 else if (cs->mstate != MS_LOCKED) {
26493 dev_warn(cs->dev, "can't write to unlocked device\n");
26494 @@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26496 if (!cs->connected)
26497 gig_dbg(DEBUG_IF, "not connected");
26498 - else if (!cs->open_count)
26499 + else if (!local_read(&cs->open_count))
26500 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26501 else if (cs->mstate != MS_LOCKED)
26502 dev_warn(cs->dev, "can't write to unlocked device\n");
26503 @@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26505 if (!cs->connected)
26506 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26507 - else if (!cs->open_count)
26508 + else if (!local_read(&cs->open_count))
26509 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26511 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26512 @@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26514 if (!cs->connected)
26515 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26516 - else if (!cs->open_count)
26517 + else if (!local_read(&cs->open_count))
26518 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26520 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26521 @@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26525 - if (!cs->open_count) {
26526 + if (!local_read(&cs->open_count)) {
26527 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26530 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26531 --- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26532 +++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26533 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26536 if (t4file->user) {
26537 - if (copy_from_user(buf, dp, left))
26538 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26541 memcpy(buf, dp, left);
26542 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26545 if (config->user) {
26546 - if (copy_from_user(buf, dp, left))
26547 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26550 memcpy(buf, dp, left);
26551 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26552 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26553 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26554 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26555 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26556 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26558 + pax_track_stack();
26560 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26562 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26563 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26564 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26565 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26567 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26569 + pax_track_stack();
26571 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26573 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26574 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26575 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26576 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26577 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26579 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26581 + pax_track_stack();
26583 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26585 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26586 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26587 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26588 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26589 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26591 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26593 + pax_track_stack();
26595 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26597 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26598 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26599 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26600 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26601 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26602 } diva_didd_add_adapter_t;
26603 typedef struct _diva_didd_remove_adapter {
26604 IDI_CALL p_request;
26605 -} diva_didd_remove_adapter_t;
26606 +} __no_const diva_didd_remove_adapter_t;
26607 typedef struct _diva_didd_read_adapter_array {
26610 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26611 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26612 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26613 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26615 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26617 + pax_track_stack();
26619 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26621 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26622 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26623 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26624 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26625 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26629 + pax_track_stack();
26632 Id = ((word)plci->Id<<8)|a->Id;
26633 PUT_WORD(&SS_Ind[4],0x0000);
26634 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26638 + pax_track_stack();
26641 for(i=0;i<8;i++) bp_parms[i].length = 0;
26642 for(i=0;i<2;i++) global_config[i].length = 0;
26643 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26644 const byte llc3[] = {4,3,2,2,6,6,0};
26645 const byte header[] = {0,2,3,3,0,0,0};
26647 + pax_track_stack();
26649 for(i=0;i<8;i++) bp_parms[i].length = 0;
26650 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26651 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26652 @@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26653 word appl_number_group_type[MAX_APPL];
26656 + pax_track_stack();
26658 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26660 if(!a->group_optimization_enabled)
26661 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26662 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26663 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26664 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26666 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26668 + pax_track_stack();
26670 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26672 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26673 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26674 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26675 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26676 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26677 typedef struct _diva_os_idi_adapter_interface {
26678 diva_init_card_proc_t cleanup_adapter_proc;
26679 diva_cmd_card_proc_t cmd_proc;
26680 -} diva_os_idi_adapter_interface_t;
26681 +} __no_const diva_os_idi_adapter_interface_t;
26683 typedef struct _diva_os_xdi_adapter {
26684 struct list_head link;
26685 diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26686 --- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26687 +++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26688 @@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26690 void __user *argp = (void __user *)arg;
26692 + pax_track_stack();
26694 #define name iocpar.name
26695 #define bname iocpar.bname
26696 #define iocts iocpar.iocts
26697 diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26698 --- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26699 +++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26700 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26704 - if (copy_from_user(msg, buf, count))
26705 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26708 memcpy(msg, buf, count);
26709 diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26710 --- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26711 +++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26712 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26713 * it's worked so far. The end address needs +1 because __get_vm_area
26714 * allocates an extra guard page, so we need space for that.
26717 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26718 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26719 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26720 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26722 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26723 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26724 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26727 if (!switcher_vma) {
26729 printk("lguest: could not map switcher pages high\n");
26730 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26731 * Now the Switcher is mapped at the right address, we can't fail!
26732 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26734 - memcpy(switcher_vma->addr, start_switcher_text,
26735 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26736 end_switcher_text - start_switcher_text);
26738 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26739 diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26740 --- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26741 +++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26742 @@ -59,7 +59,7 @@ static struct {
26743 /* Offset from where switcher.S was compiled to where we've copied it */
26744 static unsigned long switcher_offset(void)
26746 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26747 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26750 /* This cpu's struct lguest_pages. */
26751 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26752 * These copies are pretty cheap, so we do them unconditionally: */
26753 /* Save the current Host top-level page directory.
26756 +#ifdef CONFIG_PAX_PER_CPU_PGD
26757 + pages->state.host_cr3 = read_cr3();
26759 pages->state.host_cr3 = __pa(current->mm->pgd);
26763 * Set up the Guest's page tables to see this CPU's pages (and no
26764 * other CPU's pages).
26765 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26766 * compiled-in switcher code and the high-mapped copy we just made.
26768 for (i = 0; i < IDT_ENTRIES; i++)
26769 - default_idt_entries[i] += switcher_offset();
26770 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26773 * Set up the Switcher's per-cpu areas.
26774 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26775 * it will be undisturbed when we switch. To change %cs and jump we
26776 * need this structure to feed to Intel's "lcall" instruction.
26778 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26779 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26780 lguest_entry.segment = LGUEST_CS;
26783 diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26784 --- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26785 +++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26787 #include <asm/page.h>
26788 #include <asm/segment.h>
26789 #include <asm/lguest.h>
26790 +#include <asm/processor-flags.h>
26792 // We mark the start of the code to copy
26793 // It's placed in .text tho it's never run here
26794 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26795 // Changes type when we load it: damn Intel!
26796 // For after we switch over our page tables
26797 // That entry will be read-only: we'd crash.
26799 +#ifdef CONFIG_PAX_KERNEXEC
26801 + xor $X86_CR0_WP, %edx
26805 movl $(GDT_ENTRY_TSS*8), %edx
26808 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26809 // Let's clear it again for our return.
26810 // The GDT descriptor of the Host
26811 // Points to the table after two "size" bytes
26812 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26813 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26814 // Clear "used" from type field (byte 5, bit 2)
26815 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26816 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26818 +#ifdef CONFIG_PAX_KERNEXEC
26820 + xor $X86_CR0_WP, %eax
26824 // Once our page table's switched, the Guest is live!
26825 // The Host fades as we run this final step.
26826 @@ -295,13 +309,12 @@ deliver_to_host:
26827 // I consulted gcc, and it gave
26828 // These instructions, which I gladly credit:
26829 leal (%edx,%ebx,8), %eax
26830 - movzwl (%eax),%edx
26831 - movl 4(%eax), %eax
26834 + movl 4(%eax), %edx
26836 // Now the address of the handler's in %edx
26837 // We call it now: its "iret" drops us home.
26839 + ljmp $__KERNEL_CS, $1f
26842 // Every interrupt can come to us here
26843 // But we must truly tell each apart.
26844 diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26845 --- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26846 +++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26847 @@ -162,9 +162,9 @@ struct mapped_device {
26851 - atomic_t event_nr;
26852 + atomic_unchecked_t event_nr;
26853 wait_queue_head_t eventq;
26854 - atomic_t uevent_seq;
26855 + atomic_unchecked_t uevent_seq;
26856 struct list_head uevent_list;
26857 spinlock_t uevent_lock; /* Protect access to uevent_list */
26859 @@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26860 rwlock_init(&md->map_lock);
26861 atomic_set(&md->holders, 1);
26862 atomic_set(&md->open_count, 0);
26863 - atomic_set(&md->event_nr, 0);
26864 - atomic_set(&md->uevent_seq, 0);
26865 + atomic_set_unchecked(&md->event_nr, 0);
26866 + atomic_set_unchecked(&md->uevent_seq, 0);
26867 INIT_LIST_HEAD(&md->uevent_list);
26868 spin_lock_init(&md->uevent_lock);
26870 @@ -1971,7 +1971,7 @@ static void event_callback(void *context
26872 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26874 - atomic_inc(&md->event_nr);
26875 + atomic_inc_unchecked(&md->event_nr);
26876 wake_up(&md->eventq);
26879 @@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26881 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26883 - return atomic_add_return(1, &md->uevent_seq);
26884 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26887 uint32_t dm_get_event_nr(struct mapped_device *md)
26889 - return atomic_read(&md->event_nr);
26890 + return atomic_read_unchecked(&md->event_nr);
26893 int dm_wait_event(struct mapped_device *md, int event_nr)
26895 return wait_event_interruptible(md->eventq,
26896 - (event_nr != atomic_read(&md->event_nr)));
26897 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26900 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26901 diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26902 --- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26903 +++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26904 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26905 cmd == DM_LIST_VERSIONS_CMD)
26908 - if ((cmd == DM_DEV_CREATE_CMD)) {
26909 + if (cmd == DM_DEV_CREATE_CMD) {
26910 if (!*param->name) {
26911 DMWARN("name not supplied when creating device");
26913 diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26914 --- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26915 +++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26916 @@ -42,7 +42,7 @@ enum dm_raid1_error {
26919 struct mirror_set *ms;
26920 - atomic_t error_count;
26921 + atomic_unchecked_t error_count;
26922 unsigned long error_type;
26923 struct dm_dev *dev;
26925 @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26928 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26929 - if (!atomic_read(&m->error_count))
26930 + if (!atomic_read_unchecked(&m->error_count))
26934 @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26935 * simple way to tell if a device has encountered
26938 - atomic_inc(&m->error_count);
26939 + atomic_inc_unchecked(&m->error_count);
26941 if (test_and_set_bit(error_type, &m->error_type))
26943 @@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26944 struct mirror *m = get_default_mirror(ms);
26947 - if (likely(!atomic_read(&m->error_count)))
26948 + if (likely(!atomic_read_unchecked(&m->error_count)))
26951 if (m-- == ms->mirror)
26952 @@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26954 struct mirror *default_mirror = get_default_mirror(m->ms);
26956 - return !atomic_read(&default_mirror->error_count);
26957 + return !atomic_read_unchecked(&default_mirror->error_count);
26960 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26961 @@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
26963 if (likely(region_in_sync(ms, region, 1)))
26964 m = choose_mirror(ms, bio->bi_sector);
26965 - else if (m && atomic_read(&m->error_count))
26966 + else if (m && atomic_read_unchecked(&m->error_count))
26970 @@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
26973 ms->mirror[mirror].ms = ms;
26974 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26975 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26976 ms->mirror[mirror].error_type = 0;
26977 ms->mirror[mirror].offset = offset;
26979 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26981 static char device_status_char(struct mirror *m)
26983 - if (!atomic_read(&(m->error_count)))
26984 + if (!atomic_read_unchecked(&(m->error_count)))
26987 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26988 diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
26989 --- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
26990 +++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
26991 @@ -20,7 +20,7 @@ struct stripe {
26992 struct dm_dev *dev;
26993 sector_t physical_start;
26995 - atomic_t error_count;
26996 + atomic_unchecked_t error_count;
27000 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27004 - atomic_set(&(sc->stripe[i].error_count), 0);
27005 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27009 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27010 DMEMIT("%d ", sc->stripes);
27011 for (i = 0; i < sc->stripes; i++) {
27012 DMEMIT("%s ", sc->stripe[i].dev->name);
27013 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27014 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27018 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27020 for (i = 0; i < sc->stripes; i++)
27021 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27022 - atomic_inc(&(sc->stripe[i].error_count));
27023 - if (atomic_read(&(sc->stripe[i].error_count)) <
27024 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27025 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27026 DM_IO_ERROR_THRESHOLD)
27027 schedule_work(&sc->trigger_event);
27029 diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27030 --- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27031 +++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27032 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27036 - if ((start >= dev_size) || (start + len > dev_size)) {
27037 + if ((start >= dev_size) || (len > dev_size - start)) {
27038 DMWARN("%s: %s too small for target: "
27039 "start=%llu, len=%llu, dev_size=%llu",
27040 dm_device_name(ti->table->md), bdevname(bdev, b),
27041 diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27042 --- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27043 +++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27044 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27045 * start build, activate spare
27047 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27048 -static atomic_t md_event_count;
27049 +static atomic_unchecked_t md_event_count;
27050 void md_new_event(mddev_t *mddev)
27052 - atomic_inc(&md_event_count);
27053 + atomic_inc_unchecked(&md_event_count);
27054 wake_up(&md_event_waiters);
27056 EXPORT_SYMBOL_GPL(md_new_event);
27057 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27059 static void md_new_event_inintr(mddev_t *mddev)
27061 - atomic_inc(&md_event_count);
27062 + atomic_inc_unchecked(&md_event_count);
27063 wake_up(&md_event_waiters);
27066 @@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27068 rdev->preferred_minor = 0xffff;
27069 rdev->data_offset = le64_to_cpu(sb->data_offset);
27070 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27071 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27073 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27074 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27075 @@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27077 sb->resync_offset = cpu_to_le64(0);
27079 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27080 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27082 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27083 sb->size = cpu_to_le64(mddev->dev_sectors);
27084 @@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27086 errors_show(mdk_rdev_t *rdev, char *page)
27088 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27089 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27093 @@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27095 unsigned long n = simple_strtoul(buf, &e, 10);
27096 if (*buf && (*e == 0 || *e == '\n')) {
27097 - atomic_set(&rdev->corrected_errors, n);
27098 + atomic_set_unchecked(&rdev->corrected_errors, n);
27102 @@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27103 rdev->last_read_error.tv_sec = 0;
27104 rdev->last_read_error.tv_nsec = 0;
27105 atomic_set(&rdev->nr_pending, 0);
27106 - atomic_set(&rdev->read_errors, 0);
27107 - atomic_set(&rdev->corrected_errors, 0);
27108 + atomic_set_unchecked(&rdev->read_errors, 0);
27109 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27111 INIT_LIST_HEAD(&rdev->same_set);
27112 init_waitqueue_head(&rdev->blocked_wait);
27113 @@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27115 spin_unlock(&pers_lock);
27116 seq_printf(seq, "\n");
27117 - mi->event = atomic_read(&md_event_count);
27118 + mi->event = atomic_read_unchecked(&md_event_count);
27121 if (v == (void*)2) {
27122 @@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27123 chunk_kb ? "KB" : "B");
27124 if (bitmap->file) {
27125 seq_printf(seq, ", file: ");
27126 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27127 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27130 seq_printf(seq, "\n");
27131 @@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27133 struct seq_file *p = file->private_data;
27135 - mi->event = atomic_read(&md_event_count);
27136 + mi->event = atomic_read_unchecked(&md_event_count);
27140 @@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27141 /* always allow read */
27142 mask = POLLIN | POLLRDNORM;
27144 - if (mi->event != atomic_read(&md_event_count))
27145 + if (mi->event != atomic_read_unchecked(&md_event_count))
27146 mask |= POLLERR | POLLPRI;
27149 @@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27152 (int)part_stat_read(&disk->part0, sectors[1]) -
27153 - atomic_read(&disk->sync_io);
27154 + atomic_read_unchecked(&disk->sync_io);
27155 /* sync IO will cause sync_io to increase before the disk_stats
27156 * as sync_io is counted when a request starts, and
27157 * disk_stats is counted when it completes.
27158 diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27159 --- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27160 +++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27161 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27162 * only maintained for arrays that
27163 * support hot removal
27165 - atomic_t read_errors; /* number of consecutive read errors that
27166 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27167 * we have tried to ignore.
27169 struct timespec last_read_error; /* monotonic time since our
27172 - atomic_t corrected_errors; /* number of corrected read errors,
27173 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27174 * for reporting to userspace and storing
27177 @@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27179 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27181 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27182 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27185 struct mdk_personality
27186 diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27187 --- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27188 +++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27189 @@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27190 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27191 set_bit(R10BIO_Uptodate, &r10_bio->state);
27193 - atomic_add(r10_bio->sectors,
27194 + atomic_add_unchecked(r10_bio->sectors,
27195 &conf->mirrors[d].rdev->corrected_errors);
27196 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27197 md_error(r10_bio->mddev,
27198 @@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27200 struct timespec cur_time_mon;
27201 unsigned long hours_since_last;
27202 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27203 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27205 ktime_get_ts(&cur_time_mon);
27207 @@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27208 * overflowing the shift of read_errors by hours_since_last.
27210 if (hours_since_last >= 8 * sizeof(read_errors))
27211 - atomic_set(&rdev->read_errors, 0);
27212 + atomic_set_unchecked(&rdev->read_errors, 0);
27214 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27215 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27219 @@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27222 check_decay_read_errors(mddev, rdev);
27223 - atomic_inc(&rdev->read_errors);
27224 - cur_read_error_count = atomic_read(&rdev->read_errors);
27225 + atomic_inc_unchecked(&rdev->read_errors);
27226 + cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27227 if (cur_read_error_count > max_read_errors) {
27230 @@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27231 test_bit(In_sync, &rdev->flags)) {
27232 atomic_inc(&rdev->nr_pending);
27234 - atomic_add(s, &rdev->corrected_errors);
27235 + atomic_add_unchecked(s, &rdev->corrected_errors);
27236 if (sync_page_io(rdev,
27237 r10_bio->devs[sl].addr +
27239 diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27240 --- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27241 +++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27242 @@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27243 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27245 rdev = conf->mirrors[d].rdev;
27246 - atomic_add(s, &rdev->corrected_errors);
27247 + atomic_add_unchecked(s, &rdev->corrected_errors);
27248 if (sync_page_io(rdev,
27251 @@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27252 /* Well, this device is dead */
27253 md_error(mddev, rdev);
27255 - atomic_add(s, &rdev->corrected_errors);
27256 + atomic_add_unchecked(s, &rdev->corrected_errors);
27258 "md/raid1:%s: read error corrected "
27259 "(%d sectors at %llu on %s)\n",
27260 diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27261 --- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27262 +++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27263 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27264 bi->bi_next = NULL;
27265 if ((rw & WRITE) &&
27266 test_bit(R5_ReWrite, &sh->dev[i].flags))
27267 - atomic_add(STRIPE_SECTORS,
27268 + atomic_add_unchecked(STRIPE_SECTORS,
27269 &rdev->corrected_errors);
27270 generic_make_request(bi);
27272 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27273 clear_bit(R5_ReadError, &sh->dev[i].flags);
27274 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27276 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27277 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27278 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27279 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27281 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27283 rdev = conf->disks[i].rdev;
27285 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27286 - atomic_inc(&rdev->read_errors);
27287 + atomic_inc_unchecked(&rdev->read_errors);
27288 if (conf->mddev->degraded >= conf->max_degraded)
27289 printk_rl(KERN_WARNING
27290 "md/raid:%s: read error not correctable "
27291 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27292 (unsigned long long)(sh->sector
27293 + rdev->data_offset),
27295 - else if (atomic_read(&rdev->read_errors)
27296 + else if (atomic_read_unchecked(&rdev->read_errors)
27297 > conf->max_nr_stripes)
27298 printk(KERN_WARNING
27299 "md/raid:%s: Too many read errors, failing device %s.\n",
27300 @@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27302 struct stripe_head sh2;
27304 + pax_track_stack();
27306 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27307 stripe = new_sector;
27308 diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27309 --- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27310 +++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27311 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27313 int x[32], y[32], w[32], h[32];
27315 + pax_track_stack();
27317 /* clear out memory */
27318 memset(&line_list[0], 0x00, sizeof(u32)*32);
27319 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27320 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27321 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27322 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27323 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27324 u8 buf[HOST_LINK_BUF_SIZE];
27327 + pax_track_stack();
27329 dprintk("%s\n", __func__);
27331 /* check if we have space for a link buf in the rx_buffer */
27332 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27333 unsigned long timeout;
27336 + pax_track_stack();
27338 dprintk("%s\n", __func__);
27340 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27341 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27342 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27343 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27344 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
27347 dmx_section_cb sec;
27351 struct dvb_demux *demux;
27353 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27354 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27355 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27356 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27357 const struct dvb_device *template, void *priv, int type)
27359 struct dvb_device *dvbdev;
27360 - struct file_operations *dvbdevfops;
27361 + file_operations_no_const *dvbdevfops;
27362 struct device *clsdev;
27365 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27366 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27367 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27368 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27369 struct dib0700_adapter_state {
27370 int (*set_param_save) (struct dvb_frontend *,
27371 struct dvb_frontend_parameters *);
27375 static int dib7070_set_param_override(struct dvb_frontend *fe,
27376 struct dvb_frontend_parameters *fep)
27377 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27378 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27379 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27380 @@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27384 + pax_track_stack();
27386 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27387 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27388 hx.addr, hx.len, hx.chk);
27389 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27390 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27391 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27392 @@ -95,7 +95,7 @@ struct su3000_state {
27394 struct s6x0_state {
27395 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27400 static int dvb_usb_dw2102_debug;
27401 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27402 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27403 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27404 @@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27405 packet_size = 0x31;
27408 + pax_track_stack();
27410 info("FRM Starting Firmware Download");
27412 @@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27413 int ret = 0, len_in;
27414 u8 data[512] = {0};
27416 + pax_track_stack();
27420 info("FRM Firmware Cold Reset");
27421 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27422 --- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27423 +++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27424 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27425 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27426 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27427 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27431 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27432 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27433 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27434 --- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27435 +++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27436 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27440 + pax_track_stack();
27442 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27445 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27446 --- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27447 +++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27448 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27452 + pax_track_stack();
27454 dprintk("Firmware is %zd bytes\n",fw->size);
27456 /* Get eprom data */
27457 diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27458 --- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27459 +++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27460 @@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27461 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27462 mutex_unlock(&dev->lock);
27464 - if (copy_to_user(data, readbuf, i))
27465 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27469 diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27470 --- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27471 +++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27472 @@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27474 int rc_register_device(struct rc_dev *dev)
27476 - static atomic_t devno = ATOMIC_INIT(0);
27477 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
27478 struct rc_map *rc_map;
27481 @@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27483 dev->input_dev->close = ir_close;
27485 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27486 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27487 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27488 dev_set_drvdata(&dev->dev, dev);
27489 rc = device_add(&dev->dev);
27490 diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27491 --- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27492 +++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27493 @@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27495 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27497 -static atomic_t cx18_instance = ATOMIC_INIT(0);
27498 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27500 /* Parameter declarations */
27501 static int cardtype[CX18_MAX_CARDS];
27502 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27503 struct i2c_client c;
27506 + pax_track_stack();
27508 memset(&c, 0, sizeof(c));
27509 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27510 c.adapter = &cx->i2c_adap[0];
27511 @@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27514 /* FIXME - module parameter arrays constrain max instances */
27515 - i = atomic_inc_return(&cx18_instance) - 1;
27516 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27517 if (i >= CX18_MAX_CARDS) {
27518 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27519 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27520 diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27521 --- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27522 +++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27523 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27524 bool handle = false;
27525 struct ir_raw_event ir_core_event[64];
27527 + pax_track_stack();
27531 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27532 diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27533 --- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27534 +++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27535 @@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27536 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27538 /* ivtv instance counter */
27539 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
27540 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27542 /* Parameter declarations */
27543 static int cardtype[IVTV_MAX_CARDS];
27544 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27545 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27546 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27547 @@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27548 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27550 do_gettimeofday(&vb->ts);
27551 - vb->field_count = atomic_add_return(2, &fh->field_count);
27552 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27553 if (csr & csr_error) {
27554 vb->state = VIDEOBUF_ERROR;
27555 if (!atomic_read(&fh->cam->in_reset)) {
27556 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27557 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27558 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27559 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27560 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27561 struct videobuf_queue vbq;
27562 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27563 - atomic_t field_count; /* field counter for videobuf_buffer */
27564 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27565 /* accessing cam here doesn't need serialisation: it's constant */
27566 struct omap24xxcam_device *cam;
27568 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27569 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27570 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27571 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27573 struct tveeprom tvdata;
27575 + pax_track_stack();
27577 memset(&tvdata,0,sizeof(tvdata));
27579 eeprom = pvr2_eeprom_fetch(hdw);
27580 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27581 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27582 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27583 @@ -196,7 +196,7 @@ struct pvr2_hdw {
27586 struct i2c_adapter i2c_adap;
27587 - struct i2c_algorithm i2c_algo;
27588 + i2c_algorithm_no_const i2c_algo;
27589 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27590 int i2c_cx25840_hack_state;
27592 diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27593 --- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27594 +++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27595 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27596 unsigned char localPAT[256];
27597 unsigned char localPMT[256];
27599 + pax_track_stack();
27601 /* Set video format - must be done first as it resets other settings */
27602 set_reg8(client, 0x41, h->video_format);
27604 diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27605 --- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27606 +++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27607 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27609 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27611 + pax_track_stack();
27613 /* While any outstand message on the bus exists... */
27616 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27618 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27620 + pax_track_stack();
27624 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27625 diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27626 --- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27627 +++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27628 @@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27630 /* Platform device functions */
27632 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27633 +static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27634 .vidioc_querycap = timblogiw_querycap,
27635 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27636 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27637 @@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27638 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27641 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27642 +static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27643 .owner = THIS_MODULE,
27644 .open = timblogiw_open,
27645 .release = timblogiw_close,
27646 diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27647 --- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27648 +++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27649 @@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27650 unsigned char rv, gv, bv;
27651 static unsigned char *Y, *U, *V;
27653 + pax_track_stack();
27655 frame = usbvision->cur_frame;
27656 image_size = frame->frmwidth * frame->frmheight;
27657 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27658 diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27659 --- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27660 +++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27661 @@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27662 EXPORT_SYMBOL_GPL(v4l2_device_put);
27664 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27665 - atomic_t *instance)
27666 + atomic_unchecked_t *instance)
27668 - int num = atomic_inc_return(instance) - 1;
27669 + int num = atomic_inc_return_unchecked(instance) - 1;
27670 int len = strlen(basename);
27672 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27673 diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27674 --- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27675 +++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27676 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27678 struct videobuf_queue q;
27680 + pax_track_stack();
27682 /* Required to make generic handler to call __videobuf_alloc */
27683 q.int_ops = &sg_ops;
27685 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27686 --- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27687 +++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27688 @@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27689 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27690 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27692 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27693 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27695 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27696 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27700 * Rounding UP to nearest 4-kB boundary here...
27702 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27703 --- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27704 +++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27705 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27709 +static inline void
27710 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27712 + if (phy_info->port_details) {
27713 + phy_info->port_details->rphy = rphy;
27714 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27715 + ioc->name, rphy));
27719 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27720 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27721 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27722 + ioc->name, rphy, rphy->dev.release));
27728 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27729 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27733 -static inline void
27734 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27736 - if (phy_info->port_details) {
27737 - phy_info->port_details->rphy = rphy;
27738 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27739 - ioc->name, rphy));
27743 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27744 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27745 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27746 - ioc->name, rphy, rphy->dev.release));
27750 static inline struct sas_port *
27751 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27753 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27754 --- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27755 +++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27756 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27758 h = shost_priv(SChost);
27761 - if (h->info_kbuf == NULL)
27762 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27763 - return h->info_kbuf;
27764 - h->info_kbuf[0] = '\0';
27768 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27769 - h->info_kbuf[size-1] = '\0';
27771 + if (h->info_kbuf == NULL)
27772 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27773 + return h->info_kbuf;
27774 + h->info_kbuf[0] = '\0';
27776 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27777 + h->info_kbuf[size-1] = '\0';
27779 return h->info_kbuf;
27781 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27782 --- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27783 +++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27784 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27785 struct i2o_message *msg;
27788 + pax_track_stack();
27790 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27793 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27794 --- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27795 +++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27796 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27797 "Array Controller Device"
27800 -static char *chtostr(u8 * chars, int n)
27804 - return strncat(tmp, (char *)chars, n);
27807 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27810 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27812 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27813 seq_printf(seq, "%-#8x", ddm_table.module_id);
27814 - seq_printf(seq, "%-29s",
27815 - chtostr(ddm_table.module_name_version, 28));
27816 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27817 seq_printf(seq, "%9d ", ddm_table.data_size);
27818 seq_printf(seq, "%8d", ddm_table.code_size);
27820 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27822 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27823 seq_printf(seq, "%-#8x", dst->module_id);
27824 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27825 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27826 + seq_printf(seq, "%-.28s", dst->module_name_version);
27827 + seq_printf(seq, "%-.8s", dst->date);
27828 seq_printf(seq, "%8d ", dst->module_size);
27829 seq_printf(seq, "%8d ", dst->mpb_size);
27830 seq_printf(seq, "0x%04x", dst->module_flags);
27831 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27832 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27833 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27834 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27835 - seq_printf(seq, "Vendor info : %s\n",
27836 - chtostr((u8 *) (work32 + 2), 16));
27837 - seq_printf(seq, "Product info : %s\n",
27838 - chtostr((u8 *) (work32 + 6), 16));
27839 - seq_printf(seq, "Description : %s\n",
27840 - chtostr((u8 *) (work32 + 10), 16));
27841 - seq_printf(seq, "Product rev. : %s\n",
27842 - chtostr((u8 *) (work32 + 14), 8));
27843 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27844 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27845 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27846 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27848 seq_printf(seq, "Serial number : ");
27849 print_serial_number(seq, (u8 *) (work32 + 16),
27850 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27853 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27854 - seq_printf(seq, "Module name : %s\n",
27855 - chtostr(result.module_name, 24));
27856 - seq_printf(seq, "Module revision : %s\n",
27857 - chtostr(result.module_rev, 8));
27858 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27859 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27861 seq_printf(seq, "Serial number : ");
27862 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27863 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27867 - seq_printf(seq, "Device name : %s\n",
27868 - chtostr(result.device_name, 64));
27869 - seq_printf(seq, "Service name : %s\n",
27870 - chtostr(result.service_name, 64));
27871 - seq_printf(seq, "Physical name : %s\n",
27872 - chtostr(result.physical_location, 64));
27873 - seq_printf(seq, "Instance number : %s\n",
27874 - chtostr(result.instance_number, 4));
27875 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27876 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27877 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27878 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27882 diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27883 --- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27884 +++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27885 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27887 spin_lock_irqsave(&c->context_list_lock, flags);
27889 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27890 - atomic_inc(&c->context_list_counter);
27891 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27892 + atomic_inc_unchecked(&c->context_list_counter);
27894 - entry->context = atomic_read(&c->context_list_counter);
27895 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27897 list_add(&entry->list, &c->context_list);
27899 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27901 #if BITS_PER_LONG == 64
27902 spin_lock_init(&c->context_list_lock);
27903 - atomic_set(&c->context_list_counter, 0);
27904 + atomic_set_unchecked(&c->context_list_counter, 0);
27905 INIT_LIST_HEAD(&c->context_list);
27908 diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27909 --- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27910 +++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27911 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27913 struct abx500_device_entry {
27914 struct list_head list;
27915 - struct abx500_ops ops;
27916 + abx500_ops_no_const ops;
27917 struct device *dev;
27920 diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27921 --- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27922 +++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27925 #include <linux/kernel.h>
27926 #include <linux/module.h>
27927 +#include <linux/slab.h>
27928 #include <linux/init.h>
27929 #include <linux/pci.h>
27930 #include <linux/interrupt.h>
27931 diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27932 --- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27933 +++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27934 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27935 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27938 + pax_track_stack();
27940 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27943 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27944 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27945 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27946 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27947 * the lid is closed. This leads to interrupts as soon as a little move
27950 - atomic_inc(&lis3_dev.count);
27951 + atomic_inc_unchecked(&lis3_dev.count);
27953 wake_up_interruptible(&lis3_dev.misc_wait);
27954 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27955 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27956 if (lis3_dev.pm_dev)
27957 pm_runtime_get_sync(lis3_dev.pm_dev);
27959 - atomic_set(&lis3_dev.count, 0);
27960 + atomic_set_unchecked(&lis3_dev.count, 0);
27964 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27965 add_wait_queue(&lis3_dev.misc_wait, &wait);
27967 set_current_state(TASK_INTERRUPTIBLE);
27968 - data = atomic_xchg(&lis3_dev.count, 0);
27969 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27973 @@ -583,7 +583,7 @@ out:
27974 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27976 poll_wait(file, &lis3_dev.misc_wait, wait);
27977 - if (atomic_read(&lis3_dev.count))
27978 + if (atomic_read_unchecked(&lis3_dev.count))
27979 return POLLIN | POLLRDNORM;
27982 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
27983 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
27984 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
27985 @@ -265,7 +265,7 @@ struct lis3lv02d {
27986 struct input_polled_dev *idev; /* input device */
27987 struct platform_device *pdev; /* platform device */
27988 struct regulator_bulk_data regulators[2];
27989 - atomic_t count; /* interrupt count after last read */
27990 + atomic_unchecked_t count; /* interrupt count after last read */
27991 union axis_conversion ac; /* hw -> logical axis */
27992 int mapped_btns[3];
27994 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
27995 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
27996 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
27997 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27998 unsigned long nsec;
28000 nsec = CLKS2NSEC(clks);
28001 - atomic_long_inc(&mcs_op_statistics[op].count);
28002 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28003 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28004 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28005 if (mcs_op_statistics[op].max < nsec)
28006 mcs_op_statistics[op].max = nsec;
28008 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28009 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28010 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28013 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28015 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28016 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28018 - unsigned long val = atomic_long_read(v);
28019 + unsigned long val = atomic_long_read_unchecked(v);
28021 seq_printf(s, "%16lu %s\n", val, id);
28023 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28025 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28026 for (op = 0; op < mcsop_last; op++) {
28027 - count = atomic_long_read(&mcs_op_statistics[op].count);
28028 - total = atomic_long_read(&mcs_op_statistics[op].total);
28029 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28030 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28031 max = mcs_op_statistics[op].max;
28032 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28033 count ? total / count : 0, max);
28034 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28035 --- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28036 +++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28037 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28040 struct gru_stats_s {
28041 - atomic_long_t vdata_alloc;
28042 - atomic_long_t vdata_free;
28043 - atomic_long_t gts_alloc;
28044 - atomic_long_t gts_free;
28045 - atomic_long_t gms_alloc;
28046 - atomic_long_t gms_free;
28047 - atomic_long_t gts_double_allocate;
28048 - atomic_long_t assign_context;
28049 - atomic_long_t assign_context_failed;
28050 - atomic_long_t free_context;
28051 - atomic_long_t load_user_context;
28052 - atomic_long_t load_kernel_context;
28053 - atomic_long_t lock_kernel_context;
28054 - atomic_long_t unlock_kernel_context;
28055 - atomic_long_t steal_user_context;
28056 - atomic_long_t steal_kernel_context;
28057 - atomic_long_t steal_context_failed;
28058 - atomic_long_t nopfn;
28059 - atomic_long_t asid_new;
28060 - atomic_long_t asid_next;
28061 - atomic_long_t asid_wrap;
28062 - atomic_long_t asid_reuse;
28063 - atomic_long_t intr;
28064 - atomic_long_t intr_cbr;
28065 - atomic_long_t intr_tfh;
28066 - atomic_long_t intr_spurious;
28067 - atomic_long_t intr_mm_lock_failed;
28068 - atomic_long_t call_os;
28069 - atomic_long_t call_os_wait_queue;
28070 - atomic_long_t user_flush_tlb;
28071 - atomic_long_t user_unload_context;
28072 - atomic_long_t user_exception;
28073 - atomic_long_t set_context_option;
28074 - atomic_long_t check_context_retarget_intr;
28075 - atomic_long_t check_context_unload;
28076 - atomic_long_t tlb_dropin;
28077 - atomic_long_t tlb_preload_page;
28078 - atomic_long_t tlb_dropin_fail_no_asid;
28079 - atomic_long_t tlb_dropin_fail_upm;
28080 - atomic_long_t tlb_dropin_fail_invalid;
28081 - atomic_long_t tlb_dropin_fail_range_active;
28082 - atomic_long_t tlb_dropin_fail_idle;
28083 - atomic_long_t tlb_dropin_fail_fmm;
28084 - atomic_long_t tlb_dropin_fail_no_exception;
28085 - atomic_long_t tfh_stale_on_fault;
28086 - atomic_long_t mmu_invalidate_range;
28087 - atomic_long_t mmu_invalidate_page;
28088 - atomic_long_t flush_tlb;
28089 - atomic_long_t flush_tlb_gru;
28090 - atomic_long_t flush_tlb_gru_tgh;
28091 - atomic_long_t flush_tlb_gru_zero_asid;
28093 - atomic_long_t copy_gpa;
28094 - atomic_long_t read_gpa;
28096 - atomic_long_t mesq_receive;
28097 - atomic_long_t mesq_receive_none;
28098 - atomic_long_t mesq_send;
28099 - atomic_long_t mesq_send_failed;
28100 - atomic_long_t mesq_noop;
28101 - atomic_long_t mesq_send_unexpected_error;
28102 - atomic_long_t mesq_send_lb_overflow;
28103 - atomic_long_t mesq_send_qlimit_reached;
28104 - atomic_long_t mesq_send_amo_nacked;
28105 - atomic_long_t mesq_send_put_nacked;
28106 - atomic_long_t mesq_page_overflow;
28107 - atomic_long_t mesq_qf_locked;
28108 - atomic_long_t mesq_qf_noop_not_full;
28109 - atomic_long_t mesq_qf_switch_head_failed;
28110 - atomic_long_t mesq_qf_unexpected_error;
28111 - atomic_long_t mesq_noop_unexpected_error;
28112 - atomic_long_t mesq_noop_lb_overflow;
28113 - atomic_long_t mesq_noop_qlimit_reached;
28114 - atomic_long_t mesq_noop_amo_nacked;
28115 - atomic_long_t mesq_noop_put_nacked;
28116 - atomic_long_t mesq_noop_page_overflow;
28117 + atomic_long_unchecked_t vdata_alloc;
28118 + atomic_long_unchecked_t vdata_free;
28119 + atomic_long_unchecked_t gts_alloc;
28120 + atomic_long_unchecked_t gts_free;
28121 + atomic_long_unchecked_t gms_alloc;
28122 + atomic_long_unchecked_t gms_free;
28123 + atomic_long_unchecked_t gts_double_allocate;
28124 + atomic_long_unchecked_t assign_context;
28125 + atomic_long_unchecked_t assign_context_failed;
28126 + atomic_long_unchecked_t free_context;
28127 + atomic_long_unchecked_t load_user_context;
28128 + atomic_long_unchecked_t load_kernel_context;
28129 + atomic_long_unchecked_t lock_kernel_context;
28130 + atomic_long_unchecked_t unlock_kernel_context;
28131 + atomic_long_unchecked_t steal_user_context;
28132 + atomic_long_unchecked_t steal_kernel_context;
28133 + atomic_long_unchecked_t steal_context_failed;
28134 + atomic_long_unchecked_t nopfn;
28135 + atomic_long_unchecked_t asid_new;
28136 + atomic_long_unchecked_t asid_next;
28137 + atomic_long_unchecked_t asid_wrap;
28138 + atomic_long_unchecked_t asid_reuse;
28139 + atomic_long_unchecked_t intr;
28140 + atomic_long_unchecked_t intr_cbr;
28141 + atomic_long_unchecked_t intr_tfh;
28142 + atomic_long_unchecked_t intr_spurious;
28143 + atomic_long_unchecked_t intr_mm_lock_failed;
28144 + atomic_long_unchecked_t call_os;
28145 + atomic_long_unchecked_t call_os_wait_queue;
28146 + atomic_long_unchecked_t user_flush_tlb;
28147 + atomic_long_unchecked_t user_unload_context;
28148 + atomic_long_unchecked_t user_exception;
28149 + atomic_long_unchecked_t set_context_option;
28150 + atomic_long_unchecked_t check_context_retarget_intr;
28151 + atomic_long_unchecked_t check_context_unload;
28152 + atomic_long_unchecked_t tlb_dropin;
28153 + atomic_long_unchecked_t tlb_preload_page;
28154 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28155 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28156 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28157 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28158 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28159 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28160 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28161 + atomic_long_unchecked_t tfh_stale_on_fault;
28162 + atomic_long_unchecked_t mmu_invalidate_range;
28163 + atomic_long_unchecked_t mmu_invalidate_page;
28164 + atomic_long_unchecked_t flush_tlb;
28165 + atomic_long_unchecked_t flush_tlb_gru;
28166 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28167 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28169 + atomic_long_unchecked_t copy_gpa;
28170 + atomic_long_unchecked_t read_gpa;
28172 + atomic_long_unchecked_t mesq_receive;
28173 + atomic_long_unchecked_t mesq_receive_none;
28174 + atomic_long_unchecked_t mesq_send;
28175 + atomic_long_unchecked_t mesq_send_failed;
28176 + atomic_long_unchecked_t mesq_noop;
28177 + atomic_long_unchecked_t mesq_send_unexpected_error;
28178 + atomic_long_unchecked_t mesq_send_lb_overflow;
28179 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28180 + atomic_long_unchecked_t mesq_send_amo_nacked;
28181 + atomic_long_unchecked_t mesq_send_put_nacked;
28182 + atomic_long_unchecked_t mesq_page_overflow;
28183 + atomic_long_unchecked_t mesq_qf_locked;
28184 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28185 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28186 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28187 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28188 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28189 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28190 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28191 + atomic_long_unchecked_t mesq_noop_put_nacked;
28192 + atomic_long_unchecked_t mesq_noop_page_overflow;
28196 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28197 tghop_invalidate, mcsop_last};
28199 struct mcs_op_statistic {
28200 - atomic_long_t count;
28201 - atomic_long_t total;
28202 + atomic_long_unchecked_t count;
28203 + atomic_long_unchecked_t total;
28207 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28209 #define STAT(id) do { \
28210 if (gru_options & OPT_STATS) \
28211 - atomic_long_inc(&gru_stats.id); \
28212 + atomic_long_inc_unchecked(&gru_stats.id); \
28215 #ifdef CONFIG_SGI_GRU_DEBUG
28216 diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28217 --- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28218 +++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28219 @@ -289,7 +289,7 @@ struct xpc_interface {
28220 xpc_notify_func, void *);
28221 void (*received) (short, int, void *);
28222 enum xp_retval (*partid_to_nasids) (short, void *);
28226 extern struct xpc_interface xpc_interface;
28228 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28229 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28230 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28231 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28232 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28233 unsigned long timeo = jiffies + HZ;
28235 + pax_track_stack();
28237 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28238 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28240 @@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28241 unsigned long initial_adr;
28242 int initial_len = len;
28244 + pax_track_stack();
28246 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28247 adr += chip->start;
28249 @@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28253 + pax_track_stack();
28255 adr += chip->start;
28258 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28259 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28260 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28261 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28262 unsigned long cmd_addr;
28263 struct cfi_private *cfi = map->fldrv_priv;
28265 + pax_track_stack();
28267 adr += chip->start;
28269 /* Ensure cmd read/writes are aligned. */
28270 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28271 DECLARE_WAITQUEUE(wait, current);
28274 + pax_track_stack();
28276 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28277 if (adr & (map_bankwidth(map)-1))
28279 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28280 DECLARE_WAITQUEUE(wait, current);
28283 + pax_track_stack();
28285 adr += chip->start;
28287 /* Let's determine this according to the interleave only once */
28288 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28289 unsigned long timeo = jiffies + HZ;
28290 DECLARE_WAITQUEUE(wait, current);
28292 + pax_track_stack();
28294 adr += chip->start;
28296 /* Let's determine this according to the interleave only once */
28297 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28298 unsigned long timeo = jiffies + HZ;
28299 DECLARE_WAITQUEUE(wait, current);
28301 + pax_track_stack();
28303 adr += chip->start;
28305 /* Let's determine this according to the interleave only once */
28306 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28307 --- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28308 +++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28309 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28311 /* The ECC will not be calculated correctly if less than 512 is written */
28313 - if (len != 0x200 && eccbuf)
28314 + if (len != 0x200)
28315 printk(KERN_WARNING
28316 "ECC needs a full sector write (adr: %lx size %lx)\n",
28317 (long) to, (long) len);
28318 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28319 --- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28320 +++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28321 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28322 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28324 /* Don't allow read past end of device */
28325 - if (from >= this->totlen)
28326 + if (from >= this->totlen || !len)
28329 /* Don't allow a single read to cross a 512-byte block boundary */
28330 diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28331 --- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28332 +++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28333 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28335 uint16_t srcunitswap = cpu_to_le16(srcunit);
28337 + pax_track_stack();
28339 eun = &part->EUNInfo[srcunit];
28340 xfer = &part->XferInfo[xferunit];
28341 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28342 diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28343 --- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28344 +++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28345 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28346 struct inftl_oob oob;
28349 + pax_track_stack();
28351 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28352 "pending=%d)\n", inftl, thisVUC, pendingblock);
28354 diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28355 --- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28356 +++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28357 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28358 struct INFTLPartition *ip;
28361 + pax_track_stack();
28363 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28366 diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28367 --- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28368 +++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28369 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28371 map_word pfow_val[4];
28373 + pax_track_stack();
28375 /* Check identification string */
28376 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28377 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28378 diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28379 --- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28380 +++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28381 @@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28383 struct mtd_info_user info;
28385 + pax_track_stack();
28387 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28389 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28390 diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28391 --- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28392 +++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28394 #include <linux/pci.h>
28395 #include <linux/mtd/mtd.h>
28396 #include <linux/module.h>
28397 +#include <linux/slab.h>
28399 #include "denali.h"
28401 diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28402 --- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28403 +++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28404 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28408 + pax_track_stack();
28410 memset(BlockMap, 0xff, sizeof(BlockMap));
28411 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28413 diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28414 --- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28415 +++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28417 #include <asm/errno.h>
28418 #include <linux/delay.h>
28419 #include <linux/slab.h>
28420 +#include <linux/sched.h>
28421 #include <linux/mtd/mtd.h>
28422 #include <linux/mtd/nand.h>
28423 #include <linux/mtd/nftl.h>
28424 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28425 struct mtd_info *mtd = nftl->mbd.mtd;
28428 + pax_track_stack();
28430 /* Assume logical EraseSize == physical erasesize for starting the scan.
28431 We'll sort it out later if we find a MediaHeader which says otherwise */
28432 /* Actually, we won't. The new DiskOnChip driver has already scanned
28433 diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28434 --- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28435 +++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28436 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28437 static int __init bytes_str_to_int(const char *str)
28440 - unsigned long result;
28441 + unsigned long result, scale = 1;
28443 result = simple_strtoul(str, &endp, 0);
28444 if (str == endp || result >= INT_MAX) {
28445 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28457 if (endp[1] == 'i' && endp[2] == 'B')
28460 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28465 + if ((intoverflow_t)result*scale >= INT_MAX) {
28466 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28471 + return result*scale;
28475 diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28476 --- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28477 +++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28478 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28479 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28480 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28482 -static struct bfa_ioc_hwif nw_hwif_ct;
28483 +static struct bfa_ioc_hwif nw_hwif_ct = {
28484 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28485 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28486 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28487 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28488 + .ioc_map_port = bfa_ioc_ct_map_port,
28489 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28490 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28491 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28492 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28493 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28494 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28495 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28496 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28500 * Called from bfa_ioc_attach() to map asic specific calls.
28501 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28503 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28505 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28506 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28507 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28508 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28509 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28510 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28511 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28512 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28513 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28514 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28515 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28516 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28517 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28519 ioc->ioc_hwif = &nw_hwif_ct;
28522 diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28523 --- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28524 +++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28525 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28526 struct bna_intr_info *intr_info =
28527 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28528 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28529 - struct bna_tx_event_cbfn tx_cbfn;
28530 + static struct bna_tx_event_cbfn tx_cbfn = {
28531 + /* Initialize the tx event handlers */
28532 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28533 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28534 + .tx_stall_cbfn = bnad_cb_tx_stall,
28535 + .tx_resume_cbfn = bnad_cb_tx_resume,
28536 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28539 unsigned long flags;
28541 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28542 tx_config->txq_depth = bnad->txq_depth;
28543 tx_config->tx_type = BNA_TX_T_REGULAR;
28545 - /* Initialize the tx event handlers */
28546 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28547 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28548 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28549 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28550 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28552 /* Get BNA's resource requirement for one tx object */
28553 spin_lock_irqsave(&bnad->bna_lock, flags);
28554 bna_tx_res_req(bnad->num_txq_per_tx,
28555 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28556 struct bna_intr_info *intr_info =
28557 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28558 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28559 - struct bna_rx_event_cbfn rx_cbfn;
28560 + static struct bna_rx_event_cbfn rx_cbfn = {
28561 + /* Initialize the Rx event handlers */
28562 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28563 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28564 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28565 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28566 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28567 + .rx_post_cbfn = bnad_cb_rx_post
28570 unsigned long flags;
28572 /* Initialize the Rx object configuration */
28573 bnad_init_rx_config(bnad, rx_config);
28575 - /* Initialize the Rx event handlers */
28576 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28577 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28578 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28579 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28580 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28581 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28583 /* Get BNA's resource requirement for one Rx object */
28584 spin_lock_irqsave(&bnad->bna_lock, flags);
28585 bna_rx_res_req(rx_config, res_info);
28586 diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28587 --- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28588 +++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28589 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28593 + pax_track_stack();
28595 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28596 goto test_nvram_done;
28598 diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28599 --- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28600 +++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28601 @@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28605 + pax_track_stack();
28610 diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28611 --- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28612 +++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28613 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28615 struct l2t_skb_cb {
28616 arp_failure_handler_func arp_failure_handler;
28620 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28622 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28623 --- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28624 +++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28625 @@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28626 unsigned int nchan = adap->params.nports;
28627 struct msix_entry entries[MAX_INGQ + 1];
28629 + pax_track_stack();
28631 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28632 entries[i].entry = i;
28634 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28635 --- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28636 +++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28637 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28638 u8 vpd[VPD_LEN], csum;
28639 unsigned int vpdr_len, kw_offset, id_len;
28641 + pax_track_stack();
28643 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28646 diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28647 --- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28648 +++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28649 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28651 struct e1000_hw *hw = &adapter->hw;
28652 struct e1000_mac_info *mac = &hw->mac;
28653 - struct e1000_mac_operations *func = &mac->ops;
28654 + e1000_mac_operations_no_const *func = &mac->ops;
28657 bool force_clear_smbi = false;
28658 diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28659 --- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28660 +++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28661 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28663 struct e1000_hw *hw = &adapter->hw;
28664 struct e1000_mac_info *mac = &hw->mac;
28665 - struct e1000_mac_operations *func = &mac->ops;
28666 + e1000_mac_operations_no_const *func = &mac->ops;
28668 /* Set media type */
28669 switch (adapter->pdev->device) {
28670 diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28671 --- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28672 +++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28673 @@ -775,6 +775,7 @@ struct e1000_mac_operations {
28674 void (*write_vfta)(struct e1000_hw *, u32, u32);
28675 s32 (*read_mac_addr)(struct e1000_hw *);
28677 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28679 /* Function pointers for the PHY. */
28680 struct e1000_phy_operations {
28681 @@ -798,6 +799,7 @@ struct e1000_phy_operations {
28682 void (*power_up)(struct e1000_hw *);
28683 void (*power_down)(struct e1000_hw *);
28685 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28687 /* Function pointers for the NVM. */
28688 struct e1000_nvm_operations {
28689 @@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28690 s32 (*validate)(struct e1000_hw *);
28691 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28693 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28695 struct e1000_mac_info {
28696 - struct e1000_mac_operations ops;
28697 + e1000_mac_operations_no_const ops;
28699 u8 perm_addr[ETH_ALEN];
28701 @@ -852,7 +855,7 @@ struct e1000_mac_info {
28704 struct e1000_phy_info {
28705 - struct e1000_phy_operations ops;
28706 + e1000_phy_operations_no_const ops;
28708 enum e1000_phy_type type;
28710 @@ -886,7 +889,7 @@ struct e1000_phy_info {
28713 struct e1000_nvm_info {
28714 - struct e1000_nvm_operations ops;
28715 + e1000_nvm_operations_no_const ops;
28717 enum e1000_nvm_type type;
28718 enum e1000_nvm_override override;
28719 diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28720 --- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28721 +++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28722 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28723 unsigned char buf[512];
28726 + pax_track_stack();
28731 diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28732 --- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28733 +++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28734 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28735 s32 (*read_mac_addr)(struct e1000_hw *);
28736 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28738 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28740 struct e1000_phy_operations {
28741 s32 (*acquire)(struct e1000_hw *);
28742 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28743 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28744 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28746 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28748 struct e1000_nvm_operations {
28749 s32 (*acquire)(struct e1000_hw *);
28750 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28751 s32 (*update)(struct e1000_hw *);
28752 s32 (*validate)(struct e1000_hw *);
28754 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28756 struct e1000_info {
28757 s32 (*get_invariants)(struct e1000_hw *);
28758 @@ -350,7 +353,7 @@ struct e1000_info {
28759 extern const struct e1000_info e1000_82575_info;
28761 struct e1000_mac_info {
28762 - struct e1000_mac_operations ops;
28763 + e1000_mac_operations_no_const ops;
28767 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28770 struct e1000_phy_info {
28771 - struct e1000_phy_operations ops;
28772 + e1000_phy_operations_no_const ops;
28774 enum e1000_phy_type type;
28776 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28779 struct e1000_nvm_info {
28780 - struct e1000_nvm_operations ops;
28781 + e1000_nvm_operations_no_const ops;
28782 enum e1000_nvm_type type;
28783 enum e1000_nvm_override override;
28785 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28786 s32 (*check_for_ack)(struct e1000_hw *, u16);
28787 s32 (*check_for_rst)(struct e1000_hw *, u16);
28789 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28791 struct e1000_mbx_stats {
28793 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28796 struct e1000_mbx_info {
28797 - struct e1000_mbx_operations ops;
28798 + e1000_mbx_operations_no_const ops;
28799 struct e1000_mbx_stats stats;
28802 diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28803 --- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28804 +++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28805 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28806 s32 (*read_mac_addr)(struct e1000_hw *);
28807 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28809 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28811 struct e1000_mac_info {
28812 - struct e1000_mac_operations ops;
28813 + e1000_mac_operations_no_const ops;
28817 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28818 s32 (*check_for_ack)(struct e1000_hw *);
28819 s32 (*check_for_rst)(struct e1000_hw *);
28821 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28823 struct e1000_mbx_stats {
28825 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28828 struct e1000_mbx_info {
28829 - struct e1000_mbx_operations ops;
28830 + e1000_mbx_operations_no_const ops;
28831 struct e1000_mbx_stats stats;
28834 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28835 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28836 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28837 @@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28841 + pax_track_stack();
28843 /* Check for Promiscuous and All Multicast modes */
28845 rctl = IXGB_READ_REG(hw, RCTL);
28846 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28847 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28848 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28849 @@ -261,6 +261,9 @@ void __devinit
28850 ixgb_check_options(struct ixgb_adapter *adapter)
28852 int bd = adapter->bd_number;
28854 + pax_track_stack();
28856 if (bd >= IXGB_MAX_NIC) {
28857 pr_notice("Warning: no configuration for board #%i\n", bd);
28858 pr_notice("Using defaults for all values\n");
28859 diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28860 --- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28861 +++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28862 @@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28863 s32 (*update_checksum)(struct ixgbe_hw *);
28864 u16 (*calc_checksum)(struct ixgbe_hw *);
28866 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28868 struct ixgbe_mac_operations {
28869 s32 (*init_hw)(struct ixgbe_hw *);
28870 @@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28872 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28874 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28876 struct ixgbe_phy_operations {
28877 s32 (*identify)(struct ixgbe_hw *);
28878 @@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28879 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28880 s32 (*check_overtemp)(struct ixgbe_hw *);
28882 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28884 struct ixgbe_eeprom_info {
28885 - struct ixgbe_eeprom_operations ops;
28886 + ixgbe_eeprom_operations_no_const ops;
28887 enum ixgbe_eeprom_type type;
28888 u32 semaphore_delay;
28890 @@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28892 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28893 struct ixgbe_mac_info {
28894 - struct ixgbe_mac_operations ops;
28895 + ixgbe_mac_operations_no_const ops;
28896 enum ixgbe_mac_type type;
28897 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28898 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28899 @@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28902 struct ixgbe_phy_info {
28903 - struct ixgbe_phy_operations ops;
28904 + ixgbe_phy_operations_no_const ops;
28905 struct mdio_if_info mdio;
28906 enum ixgbe_phy_type type;
28908 @@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28909 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28910 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28912 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28914 struct ixgbe_mbx_stats {
28916 @@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28919 struct ixgbe_mbx_info {
28920 - struct ixgbe_mbx_operations ops;
28921 + ixgbe_mbx_operations_no_const ops;
28922 struct ixgbe_mbx_stats stats;
28925 diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28926 --- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28927 +++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28928 @@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28929 s32 (*clear_vfta)(struct ixgbe_hw *);
28930 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28932 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28934 enum ixgbe_mac_type {
28935 ixgbe_mac_unknown = 0,
28936 @@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28939 struct ixgbe_mac_info {
28940 - struct ixgbe_mac_operations ops;
28941 + ixgbe_mac_operations_no_const ops;
28945 @@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28946 s32 (*check_for_ack)(struct ixgbe_hw *);
28947 s32 (*check_for_rst)(struct ixgbe_hw *);
28949 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28951 struct ixgbe_mbx_stats {
28953 @@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28956 struct ixgbe_mbx_info {
28957 - struct ixgbe_mbx_operations ops;
28958 + ixgbe_mbx_operations_no_const ops;
28959 struct ixgbe_mbx_stats stats;
28962 diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
28963 --- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
28964 +++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
28965 @@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
28967 u64 counter[TOTAL_PORT_COUNTER_NUM];
28969 + pax_track_stack();
28971 mutex_lock(&hw_priv->lock);
28972 n = SWITCH_PORT_NUM;
28973 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28974 diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
28975 --- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
28976 +++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
28978 #include <linux/dma-mapping.h>
28979 #include <linux/slab.h>
28980 #include <linux/io-mapping.h>
28981 +#include <linux/sched.h>
28983 #include <linux/mlx4/device.h>
28984 #include <linux/mlx4/doorbell.h>
28985 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28989 + pax_track_stack();
28991 err = mlx4_QUERY_FW(dev);
28993 if (err == -EACCES)
28994 diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
28995 --- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
28996 +++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
28997 @@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
28998 int i, num_irqs, err;
29001 + pax_track_stack();
29003 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29004 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29005 ldg_num_map[i] = first_ldg + i;
29006 diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29007 --- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29008 +++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29009 @@ -82,7 +82,7 @@ static int cards_found;
29011 * VLB I/O addresses
29013 -static unsigned int pcnet32_portlist[] __initdata =
29014 +static unsigned int pcnet32_portlist[] __devinitdata =
29015 { 0x300, 0x320, 0x340, 0x360, 0 };
29017 static int pcnet32_debug;
29018 @@ -270,7 +270,7 @@ struct pcnet32_private {
29019 struct sk_buff **rx_skbuff;
29020 dma_addr_t *tx_dma_addr;
29021 dma_addr_t *rx_dma_addr;
29022 - struct pcnet32_access a;
29023 + struct pcnet32_access *a;
29024 spinlock_t lock; /* Guard lock */
29025 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29026 unsigned int rx_ring_size; /* current rx ring size */
29027 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29030 netif_wake_queue(dev);
29031 - val = lp->a.read_csr(ioaddr, CSR3);
29032 + val = lp->a->read_csr(ioaddr, CSR3);
29034 - lp->a.write_csr(ioaddr, CSR3, val);
29035 + lp->a->write_csr(ioaddr, CSR3, val);
29036 napi_enable(&lp->napi);
29039 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29040 r = mii_link_ok(&lp->mii_if);
29041 } else if (lp->chip_version >= PCNET32_79C970A) {
29042 ulong ioaddr = dev->base_addr; /* card base I/O address */
29043 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29044 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29045 } else { /* can not detect link on really old chips */
29048 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29049 pcnet32_netif_stop(dev);
29051 spin_lock_irqsave(&lp->lock, flags);
29052 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29053 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29055 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29057 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29058 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29060 struct pcnet32_private *lp = netdev_priv(dev);
29061 - struct pcnet32_access *a = &lp->a; /* access to registers */
29062 + struct pcnet32_access *a = lp->a; /* access to registers */
29063 ulong ioaddr = dev->base_addr; /* card base I/O address */
29064 struct sk_buff *skb; /* sk buff */
29065 int x, i; /* counters */
29066 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29067 pcnet32_netif_stop(dev);
29069 spin_lock_irqsave(&lp->lock, flags);
29070 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29071 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29073 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29075 /* Reset the PCNET32 */
29076 - lp->a.reset(ioaddr);
29077 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29078 + lp->a->reset(ioaddr);
29079 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29081 /* switch pcnet32 to 32bit mode */
29082 - lp->a.write_bcr(ioaddr, 20, 2);
29083 + lp->a->write_bcr(ioaddr, 20, 2);
29085 /* purge & init rings but don't actually restart */
29086 pcnet32_restart(dev, 0x0000);
29088 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29089 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29091 /* Initialize Transmit buffers. */
29092 size = data_len + 15;
29093 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29095 /* set int loopback in CSR15 */
29096 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29097 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29098 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29100 teststatus = cpu_to_le16(0x8000);
29101 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29102 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29104 /* Check status of descriptors */
29105 for (x = 0; x < numbuffs; x++) {
29106 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29110 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29111 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29113 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29114 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29115 @@ -1015,7 +1015,7 @@ clean_up:
29116 pcnet32_restart(dev, CSR0_NORMAL);
29118 pcnet32_purge_rx_ring(dev);
29119 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29120 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29122 spin_unlock_irqrestore(&lp->lock, flags);
29124 @@ -1025,7 +1025,7 @@ clean_up:
29125 static void pcnet32_led_blink_callback(struct net_device *dev)
29127 struct pcnet32_private *lp = netdev_priv(dev);
29128 - struct pcnet32_access *a = &lp->a;
29129 + struct pcnet32_access *a = lp->a;
29130 ulong ioaddr = dev->base_addr;
29131 unsigned long flags;
29133 @@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29134 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29136 struct pcnet32_private *lp = netdev_priv(dev);
29137 - struct pcnet32_access *a = &lp->a;
29138 + struct pcnet32_access *a = lp->a;
29139 ulong ioaddr = dev->base_addr;
29140 unsigned long flags;
29142 @@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29145 struct pcnet32_private *lp = netdev_priv(dev);
29146 - struct pcnet32_access *a = &lp->a;
29147 + struct pcnet32_access *a = lp->a;
29148 ulong ioaddr = dev->base_addr;
29151 @@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29152 spin_lock_irqsave(&lp->lock, flags);
29153 if (pcnet32_tx(dev)) {
29154 /* reset the chip to clear the error condition, then restart */
29155 - lp->a.reset(ioaddr);
29156 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29157 + lp->a->reset(ioaddr);
29158 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29159 pcnet32_restart(dev, CSR0_START);
29160 netif_wake_queue(dev);
29162 @@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29163 __napi_complete(napi);
29165 /* clear interrupt masks */
29166 - val = lp->a.read_csr(ioaddr, CSR3);
29167 + val = lp->a->read_csr(ioaddr, CSR3);
29169 - lp->a.write_csr(ioaddr, CSR3, val);
29170 + lp->a->write_csr(ioaddr, CSR3, val);
29172 /* Set interrupt enable. */
29173 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29174 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29176 spin_unlock_irqrestore(&lp->lock, flags);
29178 @@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29181 struct pcnet32_private *lp = netdev_priv(dev);
29182 - struct pcnet32_access *a = &lp->a;
29183 + struct pcnet32_access *a = lp->a;
29184 ulong ioaddr = dev->base_addr;
29185 unsigned long flags;
29187 @@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29188 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29189 if (lp->phymask & (1 << j)) {
29190 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29191 - lp->a.write_bcr(ioaddr, 33,
29192 + lp->a->write_bcr(ioaddr, 33,
29194 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29195 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29199 @@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29200 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29201 lp->options |= PCNET32_PORT_FD;
29206 /* prior to register_netdev, dev->name is not yet correct */
29207 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29208 @@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29210 /* lp->phycount and lp->phymask are set to 0 by memset above */
29212 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29213 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29214 /* scan for PHYs */
29215 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29216 unsigned short id1, id2;
29217 @@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29218 pr_info("Found PHY %04x:%04x at address %d\n",
29221 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29222 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29223 if (lp->phycount > 1)
29224 lp->options |= PCNET32_PORT_MII;
29226 @@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29229 /* Reset the PCNET32 */
29230 - lp->a.reset(ioaddr);
29231 + lp->a->reset(ioaddr);
29233 /* switch pcnet32 to 32bit mode */
29234 - lp->a.write_bcr(ioaddr, 20, 2);
29235 + lp->a->write_bcr(ioaddr, 20, 2);
29237 netif_printk(lp, ifup, KERN_DEBUG, dev,
29238 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29239 @@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29240 (u32) (lp->init_dma_addr));
29242 /* set/reset autoselect bit */
29243 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29244 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29245 if (lp->options & PCNET32_PORT_ASEL)
29247 - lp->a.write_bcr(ioaddr, 2, val);
29248 + lp->a->write_bcr(ioaddr, 2, val);
29250 /* handle full duplex setting */
29251 if (lp->mii_if.full_duplex) {
29252 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29253 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29254 if (lp->options & PCNET32_PORT_FD) {
29256 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29257 @@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29258 if (lp->chip_version == 0x2627)
29261 - lp->a.write_bcr(ioaddr, 9, val);
29262 + lp->a->write_bcr(ioaddr, 9, val);
29265 /* set/reset GPSI bit in test register */
29266 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29267 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29268 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29270 - lp->a.write_csr(ioaddr, 124, val);
29271 + lp->a->write_csr(ioaddr, 124, val);
29273 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29274 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29275 @@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29276 * duplex, and/or enable auto negotiation, and clear DANAS
29278 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29279 - lp->a.write_bcr(ioaddr, 32,
29280 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29281 + lp->a->write_bcr(ioaddr, 32,
29282 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29283 /* disable Auto Negotiation, set 10Mpbs, HD */
29284 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29285 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29286 if (lp->options & PCNET32_PORT_FD)
29288 if (lp->options & PCNET32_PORT_100)
29290 - lp->a.write_bcr(ioaddr, 32, val);
29291 + lp->a->write_bcr(ioaddr, 32, val);
29293 if (lp->options & PCNET32_PORT_ASEL) {
29294 - lp->a.write_bcr(ioaddr, 32,
29295 - lp->a.read_bcr(ioaddr,
29296 + lp->a->write_bcr(ioaddr, 32,
29297 + lp->a->read_bcr(ioaddr,
29299 /* enable auto negotiate, setup, disable fd */
29300 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29301 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29303 - lp->a.write_bcr(ioaddr, 32, val);
29304 + lp->a->write_bcr(ioaddr, 32, val);
29308 @@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29309 * There is really no good other way to handle multiple PHYs
29310 * other than turning off all automatics
29312 - val = lp->a.read_bcr(ioaddr, 2);
29313 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29314 - val = lp->a.read_bcr(ioaddr, 32);
29315 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29316 + val = lp->a->read_bcr(ioaddr, 2);
29317 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29318 + val = lp->a->read_bcr(ioaddr, 32);
29319 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29321 if (!(lp->options & PCNET32_PORT_ASEL)) {
29323 @@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29326 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29327 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29328 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29330 if (lp->options & PCNET32_PORT_FD) {
29331 ecmd.duplex = DUPLEX_FULL;
29332 @@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29333 ecmd.duplex = DUPLEX_HALF;
29336 - lp->a.write_bcr(ioaddr, 9, bcr9);
29337 + lp->a->write_bcr(ioaddr, 9, bcr9);
29340 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29341 @@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29344 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29345 - val = lp->a.read_csr(ioaddr, CSR3);
29346 + val = lp->a->read_csr(ioaddr, CSR3);
29348 - lp->a.write_csr(ioaddr, CSR3, val);
29349 + lp->a->write_csr(ioaddr, CSR3, val);
29353 @@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29354 napi_enable(&lp->napi);
29356 /* Re-initialize the PCNET32, and start it when done. */
29357 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29358 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29359 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29360 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29362 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29363 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29364 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29365 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29367 netif_start_queue(dev);
29369 @@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29373 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29374 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29377 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29378 * reports that doing so triggers a bug in the '974.
29380 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29381 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29383 netif_printk(lp, ifup, KERN_DEBUG, dev,
29384 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29386 (u32) (lp->init_dma_addr),
29387 - lp->a.read_csr(ioaddr, CSR0));
29388 + lp->a->read_csr(ioaddr, CSR0));
29390 spin_unlock_irqrestore(&lp->lock, flags);
29392 @@ -2236,7 +2236,7 @@ err_free_ring:
29393 * Switch back to 16bit mode to avoid problems with dumb
29394 * DOS packet driver after a warm reboot
29396 - lp->a.write_bcr(ioaddr, 20, 4);
29397 + lp->a->write_bcr(ioaddr, 20, 4);
29400 spin_unlock_irqrestore(&lp->lock, flags);
29401 @@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29403 /* wait for stop */
29404 for (i = 0; i < 100; i++)
29405 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29406 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29410 @@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29414 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29415 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29418 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29419 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29422 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29423 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29426 static void pcnet32_tx_timeout(struct net_device *dev)
29427 @@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29428 /* Transmitter timeout, serious problems. */
29429 if (pcnet32_debug & NETIF_MSG_DRV)
29430 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29431 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29432 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29433 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29434 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29435 dev->stats.tx_errors++;
29436 if (netif_msg_tx_err(lp)) {
29438 @@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29440 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29441 "%s() called, csr0 %4.4x\n",
29442 - __func__, lp->a.read_csr(ioaddr, CSR0));
29443 + __func__, lp->a->read_csr(ioaddr, CSR0));
29445 /* Default status -- will not enable Successful-TxDone
29446 * interrupt when that option is available to us.
29447 @@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29448 dev->stats.tx_bytes += skb->len;
29450 /* Trigger an immediate send poll. */
29451 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29452 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29454 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29456 @@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29458 spin_lock(&lp->lock);
29460 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29461 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29462 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29463 if (csr0 == 0xffff)
29464 break; /* PCMCIA remove happened */
29465 /* Acknowledge all of the current interrupt sources ASAP. */
29466 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29467 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29469 netif_printk(lp, intr, KERN_DEBUG, dev,
29470 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29471 - csr0, lp->a.read_csr(ioaddr, CSR0));
29472 + csr0, lp->a->read_csr(ioaddr, CSR0));
29474 /* Log misc errors. */
29476 @@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29477 if (napi_schedule_prep(&lp->napi)) {
29479 /* set interrupt masks */
29480 - val = lp->a.read_csr(ioaddr, CSR3);
29481 + val = lp->a->read_csr(ioaddr, CSR3);
29483 - lp->a.write_csr(ioaddr, CSR3, val);
29484 + lp->a->write_csr(ioaddr, CSR3, val);
29486 __napi_schedule(&lp->napi);
29489 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29490 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29493 netif_printk(lp, intr, KERN_DEBUG, dev,
29494 "exiting interrupt, csr0=%#4.4x\n",
29495 - lp->a.read_csr(ioaddr, CSR0));
29496 + lp->a->read_csr(ioaddr, CSR0));
29498 spin_unlock(&lp->lock);
29500 @@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29502 spin_lock_irqsave(&lp->lock, flags);
29504 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29505 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29507 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29508 "Shutting down ethercard, status was %2.2x\n",
29509 - lp->a.read_csr(ioaddr, CSR0));
29510 + lp->a->read_csr(ioaddr, CSR0));
29512 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29513 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29514 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29517 * Switch back to 16bit mode to avoid problems with dumb
29518 * DOS packet driver after a warm reboot
29520 - lp->a.write_bcr(ioaddr, 20, 4);
29521 + lp->a->write_bcr(ioaddr, 20, 4);
29523 spin_unlock_irqrestore(&lp->lock, flags);
29525 @@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29526 unsigned long flags;
29528 spin_lock_irqsave(&lp->lock, flags);
29529 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29530 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29531 spin_unlock_irqrestore(&lp->lock, flags);
29533 return &dev->stats;
29534 @@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29535 if (dev->flags & IFF_ALLMULTI) {
29536 ib->filter[0] = cpu_to_le32(~0U);
29537 ib->filter[1] = cpu_to_le32(~0U);
29538 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29539 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29540 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29541 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29542 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29543 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29544 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29545 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29548 /* clear the multicast filter */
29549 @@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29550 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29552 for (i = 0; i < 4; i++)
29553 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29554 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29555 le16_to_cpu(mcast_table[i]));
29558 @@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29560 spin_lock_irqsave(&lp->lock, flags);
29561 suspended = pcnet32_suspend(dev, &flags, 0);
29562 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29563 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29564 if (dev->flags & IFF_PROMISC) {
29565 /* Log any net taps. */
29566 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29567 lp->init_block->mode =
29568 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29570 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29571 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29573 lp->init_block->mode =
29574 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29575 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29576 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29577 pcnet32_load_multicast(dev);
29582 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29583 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29584 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29585 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29586 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29588 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29589 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29590 pcnet32_restart(dev, CSR0_NORMAL);
29591 netif_wake_queue(dev);
29593 @@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29597 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29598 - val_out = lp->a.read_bcr(ioaddr, 34);
29599 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29600 + val_out = lp->a->read_bcr(ioaddr, 34);
29604 @@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29608 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29609 - lp->a.write_bcr(ioaddr, 34, val);
29610 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29611 + lp->a->write_bcr(ioaddr, 34, val);
29614 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29615 @@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29616 curr_link = mii_link_ok(&lp->mii_if);
29618 ulong ioaddr = dev->base_addr; /* card base I/O address */
29619 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29620 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29623 if (prev_link || verbose) {
29624 @@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29625 (ecmd.duplex == DUPLEX_FULL)
29626 ? "full" : "half");
29628 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29629 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29630 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29631 if (lp->mii_if.full_duplex)
29635 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29636 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29639 netif_info(lp, link, dev, "link up\n");
29640 diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29641 --- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29642 +++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29643 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29644 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29645 struct ppp_stats stats;
29646 struct ppp_comp_stats cstats;
29650 case SIOCGPPPSTATS:
29651 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29655 - vers = PPP_VERSION;
29656 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29657 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29661 diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29662 --- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29663 +++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29664 @@ -552,12 +552,12 @@ struct rtl8169_private {
29666 void (*write)(void __iomem *, int, int);
29667 int (*read)(void __iomem *, int);
29669 + } __no_const mdio_ops;
29671 struct pll_power_ops {
29672 void (*down)(struct rtl8169_private *);
29673 void (*up)(struct rtl8169_private *);
29675 + } __no_const pll_power_ops;
29677 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29678 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29679 diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29680 --- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29681 +++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29682 @@ -131,6 +131,7 @@
29683 #define CHIPREV_ID_5750_A0 0x4000
29684 #define CHIPREV_ID_5750_A1 0x4001
29685 #define CHIPREV_ID_5750_A3 0x4003
29686 +#define CHIPREV_ID_5750_C1 0x4201
29687 #define CHIPREV_ID_5750_C2 0x4202
29688 #define CHIPREV_ID_5752_A0_HW 0x5000
29689 #define CHIPREV_ID_5752_A0 0x6000
29690 diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29691 --- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29692 +++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29693 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29695 static int __init abyss_init (void)
29697 - abyss_netdev_ops = tms380tr_netdev_ops;
29698 + pax_open_kernel();
29699 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29701 - abyss_netdev_ops.ndo_open = abyss_open;
29702 - abyss_netdev_ops.ndo_stop = abyss_close;
29703 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29704 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29705 + pax_close_kernel();
29707 return pci_register_driver(&abyss_driver);
29709 diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29710 --- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29711 +++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29712 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29714 static int __init madgemc_init (void)
29716 - madgemc_netdev_ops = tms380tr_netdev_ops;
29717 - madgemc_netdev_ops.ndo_open = madgemc_open;
29718 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29719 + pax_open_kernel();
29720 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29721 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29722 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29723 + pax_close_kernel();
29725 return mca_register_driver (&madgemc_driver);
29727 diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29728 --- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29729 +++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29730 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29731 struct platform_device *pdev;
29732 int i, num = 0, err = 0;
29734 - proteon_netdev_ops = tms380tr_netdev_ops;
29735 - proteon_netdev_ops.ndo_open = proteon_open;
29736 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29737 + pax_open_kernel();
29738 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29739 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29740 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29741 + pax_close_kernel();
29743 err = platform_driver_register(&proteon_driver);
29745 diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29746 --- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29747 +++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29748 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29749 struct platform_device *pdev;
29750 int i, num = 0, err = 0;
29752 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29753 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29754 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29755 + pax_open_kernel();
29756 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29757 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29758 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29759 + pax_close_kernel();
29761 err = platform_driver_register(&sk_isa_driver);
29763 diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29764 --- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29765 +++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29766 @@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29767 struct de_srom_info_leaf *il;
29770 + pax_track_stack();
29772 /* download entire eeprom */
29773 for (i = 0; i < DE_EEPROM_WORDS; i++)
29774 ((__le16 *)ee_data)[i] =
29775 diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29776 --- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29777 +++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29778 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29779 for (i=0; i<ETH_ALEN; i++) {
29780 tmp.addr[i] = dev->dev_addr[i];
29782 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29783 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29786 case DE4X5_SET_HWADDR: /* Set the hardware address */
29787 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29788 spin_lock_irqsave(&lp->lock, flags);
29789 memcpy(&statbuf, &lp->pktStats, ioc->len);
29790 spin_unlock_irqrestore(&lp->lock, flags);
29791 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29792 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29796 diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29797 --- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29798 +++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29800 #include <asm/byteorder.h>
29801 #include <linux/serial_core.h>
29802 #include <linux/serial.h>
29804 +#include <asm/local.h>
29806 #define MOD_AUTHOR "Option Wireless"
29807 #define MOD_DESCRIPTION "USB High Speed Option driver"
29808 @@ -257,7 +257,7 @@ struct hso_serial {
29810 /* from usb_serial_port */
29811 struct tty_struct *tty;
29813 + local_t open_count;
29814 spinlock_t serial_lock;
29816 int (*write_data) (struct hso_serial *serial);
29817 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29820 urb = serial->rx_urb[0];
29821 - if (serial->open_count > 0) {
29822 + if (local_read(&serial->open_count) > 0) {
29823 count = put_rxbuf_data(urb, serial);
29826 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29827 DUMP1(urb->transfer_buffer, urb->actual_length);
29829 /* Anyone listening? */
29830 - if (serial->open_count == 0)
29831 + if (local_read(&serial->open_count) == 0)
29835 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29836 spin_unlock_irq(&serial->serial_lock);
29838 /* check for port already opened, if not set the termios */
29839 - serial->open_count++;
29840 - if (serial->open_count == 1) {
29841 + if (local_inc_return(&serial->open_count) == 1) {
29842 serial->rx_state = RX_IDLE;
29843 /* Force default termio settings */
29844 _hso_serial_set_termios(tty, NULL);
29845 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29846 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29848 hso_stop_serial_device(serial->parent);
29849 - serial->open_count--;
29850 + local_dec(&serial->open_count);
29851 kref_put(&serial->parent->ref, hso_serial_ref_free);
29854 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29856 /* reset the rts and dtr */
29857 /* do the actual close */
29858 - serial->open_count--;
29859 + local_dec(&serial->open_count);
29861 - if (serial->open_count <= 0) {
29862 - serial->open_count = 0;
29863 + if (local_read(&serial->open_count) <= 0) {
29864 + local_set(&serial->open_count, 0);
29865 spin_lock_irq(&serial->serial_lock);
29866 if (serial->tty == tty) {
29867 serial->tty->driver_data = NULL;
29868 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29870 /* the actual setup */
29871 spin_lock_irqsave(&serial->serial_lock, flags);
29872 - if (serial->open_count)
29873 + if (local_read(&serial->open_count))
29874 _hso_serial_set_termios(tty, old);
29876 tty->termios = old;
29877 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29878 D1("Pending read interrupt on port %d\n", i);
29879 spin_lock(&serial->serial_lock);
29880 if (serial->rx_state == RX_IDLE &&
29881 - serial->open_count > 0) {
29882 + local_read(&serial->open_count) > 0) {
29883 /* Setup and send a ctrl req read on
29885 if (!serial->rx_urb_filled[0]) {
29886 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29887 /* Start all serial ports */
29888 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29889 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29890 - if (dev2ser(serial_table[i])->open_count) {
29891 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29893 hso_start_serial_device(serial_table[i], GFP_NOIO);
29894 hso_kick_transmit(dev2ser(serial_table[i]));
29895 diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29896 --- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29897 +++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29898 @@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29899 * Return with error code if any of the queue indices
29902 - if (p->ring_index[i] < 0 ||
29903 - p->ring_index[i] >= adapter->num_rx_queues)
29904 + if (p->ring_index[i] >= adapter->num_rx_queues)
29908 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29909 --- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29910 +++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29911 @@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29912 void (*link_down)(struct __vxge_hw_device *devh);
29913 void (*crit_err)(struct __vxge_hw_device *devh,
29914 enum vxge_hw_event type, u64 ext_data);
29919 * struct __vxge_hw_blockpool_entry - Block private data structure
29920 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29921 --- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29922 +++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29923 @@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29924 struct sk_buff *completed[NR_SKB_COMPLETED];
29927 + pax_track_stack();
29931 skb_ptr = completed;
29932 @@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29933 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29936 + pax_track_stack();
29940 * - itable with bucket numbers
29941 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29942 --- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29943 +++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29944 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29945 struct vxge_hw_mempool_dma *dma_object,
29951 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29952 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29953 diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29954 --- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29955 +++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29956 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29957 unsigned char hex[1024],
29960 + pax_track_stack();
29962 if (len >= (sizeof(hex) / 2))
29963 len = (sizeof(hex) / 2) - 1;
29965 diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
29966 --- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
29967 +++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
29968 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29970 static int x25_open(struct net_device *dev)
29972 - struct lapb_register_struct cb;
29973 + static struct lapb_register_struct cb = {
29974 + .connect_confirmation = x25_connected,
29975 + .connect_indication = x25_connected,
29976 + .disconnect_confirmation = x25_disconnected,
29977 + .disconnect_indication = x25_disconnected,
29978 + .data_indication = x25_data_indication,
29979 + .data_transmit = x25_data_transmit
29983 - cb.connect_confirmation = x25_connected;
29984 - cb.connect_indication = x25_connected;
29985 - cb.disconnect_confirmation = x25_disconnected;
29986 - cb.disconnect_indication = x25_disconnected;
29987 - cb.data_indication = x25_data_indication;
29988 - cb.data_transmit = x25_data_transmit;
29990 result = lapb_register(dev, &cb);
29991 if (result != LAPB_OK)
29993 diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
29994 --- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
29995 +++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
29996 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29998 DECLARE_COMPLETION_ONSTACK(notif_completion);
30000 + pax_track_stack();
30002 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30003 i2400m, ack, ack_size);
30004 BUG_ON(_ack == i2400m->bm_ack_buf);
30005 diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30006 --- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30007 +++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30008 @@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30009 BSSListElement * loop_net;
30010 BSSListElement * tmp_net;
30012 + pax_track_stack();
30014 /* Blow away current list of scan results */
30015 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30016 list_move_tail (&loop_net->list, &ai->network_free_list);
30017 @@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30021 + pax_track_stack();
30023 memset( &mySsid, 0, sizeof( mySsid ) );
30026 @@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30027 __le32 *vals = stats.vals;
30030 + pax_track_stack();
30032 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30034 data = file->private_data;
30035 @@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30036 /* If doLoseSync is not 1, we won't do a Lose Sync */
30037 int doLoseSync = -1;
30039 + pax_track_stack();
30041 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30043 data = file->private_data;
30044 @@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30046 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30048 + pax_track_stack();
30050 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30053 @@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30054 CapabilityRid cap_rid;
30055 __le32 *vals = stats_rid.vals;
30057 + pax_track_stack();
30059 /* Get stats out of the card */
30060 clear_bit(JOB_WSTATS, &local->jobs);
30061 if (local->power.event) {
30062 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30063 --- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30064 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30065 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30069 + pax_track_stack();
30071 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30072 len += snprintf(buf+len, sizeof(buf)-len,
30073 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30074 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30075 unsigned int len = 0;
30078 + pax_track_stack();
30080 len += snprintf(buf+len, sizeof(buf)-len,
30081 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30083 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30087 + pax_track_stack();
30089 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30090 sc->ah->ah_ant_mode);
30091 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30092 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30093 unsigned int len = 0;
30094 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30096 + pax_track_stack();
30098 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30100 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30101 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30102 unsigned int len = 0;
30105 + pax_track_stack();
30107 len += snprintf(buf+len, sizeof(buf)-len,
30108 "RX\n---------------------\n");
30109 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30110 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30112 unsigned int len = 0;
30114 + pax_track_stack();
30116 len += snprintf(buf+len, sizeof(buf)-len,
30117 "HW has PHY error counters:\t%s\n",
30118 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30119 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30120 struct ath5k_buf *bf, *bf0;
30123 + pax_track_stack();
30125 len += snprintf(buf+len, sizeof(buf)-len,
30126 "available txbuffers: %d\n", sc->txbuf_len);
30128 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30129 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30130 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30131 @@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30132 s32 i, j, ip, im, nmeasurement;
30133 u8 nchains = get_streams(common->tx_chainmask);
30135 + pax_track_stack();
30137 for (ip = 0; ip < MPASS; ip++) {
30138 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30139 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30140 @@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30144 + pax_track_stack();
30146 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30147 if (ah->txchainmask & (1 << i))
30149 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30150 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30151 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30152 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30153 int theta_low_bin = 0;
30156 + pax_track_stack();
30158 /* disregard any bin that contains <= 16 samples */
30159 thresh_accum_cnt = 16;
30161 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30162 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30163 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30164 @@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30166 unsigned int len = 0;
30168 + pax_track_stack();
30170 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30171 len += snprintf(buf + len, sizeof(buf) - len,
30172 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30173 @@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30177 + pax_track_stack();
30179 len += snprintf(buf + len, sizeof(buf) - len,
30180 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30181 wiphy_name(sc->hw->wiphy),
30182 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30183 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30184 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30185 @@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30186 unsigned int len = 0;
30189 + pax_track_stack();
30191 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30193 WMI_CMD(WMI_TGT_STATS_CMDID);
30194 @@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30196 unsigned int len = 0;
30198 + pax_track_stack();
30200 len += snprintf(buf + len, sizeof(buf) - len,
30201 "%20s : %10u\n", "Buffers queued",
30202 priv->debug.tx_stats.buf_queued);
30203 @@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30205 unsigned int len = 0;
30207 + pax_track_stack();
30209 len += snprintf(buf + len, sizeof(buf) - len,
30210 "%20s : %10u\n", "SKBs allocated",
30211 priv->debug.rx_stats.skb_allocated);
30212 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30213 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30214 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30215 @@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30218 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30223 * struct ath_hw_ops - callbacks used by hardware code and driver code
30224 @@ -642,7 +642,7 @@ struct ath_hw_ops {
30225 u32 burstDuration);
30226 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30231 struct ath_nf_limits {
30233 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30234 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30235 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30236 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30238 DECLARE_SSID_BUF(ssid);
30240 + pax_track_stack();
30242 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30245 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30246 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30249 + pax_track_stack();
30251 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30254 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30255 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30256 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30257 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30258 unsigned long flags;
30259 DECLARE_SSID_BUF(ssid);
30261 + pax_track_stack();
30263 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30264 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30265 print_ssid(ssid, info_element->data, info_element->len),
30266 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30267 --- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30268 +++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30269 @@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30271 if (iwl3945_mod_params.disable_hw_scan) {
30272 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30273 - iwl3945_hw_ops.hw_scan = NULL;
30274 + pax_open_kernel();
30275 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30276 + pax_close_kernel();
30279 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30280 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30281 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30282 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30283 @@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30284 if (cfg->mod_params->disable_hw_scan) {
30285 dev_printk(KERN_DEBUG, &(pdev->dev),
30286 "sw scan support is deprecated\n");
30287 - iwlagn_hw_ops.hw_scan = NULL;
30288 + pax_open_kernel();
30289 + *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30290 + pax_close_kernel();
30293 hw = iwl_alloc_all(cfg);
30294 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30295 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30296 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30297 @@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30298 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30299 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30301 + pax_track_stack();
30303 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30305 /* Treat uninitialized rate scaling data same as non-existing. */
30306 @@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30307 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30308 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30310 + pax_track_stack();
30312 /* Override starting rate (index 0) if needed for debug purposes */
30313 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30315 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30316 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30317 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30318 @@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30320 const size_t bufsz = sizeof(buf);
30322 + pax_track_stack();
30324 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30325 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30326 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30327 @@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30328 char buf[256 * NUM_IWL_RXON_CTX];
30329 const size_t bufsz = sizeof(buf);
30331 + pax_track_stack();
30333 for_each_context(priv, ctx) {
30334 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30336 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30337 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30338 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30339 @@ -68,8 +68,8 @@ do {
30343 -#define IWL_DEBUG(__priv, level, fmt, args...)
30344 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30345 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30346 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30347 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30348 const void *p, u32 len)
30350 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30351 --- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30352 +++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30353 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30357 + pax_track_stack();
30361 if (count < sizeof(buf))
30362 diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30363 --- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30364 +++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30365 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30368 if (fake_hw_scan) {
30369 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30370 - mac80211_hwsim_ops.sw_scan_start = NULL;
30371 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30372 + pax_open_kernel();
30373 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30374 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30375 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30376 + pax_close_kernel();
30379 spin_lock_init(&hwsim_radio_lock);
30380 diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30381 --- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30382 +++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30383 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30385 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30387 - if (rts_threshold < 0 || rts_threshold > 2347)
30388 + if (rts_threshold > 2347)
30389 rts_threshold = 2347;
30391 tmp = cpu_to_le32(rts_threshold);
30392 diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30393 --- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30394 +++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30395 @@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30397 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30399 + pax_track_stack();
30401 precommoncmdcnt = 0;
30402 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30404 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30405 --- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30406 +++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30407 @@ -260,7 +260,7 @@ struct wl1251_if_operations {
30408 void (*reset)(struct wl1251 *wl);
30409 void (*enable_irq)(struct wl1251 *wl);
30410 void (*disable_irq)(struct wl1251 *wl);
30415 struct ieee80211_hw *hw;
30416 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30417 --- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30418 +++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30419 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30423 + pax_track_stack();
30425 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30427 spi_message_init(&m);
30428 diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30429 --- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30430 +++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30431 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30432 if (cookie == NO_COOKIE)
30434 if (cookie == INVALID_COOKIE) {
30435 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30436 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30439 if (cookie != last_cookie) {
30440 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30441 /* add userspace sample */
30444 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30445 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30449 cookie = lookup_dcookie(mm, s->eip, &offset);
30451 if (cookie == INVALID_COOKIE) {
30452 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30453 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30457 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30458 /* ignore backtraces if failed to add a sample */
30459 if (state == sb_bt_start) {
30460 state = sb_bt_ignore;
30461 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30462 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30466 diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30467 --- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30468 +++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30469 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30472 if (buffer_pos == buffer_size) {
30473 - atomic_inc(&oprofile_stats.event_lost_overflow);
30474 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30478 diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30479 --- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30480 +++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30481 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30482 if (oprofile_ops.switch_events())
30485 - atomic_inc(&oprofile_stats.multiplex_counter);
30486 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30487 start_switch_worker();
30490 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30491 --- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30492 +++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30493 @@ -186,7 +186,7 @@ static const struct file_operations atom
30496 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30497 - char const *name, atomic_t *val)
30498 + char const *name, atomic_unchecked_t *val)
30500 return __oprofilefs_create_file(sb, root, name,
30501 &atomic_ro_fops, 0444, val);
30502 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30503 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30504 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30505 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30506 cpu_buf->sample_invalid_eip = 0;
30509 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30510 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30511 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30512 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30513 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30514 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30515 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30516 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30517 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30518 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30522 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30523 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30524 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30525 @@ -13,11 +13,11 @@
30526 #include <asm/atomic.h>
30528 struct oprofile_stat_struct {
30529 - atomic_t sample_lost_no_mm;
30530 - atomic_t sample_lost_no_mapping;
30531 - atomic_t bt_lost_no_mapping;
30532 - atomic_t event_lost_overflow;
30533 - atomic_t multiplex_counter;
30534 + atomic_unchecked_t sample_lost_no_mm;
30535 + atomic_unchecked_t sample_lost_no_mapping;
30536 + atomic_unchecked_t bt_lost_no_mapping;
30537 + atomic_unchecked_t event_lost_overflow;
30538 + atomic_unchecked_t multiplex_counter;
30541 extern struct oprofile_stat_struct oprofile_stats;
30542 diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30543 --- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30544 +++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30545 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30549 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30550 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30553 #ifdef CONFIG_PARPORT_1284
30554 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30558 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30559 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30561 #endif /* IEEE1284.3 support. */
30563 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30564 --- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30565 +++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30566 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30567 int (*hardware_test) (struct slot* slot, u32 value);
30568 u8 (*get_power) (struct slot* slot);
30569 int (*set_power) (struct slot* slot, int value);
30573 struct cpci_hp_controller {
30575 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30576 --- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30577 +++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30578 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30580 void compaq_nvram_init (void __iomem *rom_start)
30583 +#ifndef CONFIG_PAX_KERNEXEC
30585 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30589 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30591 /* initialize our int15 lock */
30592 diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30593 --- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30594 +++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30596 #define MODULE_PARAM_PREFIX "pcie_aspm."
30598 /* Note: those are not register definitions */
30599 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30600 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30601 -#define ASPM_STATE_L1 (4) /* L1 state */
30602 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30603 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30604 +#define ASPM_STATE_L1 (4U) /* L1 state */
30605 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30606 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30608 diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30609 --- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30610 +++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30611 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30615 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30616 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30617 struct device_attribute *attr,
30620 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30623 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30624 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30625 struct device_attribute *attr,
30628 @@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30632 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30633 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30635 if (!dev->mmio_always_on) {
30636 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30637 diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30638 --- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30639 +++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30640 @@ -476,7 +476,16 @@ static const struct file_operations proc
30641 static int __init pci_proc_init(void)
30643 struct pci_dev *dev = NULL;
30645 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30646 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30647 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30648 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30649 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30652 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30654 proc_create("devices", 0, proc_bus_pci_dir,
30655 &proc_bus_pci_dev_operations);
30656 proc_initialized = 1;
30657 diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30658 --- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30659 +++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30660 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30661 struct pcifront_sd *sd = bus->sysdata;
30662 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30664 + pax_track_stack();
30666 if (verbose_request)
30667 dev_info(&pdev->xdev->dev,
30668 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30669 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30670 struct pcifront_sd *sd = bus->sysdata;
30671 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30673 + pax_track_stack();
30675 if (verbose_request)
30676 dev_info(&pdev->xdev->dev,
30677 "write dev=%04x:%02x:%02x.%01x - "
30678 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30679 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30680 struct msi_desc *entry;
30682 + pax_track_stack();
30684 if (nvec > SH_INFO_MAX_VEC) {
30685 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30686 " Increase SH_INFO_MAX_VEC.\n", nvec);
30687 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30688 struct pcifront_sd *sd = dev->bus->sysdata;
30689 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30691 + pax_track_stack();
30693 err = do_pci_op(pdev, &op);
30695 /* What should do for error ? */
30696 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30697 struct pcifront_sd *sd = dev->bus->sysdata;
30698 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30700 + pax_track_stack();
30702 err = do_pci_op(pdev, &op);
30703 if (likely(!err)) {
30704 vector[0] = op.value;
30705 diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30706 --- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30707 +++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30708 @@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30712 -void static hotkey_mask_warn_incomplete_mask(void)
30713 +static void hotkey_mask_warn_incomplete_mask(void)
30715 /* log only what the user can fix... */
30716 const u32 wantedmask = hotkey_driver_mask &
30717 diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30718 --- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30719 +++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30720 @@ -59,7 +59,7 @@ do { \
30721 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30724 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30725 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30726 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30729 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30732 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30734 + pax_open_kernel();
30735 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30736 + pax_close_kernel();
30738 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30739 spin_lock_irqsave(&pnp_bios_lock, flags);
30740 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30742 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30744 + pax_open_kernel();
30745 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30746 + pax_close_kernel();
30750 /* If we get here and this is set then the PnP BIOS faulted on us. */
30751 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30755 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30756 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30760 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30761 pnp_bios_callpoint.offset = header->fields.pm16offset;
30762 pnp_bios_callpoint.segment = PNP_CS16;
30764 + pax_open_kernel();
30766 for_each_possible_cpu(i) {
30767 struct desc_struct *gdt = get_cpu_gdt_table(i);
30769 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30770 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30771 (unsigned long)__va(header->fields.pm16dseg));
30774 + pax_close_kernel();
30776 diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30777 --- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30778 +++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30779 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30782 /* check if the resource is valid */
30783 - if (*irq < 0 || *irq > 15)
30787 /* check if the resource is reserved */
30788 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30791 /* check if the resource is valid */
30792 - if (*dma < 0 || *dma == 4 || *dma > 7)
30793 + if (*dma == 4 || *dma > 7)
30796 /* check if the resource is reserved */
30797 diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30798 --- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30799 +++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30801 struct bq27x00_device_info;
30802 struct bq27x00_access_methods {
30803 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30807 enum bq27x00_chip { BQ27000, BQ27500 };
30809 diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30810 --- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30811 +++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30812 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30813 max8660->shadow_regs[MAX8660_OVER1] = 5;
30815 /* Otherwise devices can be toggled via software */
30816 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30817 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30818 + pax_open_kernel();
30819 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30820 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30821 + pax_close_kernel();
30825 diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30826 --- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30827 +++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30828 @@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30830 mc13xxx_unlock(mc13892);
30832 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30833 + pax_open_kernel();
30834 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30835 = mc13892_vcam_set_mode;
30836 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30837 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30838 = mc13892_vcam_get_mode;
30839 + pax_close_kernel();
30840 for (i = 0; i < pdata->num_regulators; i++) {
30841 init_data = &pdata->regulators[i];
30842 priv->regulators[i] = regulator_register(
30843 diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30844 --- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30845 +++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30847 #include <linux/module.h>
30848 #include <linux/rtc.h>
30849 #include <linux/sched.h>
30850 +#include <linux/grsecurity.h>
30851 #include "rtc-core.h"
30853 static dev_t rtc_devt;
30854 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30855 if (copy_from_user(&tm, uarg, sizeof(tm)))
30858 + gr_log_timechange();
30860 return rtc_set_time(rtc, &tm);
30863 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30864 --- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30865 +++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30866 @@ -492,7 +492,7 @@ struct adapter_ops
30867 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30868 /* Administrative operations */
30869 int (*adapter_comm)(struct aac_dev * dev, int comm);
30874 * Define which interrupt handler needs to be installed
30875 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30876 --- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30877 +++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30878 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30879 u32 actual_fibsize64, actual_fibsize = 0;
30882 + pax_track_stack();
30884 if (dev->in_reset) {
30885 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30886 diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30887 --- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30888 +++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30889 @@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30890 flash_error_table[i].reason);
30893 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30894 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30895 asd_show_update_bios, asd_store_update_bios);
30897 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30898 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30899 --- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30900 +++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30901 @@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30902 struct bfad_vport_s *vport, *vport_new;
30903 struct bfa_fcs_driver_info_s driver_info;
30905 + pax_track_stack();
30907 /* Fill the driver_info info to fcs*/
30908 memset(&driver_info, 0, sizeof(driver_info));
30909 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30910 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30911 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30912 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30913 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30917 + pax_track_stack();
30920 * get hba attributes
30922 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30926 + pax_track_stack();
30929 * get port attributes
30931 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30932 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30933 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30934 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30935 struct fc_rpsc_speed_info_s speeds;
30936 struct bfa_port_attr_s pport_attr;
30938 + pax_track_stack();
30940 bfa_trc(port->fcs, rx_fchs->s_id);
30941 bfa_trc(port->fcs, rx_fchs->d_id);
30943 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30944 --- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30945 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30946 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30947 u32 *nvecs, u32 *maxvec);
30948 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30952 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30954 struct bfa_iocfc_s {
30955 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30956 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30957 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30958 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30959 bfa_ioc_disable_cbfn_t disable_cbfn;
30960 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30961 bfa_ioc_reset_cbfn_t reset_cbfn;
30966 * Heartbeat failure notification queue element.
30967 @@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
30968 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30969 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30970 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30974 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30975 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30976 diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
30977 --- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
30978 +++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
30979 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30980 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30981 *PrototypeHostAdapter)
30983 + pax_track_stack();
30986 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30987 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30988 diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
30989 --- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
30990 +++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
30991 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30995 + pax_track_stack();
30997 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30998 // get user msg size in u32s
30999 if(get_user(size, &user_msg[0])){
31000 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31004 + pax_track_stack();
31006 memset(msg, 0 , sizeof(msg));
31007 len = scsi_bufflen(cmd);
31008 direction = 0x00000000;
31009 diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31010 --- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31011 +++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31012 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31013 struct hostdata *ha;
31016 + pax_track_stack();
31018 sprintf(name, "%s%d", driver_name, j);
31020 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31021 diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31022 --- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31023 +++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31024 @@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31028 + pax_track_stack();
31030 fiph = (struct fip_header *)skb->data;
31031 sub = fiph->fip_subcode;
31033 diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31034 --- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31035 +++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31036 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31037 unsigned long flags;
31040 + pax_track_stack();
31042 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31044 ha = gdth_find_ha(ldrv.ionode);
31045 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31049 + pax_track_stack();
31051 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31052 res.number >= MAX_HDRIVES)
31054 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31058 + pax_track_stack();
31060 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31062 ha = gdth_find_ha(gen.ionode);
31063 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31065 gdth_cmd_str gdtcmd;
31066 char cmnd[MAX_COMMAND_SIZE];
31068 + pax_track_stack();
31070 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31072 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31073 diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31074 --- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31075 +++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31076 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31079 char cmnd[MAX_COMMAND_SIZE];
31081 + pax_track_stack();
31083 memset(cmnd, 0xff, 12);
31084 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31086 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31087 gdth_hget_str *phg;
31088 char cmnd[MAX_COMMAND_SIZE];
31090 + pax_track_stack();
31092 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31093 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31094 if (!gdtcmd || !estr)
31095 diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31096 --- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31097 +++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31099 #include "scsi_logging.h"
31102 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31103 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31106 static void scsi_host_cls_release(struct device *dev)
31107 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31108 * subtract one because we increment first then return, but we need to
31109 * know what the next host number was before increment
31111 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31112 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31113 shost->dma_channel = 0xff;
31115 /* These three are default values which can be overridden */
31116 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31117 --- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31118 +++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31119 @@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31122 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31123 - return h->access.command_completed(h);
31124 + return h->access->command_completed(h);
31126 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31127 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31128 @@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31129 while (!list_empty(&h->reqQ)) {
31130 c = list_entry(h->reqQ.next, struct CommandList, list);
31131 /* can't do anything if fifo is full */
31132 - if ((h->access.fifo_full(h))) {
31133 + if ((h->access->fifo_full(h))) {
31134 dev_warn(&h->pdev->dev, "fifo full\n");
31137 @@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31140 /* Tell the controller execute command */
31141 - h->access.submit_command(h, c);
31142 + h->access->submit_command(h, c);
31144 /* Put job onto the completed Q */
31146 @@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31148 static inline unsigned long get_next_completion(struct ctlr_info *h)
31150 - return h->access.command_completed(h);
31151 + return h->access->command_completed(h);
31154 static inline bool interrupt_pending(struct ctlr_info *h)
31156 - return h->access.intr_pending(h);
31157 + return h->access->intr_pending(h);
31160 static inline long interrupt_not_for_us(struct ctlr_info *h)
31162 - return (h->access.intr_pending(h) == 0) ||
31163 + return (h->access->intr_pending(h) == 0) ||
31164 (h->interrupts_enabled == 0);
31167 @@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31168 if (prod_index < 0)
31170 h->product_name = products[prod_index].product_name;
31171 - h->access = *(products[prod_index].access);
31172 + h->access = products[prod_index].access;
31174 if (hpsa_board_disabled(h->pdev)) {
31175 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31176 @@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31179 /* make sure the board interrupts are off */
31180 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31181 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31183 if (h->msix_vector || h->msi_vector)
31184 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31185 @@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31186 hpsa_scsi_setup(h);
31188 /* Turn the interrupts on so we can service requests */
31189 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31190 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31192 hpsa_put_ctlr_into_performant_mode(h);
31193 hpsa_hba_inquiry(h);
31194 @@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31195 * To write all data in the battery backed cache to disks
31197 hpsa_flush_cache(h);
31198 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31199 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31200 free_irq(h->intr[h->intr_mode], h);
31201 #ifdef CONFIG_PCI_MSI
31202 if (h->msix_vector)
31203 @@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31206 /* Change the access methods to the performant access methods */
31207 - h->access = SA5_performant_access;
31208 + h->access = &SA5_performant_access;
31209 h->transMethod = CFGTBL_Trans_Performant;
31212 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31213 --- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31214 +++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31215 @@ -73,7 +73,7 @@ struct ctlr_info {
31216 unsigned int msix_vector;
31217 unsigned int msi_vector;
31218 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31219 - struct access_method access;
31220 + struct access_method *access;
31222 /* queue and queue Info */
31223 struct list_head reqQ;
31224 diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31225 --- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31226 +++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31227 @@ -1027,7 +1027,7 @@ typedef struct {
31228 int (*intr)(struct ips_ha *);
31229 void (*enableint)(struct ips_ha *);
31230 uint32_t (*statupd)(struct ips_ha *);
31232 +} __no_const ips_hw_func_t;
31234 typedef struct ips_ha {
31235 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31236 diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31237 --- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31238 +++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31239 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31240 * all together if not used XXX
31243 - atomic_t no_free_exch;
31244 - atomic_t no_free_exch_xid;
31245 - atomic_t xid_not_found;
31246 - atomic_t xid_busy;
31247 - atomic_t seq_not_found;
31248 - atomic_t non_bls_resp;
31249 + atomic_unchecked_t no_free_exch;
31250 + atomic_unchecked_t no_free_exch_xid;
31251 + atomic_unchecked_t xid_not_found;
31252 + atomic_unchecked_t xid_busy;
31253 + atomic_unchecked_t seq_not_found;
31254 + atomic_unchecked_t non_bls_resp;
31258 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31259 /* allocate memory for exchange */
31260 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31262 - atomic_inc(&mp->stats.no_free_exch);
31263 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31266 memset(ep, 0, sizeof(*ep));
31267 @@ -761,7 +761,7 @@ out:
31270 spin_unlock_bh(&pool->lock);
31271 - atomic_inc(&mp->stats.no_free_exch_xid);
31272 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31273 mempool_free(ep, mp->ep_pool);
31276 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31277 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31278 ep = fc_exch_find(mp, xid);
31280 - atomic_inc(&mp->stats.xid_not_found);
31281 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31282 reject = FC_RJT_OX_ID;
31285 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31286 ep = fc_exch_find(mp, xid);
31287 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31289 - atomic_inc(&mp->stats.xid_busy);
31290 + atomic_inc_unchecked(&mp->stats.xid_busy);
31291 reject = FC_RJT_RX_ID;
31294 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31296 xid = ep->xid; /* get our XID */
31298 - atomic_inc(&mp->stats.xid_not_found);
31299 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31300 reject = FC_RJT_RX_ID; /* XID not found */
31303 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31306 if (sp->id != fh->fh_seq_id) {
31307 - atomic_inc(&mp->stats.seq_not_found);
31308 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31309 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31312 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31314 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31316 - atomic_inc(&mp->stats.xid_not_found);
31317 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31320 if (ep->esb_stat & ESB_ST_COMPLETE) {
31321 - atomic_inc(&mp->stats.xid_not_found);
31322 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31325 if (ep->rxid == FC_XID_UNKNOWN)
31326 ep->rxid = ntohs(fh->fh_rx_id);
31327 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31328 - atomic_inc(&mp->stats.xid_not_found);
31329 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31332 if (ep->did != ntoh24(fh->fh_s_id) &&
31333 ep->did != FC_FID_FLOGI) {
31334 - atomic_inc(&mp->stats.xid_not_found);
31335 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31339 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31340 sp->ssb_stat |= SSB_ST_RESP;
31341 sp->id = fh->fh_seq_id;
31342 } else if (sp->id != fh->fh_seq_id) {
31343 - atomic_inc(&mp->stats.seq_not_found);
31344 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31348 @@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31349 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31352 - atomic_inc(&mp->stats.xid_not_found);
31353 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31355 - atomic_inc(&mp->stats.non_bls_resp);
31356 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31360 diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31361 --- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31362 +++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31363 @@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31364 .postreset = ata_std_postreset,
31365 .error_handler = ata_std_error_handler,
31366 .post_internal_cmd = sas_ata_post_internal,
31367 - .qc_defer = ata_std_qc_defer,
31368 + .qc_defer = ata_std_qc_defer,
31369 .qc_prep = ata_noop_qc_prep,
31370 .qc_issue = sas_ata_qc_issue,
31371 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31372 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31373 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31374 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31375 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31377 #include <linux/debugfs.h>
31379 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31380 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31381 static unsigned long lpfc_debugfs_start_time = 0L;
31384 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31385 lpfc_debugfs_enable = 0;
31388 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31389 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31390 (lpfc_debugfs_max_disc_trc - 1);
31391 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31392 dtp = vport->disc_trc + i;
31393 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31394 lpfc_debugfs_enable = 0;
31397 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31398 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31399 (lpfc_debugfs_max_slow_ring_trc - 1);
31400 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31401 dtp = phba->slow_ring_trc + i;
31402 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31406 + pax_track_stack();
31409 spin_lock_irq(&phba->hbalock);
31411 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31412 !vport || !vport->disc_trc)
31415 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31416 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31417 (lpfc_debugfs_max_disc_trc - 1);
31418 dtp = vport->disc_trc + index;
31420 dtp->data1 = data1;
31421 dtp->data2 = data2;
31422 dtp->data3 = data3;
31423 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31424 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31425 dtp->jif = jiffies;
31428 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31429 !phba || !phba->slow_ring_trc)
31432 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31433 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31434 (lpfc_debugfs_max_slow_ring_trc - 1);
31435 dtp = phba->slow_ring_trc + index;
31437 dtp->data1 = data1;
31438 dtp->data2 = data2;
31439 dtp->data3 = data3;
31440 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31441 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31442 dtp->jif = jiffies;
31445 @@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31446 "slow_ring buffer\n");
31449 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31450 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31451 memset(phba->slow_ring_trc, 0,
31452 (sizeof(struct lpfc_debugfs_trc) *
31453 lpfc_debugfs_max_slow_ring_trc));
31454 @@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31458 - atomic_set(&vport->disc_trc_cnt, 0);
31459 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31461 snprintf(name, sizeof(name), "discovery_trace");
31462 vport->debug_disc_trc =
31463 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31464 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31465 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31466 @@ -419,7 +419,7 @@ struct lpfc_vport {
31467 struct dentry *debug_nodelist;
31468 struct dentry *vport_debugfs_root;
31469 struct lpfc_debugfs_trc *disc_trc;
31470 - atomic_t disc_trc_cnt;
31471 + atomic_unchecked_t disc_trc_cnt;
31473 uint8_t stat_data_enabled;
31474 uint8_t stat_data_blocked;
31475 @@ -785,8 +785,8 @@ struct lpfc_hba {
31476 struct timer_list fabric_block_timer;
31477 unsigned long bit_flags;
31478 #define FABRIC_COMANDS_BLOCKED 0
31479 - atomic_t num_rsrc_err;
31480 - atomic_t num_cmd_success;
31481 + atomic_unchecked_t num_rsrc_err;
31482 + atomic_unchecked_t num_cmd_success;
31483 unsigned long last_rsrc_error_time;
31484 unsigned long last_ramp_down_time;
31485 unsigned long last_ramp_up_time;
31486 @@ -800,7 +800,7 @@ struct lpfc_hba {
31487 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31488 struct dentry *debug_slow_ring_trc;
31489 struct lpfc_debugfs_trc *slow_ring_trc;
31490 - atomic_t slow_ring_trc_cnt;
31491 + atomic_unchecked_t slow_ring_trc_cnt;
31492 /* iDiag debugfs sub-directory */
31493 struct dentry *idiag_root;
31494 struct dentry *idiag_pci_cfg;
31495 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31496 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31497 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31498 @@ -9535,8 +9535,10 @@ lpfc_init(void)
31499 printk(LPFC_COPYRIGHT "\n");
31501 if (lpfc_enable_npiv) {
31502 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31503 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31504 + pax_open_kernel();
31505 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31506 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31507 + pax_close_kernel();
31509 lpfc_transport_template =
31510 fc_attach_transport(&lpfc_transport_functions);
31511 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31512 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31513 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31514 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31515 uint32_t evt_posted;
31517 spin_lock_irqsave(&phba->hbalock, flags);
31518 - atomic_inc(&phba->num_rsrc_err);
31519 + atomic_inc_unchecked(&phba->num_rsrc_err);
31520 phba->last_rsrc_error_time = jiffies;
31522 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31523 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31524 unsigned long flags;
31525 struct lpfc_hba *phba = vport->phba;
31526 uint32_t evt_posted;
31527 - atomic_inc(&phba->num_cmd_success);
31528 + atomic_inc_unchecked(&phba->num_cmd_success);
31530 if (vport->cfg_lun_queue_depth <= queue_depth)
31532 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31533 unsigned long num_rsrc_err, num_cmd_success;
31536 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31537 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31538 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31539 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31541 vports = lpfc_create_vport_work_array(phba);
31542 if (vports != NULL)
31543 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31546 lpfc_destroy_vport_work_array(phba, vports);
31547 - atomic_set(&phba->num_rsrc_err, 0);
31548 - atomic_set(&phba->num_cmd_success, 0);
31549 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31550 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31554 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31557 lpfc_destroy_vport_work_array(phba, vports);
31558 - atomic_set(&phba->num_rsrc_err, 0);
31559 - atomic_set(&phba->num_cmd_success, 0);
31560 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31561 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31565 diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31566 --- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31567 +++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31568 @@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31572 + pax_track_stack();
31574 // Allocate memory for the base list of scb for management module.
31575 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31577 diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31578 --- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31579 +++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31580 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31581 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31584 + pax_track_stack();
31586 or = osd_start_request(od, GFP_KERNEL);
31589 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31590 --- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31591 +++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31592 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31593 res->scsi_dev = scsi_dev;
31594 scsi_dev->hostdata = res;
31595 res->change_detected = 0;
31596 - atomic_set(&res->read_failures, 0);
31597 - atomic_set(&res->write_failures, 0);
31598 + atomic_set_unchecked(&res->read_failures, 0);
31599 + atomic_set_unchecked(&res->write_failures, 0);
31602 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31603 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31605 /* If this was a SCSI read/write command keep count of errors */
31606 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31607 - atomic_inc(&res->read_failures);
31608 + atomic_inc_unchecked(&res->read_failures);
31609 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31610 - atomic_inc(&res->write_failures);
31611 + atomic_inc_unchecked(&res->write_failures);
31613 if (!RES_IS_GSCSI(res->cfg_entry) &&
31614 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31615 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31616 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31617 * hrrq_id assigned here in queuecommand
31619 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31620 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31621 pinstance->num_hrrq;
31622 cmd->cmd_done = pmcraid_io_done;
31624 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31625 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31626 * hrrq_id assigned here in queuecommand
31628 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31629 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31630 pinstance->num_hrrq;
31632 if (request_size) {
31633 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31635 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31636 /* add resources only after host is added into system */
31637 - if (!atomic_read(&pinstance->expose_resources))
31638 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31641 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31642 @@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31643 init_waitqueue_head(&pinstance->reset_wait_q);
31645 atomic_set(&pinstance->outstanding_cmds, 0);
31646 - atomic_set(&pinstance->last_message_id, 0);
31647 - atomic_set(&pinstance->expose_resources, 0);
31648 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31649 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31651 INIT_LIST_HEAD(&pinstance->free_res_q);
31652 INIT_LIST_HEAD(&pinstance->used_res_q);
31653 @@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31654 /* Schedule worker thread to handle CCN and take care of adding and
31655 * removing devices to OS
31657 - atomic_set(&pinstance->expose_resources, 1);
31658 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31659 schedule_work(&pinstance->worker_q);
31662 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31663 --- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31664 +++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31665 @@ -750,7 +750,7 @@ struct pmcraid_instance {
31666 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31668 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31669 - atomic_t last_message_id;
31670 + atomic_unchecked_t last_message_id;
31672 /* configuration table */
31673 struct pmcraid_config_table *cfg_table;
31674 @@ -779,7 +779,7 @@ struct pmcraid_instance {
31675 atomic_t outstanding_cmds;
31677 /* should add/delete resources to mid-layer now ?*/
31678 - atomic_t expose_resources;
31679 + atomic_unchecked_t expose_resources;
31683 @@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31684 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31686 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31687 - atomic_t read_failures; /* count of failed READ commands */
31688 - atomic_t write_failures; /* count of failed WRITE commands */
31689 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31690 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31692 /* To indicate add/delete/modify during CCN */
31693 u8 change_detected;
31694 diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31695 --- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31696 +++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31697 @@ -2236,7 +2236,7 @@ struct isp_operations {
31698 int (*get_flash_version) (struct scsi_qla_host *, void *);
31699 int (*start_scsi) (srb_t *);
31700 int (*abort_isp) (struct scsi_qla_host *);
31704 /* MSI-X Support *************************************************************/
31706 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31707 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31708 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31709 @@ -256,7 +256,7 @@ struct ddb_entry {
31710 atomic_t retry_relogin_timer; /* Min Time between relogins
31712 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31713 - atomic_t relogin_retry_count; /* Num of times relogin has been
31714 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31718 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31719 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31720 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31721 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31722 ddb_entry->fw_ddb_index = fw_ddb_index;
31723 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31724 atomic_set(&ddb_entry->relogin_timer, 0);
31725 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31726 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31727 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31728 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31729 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31730 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31731 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31732 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31733 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31734 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31735 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31736 atomic_set(&ddb_entry->relogin_timer, 0);
31737 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31738 iscsi_unblock_session(ddb_entry->sess);
31739 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31740 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31741 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31742 @@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31743 ddb_entry->fw_ddb_device_state ==
31744 DDB_DS_SESSION_FAILED) {
31745 /* Reset retry relogin timer */
31746 - atomic_inc(&ddb_entry->relogin_retry_count);
31747 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31748 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31749 " timed out-retrying"
31752 ddb_entry->fw_ddb_index,
31753 - atomic_read(&ddb_entry->
31754 + atomic_read_unchecked(&ddb_entry->
31755 relogin_retry_count))
31758 diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31759 --- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31760 +++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31761 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31762 unsigned long timeout;
31765 - atomic_inc(&cmd->device->iorequest_cnt);
31766 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31768 /* check if the device is still usable */
31769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31770 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31771 --- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31772 +++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31773 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31774 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31775 unsigned char *cmd = (unsigned char *)scp->cmnd;
31777 + pax_track_stack();
31779 if ((errsts = check_readiness(scp, 1, devip)))
31781 memset(arr, 0, sizeof(arr));
31782 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31783 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31784 unsigned char *cmd = (unsigned char *)scp->cmnd;
31786 + pax_track_stack();
31788 if ((errsts = check_readiness(scp, 1, devip)))
31790 memset(arr, 0, sizeof(arr));
31791 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31792 --- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31793 +++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31794 @@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31795 shost = sdev->host;
31796 scsi_init_cmd_errh(cmd);
31797 cmd->result = DID_NO_CONNECT << 16;
31798 - atomic_inc(&cmd->device->iorequest_cnt);
31799 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31802 * SCSI request completion path will do scsi_device_unbusy(),
31803 @@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31805 INIT_LIST_HEAD(&cmd->eh_entry);
31807 - atomic_inc(&cmd->device->iodone_cnt);
31808 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31810 - atomic_inc(&cmd->device->ioerr_cnt);
31811 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31813 disposition = scsi_decide_disposition(cmd);
31814 if (disposition != SUCCESS &&
31815 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31816 --- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31817 +++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31818 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31821 struct scsi_device *sdev = to_scsi_device(dev); \
31822 - unsigned long long count = atomic_read(&sdev->field); \
31823 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31824 return snprintf(buf, 20, "0x%llx\n", count); \
31826 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31827 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31828 --- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31829 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31830 @@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31831 * Netlink Infrastructure
31834 -static atomic_t fc_event_seq;
31835 +static atomic_unchecked_t fc_event_seq;
31838 * fc_get_event_number - Obtain the next sequential FC event number
31839 @@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31841 fc_get_event_number(void)
31843 - return atomic_add_return(1, &fc_event_seq);
31844 + return atomic_add_return_unchecked(1, &fc_event_seq);
31846 EXPORT_SYMBOL(fc_get_event_number);
31848 @@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31852 - atomic_set(&fc_event_seq, 0);
31853 + atomic_set_unchecked(&fc_event_seq, 0);
31855 error = transport_class_register(&fc_host_class);
31857 @@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31860 *val = simple_strtoul(buf, &cp, 0);
31861 - if ((*cp && (*cp != '\n')) || (*val < 0))
31862 + if (*cp && (*cp != '\n'))
31865 * Check for overflow; dev_loss_tmo is u32
31866 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31867 --- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31868 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31869 @@ -83,7 +83,7 @@ struct iscsi_internal {
31870 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31873 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31874 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31875 static struct workqueue_struct *iscsi_eh_timer_workq;
31878 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31881 ihost = shost->shost_data;
31882 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31883 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31885 if (id == ISCSI_MAX_TARGET) {
31886 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31887 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31888 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31889 ISCSI_TRANSPORT_VERSION);
31891 - atomic_set(&iscsi_session_nr, 0);
31892 + atomic_set_unchecked(&iscsi_session_nr, 0);
31894 err = class_register(&iscsi_transport_class);
31896 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31897 --- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31898 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31900 #include "scsi_transport_srp_internal.h"
31902 struct srp_host_attrs {
31903 - atomic_t next_port_id;
31904 + atomic_unchecked_t next_port_id;
31906 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31908 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31909 struct Scsi_Host *shost = dev_to_shost(dev);
31910 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31912 - atomic_set(&srp_host->next_port_id, 0);
31913 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31917 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31918 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31919 rport->roles = ids->roles;
31921 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31922 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31923 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31925 transport_setup_device(&rport->dev);
31926 diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31927 --- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31928 +++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31929 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31930 const struct file_operations * fops;
31933 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31934 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31935 {"allow_dio", &adio_fops},
31936 {"debug", &debug_fops},
31937 {"def_reserved_size", &dressz_fops},
31938 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31941 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31942 - struct sg_proc_leaf * leaf;
31943 + const struct sg_proc_leaf * leaf;
31945 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31947 diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31948 --- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31949 +++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31950 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31951 int do_iounmap = 0;
31952 int do_disable_device = 1;
31954 + pax_track_stack();
31956 memset(&sym_dev, 0, sizeof(sym_dev));
31957 memset(&nvram, 0, sizeof(nvram));
31958 sym_dev.pdev = pdev;
31959 diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31960 --- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
31961 +++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
31962 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31966 + pax_track_stack();
31968 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31969 cmd.reqRingNumPages = adapter->req_pages;
31970 cmd.cmpRingNumPages = adapter->cmp_pages;
31971 diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
31972 --- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
31973 +++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
31974 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31975 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31977 /* portable code must never pass more than 32 bytes */
31978 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31979 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
31983 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31984 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-05-19 00:06:34.000000000 -0400
31985 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-14 12:12:59.000000000 -0400
31986 @@ -384,7 +384,7 @@ static struct ar_cookie s_ar_cookie_mem[
31987 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31990 -static struct net_device_ops ar6000_netdev_ops = {
31991 +static net_device_ops_no_const ar6000_netdev_ops = {
31993 .ndo_open = ar6000_open,
31994 .ndo_stop = ar6000_close,
31995 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31996 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-05-19 00:06:34.000000000 -0400
31997 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-14 09:32:05.000000000 -0400
31998 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31999 typedef struct ar6k_pal_config_s
32001 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32002 -}ar6k_pal_config_t;
32003 +} __no_const ar6k_pal_config_t;
32005 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32006 #endif /* _AR6K_PAL_H_ */
32007 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32008 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
32009 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
32010 @@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32011 free_netdev(ifp->net);
32013 /* Allocate etherdev, including space for private structure */
32014 - ifp->net = alloc_etherdev(sizeof(dhd));
32015 + ifp->net = alloc_etherdev(sizeof(*dhd));
32017 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32021 strcpy(ifp->net->name, ifp->name);
32022 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32023 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32024 err = dhd_net_attach(&dhd->pub, ifp->idx);
32026 DHD_ERROR(("%s: dhd_net_attach failed, "
32027 @@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32028 strcpy(nv_path, nvram_path);
32030 /* Allocate etherdev, including space for private structure */
32031 - net = alloc_etherdev(sizeof(dhd));
32032 + net = alloc_etherdev(sizeof(*dhd));
32034 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32036 @@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32038 * Save the dhd_info into the priv
32040 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32041 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32043 /* Set network interface name if it was provided as module parameter */
32044 if (iface_name[0]) {
32045 @@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32047 * Save the dhd_info into the priv
32049 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32050 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32052 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32054 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32055 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32056 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32057 @@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32058 list = (wl_u32_list_t *) channels;
32060 dwrq->length = sizeof(struct iw_range);
32061 - memset(range, 0, sizeof(range));
32062 + memset(range, 0, sizeof(*range));
32064 range->min_nwid = range->max_nwid = 0;
32066 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32067 --- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32068 +++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32069 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32070 struct net_device_stats *stats = &etdev->net_stats;
32072 if (tcb->flags & fMP_DEST_BROAD)
32073 - atomic_inc(&etdev->Stats.brdcstxmt);
32074 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32075 else if (tcb->flags & fMP_DEST_MULTI)
32076 - atomic_inc(&etdev->Stats.multixmt);
32077 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32079 - atomic_inc(&etdev->Stats.unixmt);
32080 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32083 stats->tx_bytes += tcb->skb->len;
32084 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32085 --- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32086 +++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32087 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32090 u32 unircv; /* # multicast packets received */
32091 - atomic_t unixmt; /* # multicast packets for Tx */
32092 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32093 u32 multircv; /* # multicast packets received */
32094 - atomic_t multixmt; /* # multicast packets for Tx */
32095 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32096 u32 brdcstrcv; /* # broadcast packets received */
32097 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32098 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32099 u32 norcvbuf; /* # Rx packets discarded */
32100 u32 noxmtbuf; /* # Tx packets discarded */
32102 diff -urNp linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c
32103 --- linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-05-19 00:06:34.000000000 -0400
32104 +++ linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-08-14 12:25:25.000000000 -0400
32105 @@ -230,8 +230,10 @@ int psb_mmap(struct file *filp, struct v
32106 if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
32107 dev_priv->ttm_vm_ops = (struct vm_operations_struct *)
32109 - psb_ttm_vm_ops = *vma->vm_ops;
32110 - psb_ttm_vm_ops.fault = &psb_ttm_fault;
32111 + pax_open_kernel();
32112 + memcpy((void *)&psb_ttm_vm_ops, vma->vm_ops, sizeof(psb_ttm_vm_ops));
32113 + *(void **)&psb_ttm_vm_ops.fault = &psb_ttm_fault;
32114 + pax_close_kernel();
32117 vma->vm_ops = &psb_ttm_vm_ops;
32118 diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32119 --- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32120 +++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32121 @@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32122 unsigned long flags;
32125 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32126 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32127 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32128 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32130 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32132 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32133 --- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32134 +++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32135 @@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32136 u64 output_address = (output) ? virt_to_phys(output) : 0;
32137 u32 output_address_hi = output_address >> 32;
32138 u32 output_address_lo = output_address & 0xFFFFFFFF;
32139 - volatile void *hypercall_page = hv_context.hypercall_page;
32140 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32142 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32143 control, input, output);
32144 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32145 --- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32146 +++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32147 @@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32149 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32151 - hid_dev->ll_driver->open = mousevsc_hid_open;
32152 - hid_dev->ll_driver->close = mousevsc_hid_close;
32153 + pax_open_kernel();
32154 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32155 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32156 + pax_close_kernel();
32158 hid_dev->bus = BUS_VIRTUAL;
32159 hid_dev->vendor = input_device_ctx->device_info.vendor;
32160 diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32161 --- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32162 +++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32163 @@ -49,7 +49,7 @@ struct rndis_device {
32165 enum rndis_device_state state;
32167 - atomic_t new_req_id;
32168 + atomic_unchecked_t new_req_id;
32170 spinlock_t request_lock;
32171 struct list_head req_list;
32172 @@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32175 set = &rndis_msg->msg.set_req;
32176 - set->req_id = atomic_inc_return(&dev->new_req_id);
32177 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32179 /* Add to the request list */
32180 spin_lock_irqsave(&dev->request_lock, flags);
32181 @@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32183 /* Setup the rndis set */
32184 halt = &request->request_msg.msg.halt_req;
32185 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32186 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32188 /* Ignore return since this msg is optional. */
32189 rndis_filter_send_request(dev, request);
32190 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32191 --- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32192 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32193 @@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32197 - static atomic_t device_num = ATOMIC_INIT(0);
32198 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32200 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32203 /* Set the device name. Otherwise, device_register() will fail. */
32204 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32205 - atomic_inc_return(&device_num));
32206 + atomic_inc_return_unchecked(&device_num));
32208 /* The new device belongs to this bus */
32209 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32210 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32211 --- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32212 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32213 @@ -58,7 +58,7 @@ enum vmbus_connect_state {
32214 struct vmbus_connection {
32215 enum vmbus_connect_state conn_state;
32217 - atomic_t next_gpadl_handle;
32218 + atomic_unchecked_t next_gpadl_handle;
32221 * Represents channel interrupts. Each bit position represents a
32222 diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32223 --- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32224 +++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32225 @@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32227 int (*is_enabled)(struct iio_ring_buffer *ring);
32228 int (*enable)(struct iio_ring_buffer *ring);
32233 * struct iio_ring_buffer - general ring buffer structure
32234 @@ -134,7 +134,7 @@ struct iio_ring_buffer {
32235 struct iio_handler access_handler;
32236 struct iio_event_interface ev_int;
32237 struct iio_shared_ev_pointer shared_ev_pointer;
32238 - struct iio_ring_access_funcs access;
32239 + struct iio_ring_access_funcs access;
32240 int (*preenable)(struct iio_dev *);
32241 int (*postenable)(struct iio_dev *);
32242 int (*predisable)(struct iio_dev *);
32243 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32244 --- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32245 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32246 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32247 * since the RX tasklet also increments it.
32249 #ifdef CONFIG_64BIT
32250 - atomic64_add(rx_status.dropped_packets,
32251 - (atomic64_t *)&priv->stats.rx_dropped);
32252 + atomic64_add_unchecked(rx_status.dropped_packets,
32253 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32255 - atomic_add(rx_status.dropped_packets,
32256 - (atomic_t *)&priv->stats.rx_dropped);
32257 + atomic_add_unchecked(rx_status.dropped_packets,
32258 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32262 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32263 --- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32264 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32265 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32266 /* Increment RX stats for virtual ports */
32267 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32268 #ifdef CONFIG_64BIT
32269 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32270 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32271 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32272 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32274 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32275 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32276 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32277 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32280 netif_receive_skb(skb);
32281 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32284 #ifdef CONFIG_64BIT
32285 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32286 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32288 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32289 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32291 dev_kfree_skb_irq(skb);
32293 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32294 --- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32295 +++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32296 @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32297 mutex_init(&psb->mcache_lock);
32298 psb->mcache_root = RB_ROOT;
32299 psb->mcache_timeout = msecs_to_jiffies(5000);
32300 - atomic_long_set(&psb->mcache_gen, 0);
32301 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32303 psb->trans_max_pages = 100;
32305 @@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32306 INIT_LIST_HEAD(&psb->crypto_ready_list);
32307 INIT_LIST_HEAD(&psb->crypto_active_list);
32309 - atomic_set(&psb->trans_gen, 1);
32310 + atomic_set_unchecked(&psb->trans_gen, 1);
32311 atomic_long_set(&psb->total_inodes, 0);
32313 mutex_init(&psb->state_lock);
32314 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32315 --- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32316 +++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32317 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32321 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32322 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32324 mutex_lock(&psb->mcache_lock);
32325 err = pohmelfs_mcache_insert(psb, m);
32326 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32327 --- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32328 +++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32329 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32330 struct pohmelfs_sb {
32331 struct rb_root mcache_root;
32332 struct mutex mcache_lock;
32333 - atomic_long_t mcache_gen;
32334 + atomic_long_unchecked_t mcache_gen;
32335 unsigned long mcache_timeout;
32339 unsigned int trans_retries;
32341 - atomic_t trans_gen;
32342 + atomic_unchecked_t trans_gen;
32344 unsigned int crypto_attached_size;
32345 unsigned int crypto_align_size;
32346 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32347 --- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32348 +++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32349 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32351 struct netfs_cmd *cmd = t->iovec.iov_base;
32353 - t->gen = atomic_inc_return(&psb->trans_gen);
32354 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32356 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32357 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32358 diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32359 --- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32360 +++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32361 @@ -83,7 +83,7 @@ struct _io_ops {
32363 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32369 struct list_head list;
32370 diff -urNp linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c
32371 --- linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-05-19 00:06:34.000000000 -0400
32372 +++ linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-14 12:29:10.000000000 -0400
32373 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32374 t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen);
32377 - if (copy_to_user(data, &resp, rlen))
32378 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32382 diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32383 --- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32384 +++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32385 @@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32386 * re-used for each stats call.
32388 static comstats_t stli_comstats;
32389 -static combrd_t stli_brdstats;
32390 static struct asystats stli_cdkstats;
32392 /*****************************************************************************/
32393 @@ -4003,6 +4002,7 @@ out:
32395 static int stli_getbrdstats(combrd_t __user *bp)
32397 + combrd_t stli_brdstats;
32398 struct stlibrd *brdp;
32401 @@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32402 struct stliport stli_dummyport;
32403 struct stliport *portp;
32405 + pax_track_stack();
32407 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32409 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32410 @@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32411 struct stlibrd stli_dummybrd;
32412 struct stlibrd *brdp;
32414 + pax_track_stack();
32416 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32418 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32419 diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32420 --- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32421 +++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32422 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32423 struct stlport stl_dummyport;
32424 struct stlport *portp;
32426 + pax_track_stack();
32428 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32430 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32431 diff -urNp linux-2.6.39.4/drivers/staging/usbip/stub_dev.c linux-2.6.39.4/drivers/staging/usbip/stub_dev.c
32432 --- linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-05-19 00:06:34.000000000 -0400
32433 +++ linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-08-13 20:32:52.000000000 -0400
32434 @@ -357,9 +357,11 @@ static struct stub_device *stub_device_a
32436 init_waitqueue_head(&sdev->tx_waitq);
32438 - sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32439 - sdev->ud.eh_ops.reset = stub_device_reset;
32440 - sdev->ud.eh_ops.unusable = stub_device_unusable;
32441 + pax_open_kernel();
32442 + *(void **)&sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32443 + *(void **)&sdev->ud.eh_ops.reset = stub_device_reset;
32444 + *(void **)&sdev->ud.eh_ops.unusable = stub_device_unusable;
32445 + pax_close_kernel();
32447 usbip_start_eh(&sdev->ud);
32449 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32450 --- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32451 +++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32452 @@ -92,7 +92,7 @@ struct vhci_hcd {
32453 unsigned resuming:1;
32454 unsigned long re_timeout;
32457 + atomic_unchecked_t seqnum;
32461 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32462 --- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32463 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-13 20:33:49.000000000 -0400
32464 @@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32468 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32469 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32470 if (priv->seqnum == 0xffff)
32471 usbip_uinfo("seqnum max\n");
32473 @@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32477 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32478 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32479 if (unlink->seqnum == 0xffff)
32480 usbip_uinfo("seqnum max\n");
32482 @@ -965,9 +965,11 @@ static void vhci_device_init(struct vhci
32484 init_waitqueue_head(&vdev->waitq_tx);
32486 - vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32487 - vdev->ud.eh_ops.reset = vhci_device_reset;
32488 - vdev->ud.eh_ops.unusable = vhci_device_unusable;
32489 + pax_open_kernel();
32490 + *(void **)&vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32491 + *(void **)&vdev->ud.eh_ops.reset = vhci_device_reset;
32492 + *(void **)&vdev->ud.eh_ops.unusable = vhci_device_unusable;
32493 + pax_close_kernel();
32495 usbip_start_eh(&vdev->ud);
32497 @@ -992,7 +994,7 @@ static int vhci_start(struct usb_hcd *hc
32498 vdev->rhport = rhport;
32501 - atomic_set(&vhci->seqnum, 0);
32502 + atomic_set_unchecked(&vhci->seqnum, 0);
32503 spin_lock_init(&vhci->lock);
32506 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32507 --- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32508 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32509 @@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32510 usbip_uerr("cannot find a urb of seqnum %u\n",
32512 usbip_uinfo("max seqnum %d\n",
32513 - atomic_read(&the_controller->seqnum));
32514 + atomic_read_unchecked(&the_controller->seqnum));
32515 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32518 diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32519 --- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32520 +++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32521 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32523 struct usbctlx_completor {
32524 int (*complete) (struct usbctlx_completor *);
32529 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32530 diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32531 --- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32532 +++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32533 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32534 char path[ALUA_METADATA_PATH_LEN];
32537 + pax_track_stack();
32539 memset(path, 0, ALUA_METADATA_PATH_LEN);
32541 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32542 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32543 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32546 + pax_track_stack();
32548 memset(path, 0, ALUA_METADATA_PATH_LEN);
32549 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32551 diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32552 --- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32553 +++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32554 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32556 unsigned char buf[SE_MODE_PAGE_BUF];
32558 + pax_track_stack();
32560 memset(buf, 0, SE_MODE_PAGE_BUF);
32562 switch (cdb[2] & 0x3f) {
32563 diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32564 --- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32565 +++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32566 @@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32568 int reg_count = 0, prf_isid;
32570 + pax_track_stack();
32572 if (!(su_dev->se_dev_ptr))
32575 diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32576 --- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32577 +++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32578 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32579 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32582 + pax_track_stack();
32584 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32585 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32587 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32591 + pax_track_stack();
32593 memset(buf, 0, pr_aptpl_buf_len);
32595 * Called to clear metadata once APTPL has been deactivated.
32596 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32600 + pax_track_stack();
32602 memset(iov, 0, sizeof(struct iovec));
32603 memset(path, 0, 512);
32605 diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32606 --- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32607 +++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32608 @@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32609 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32610 T_TASK(cmd)->t_task_cdbs,
32611 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32612 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32613 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32614 atomic_read(&T_TASK(cmd)->t_transport_active),
32615 atomic_read(&T_TASK(cmd)->t_transport_stop),
32616 atomic_read(&T_TASK(cmd)->t_transport_sent));
32617 @@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32618 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32619 " task: %p, t_fe_count: %d dev: %p\n", task,
32621 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32622 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32623 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32625 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32626 @@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32628 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32629 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32630 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32631 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32632 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32633 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32635 diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32636 --- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32637 +++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32638 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32640 dev->queue_depth = dev_limits->queue_depth;
32641 atomic_set(&dev->depth_left, dev->queue_depth);
32642 - atomic_set(&dev->dev_ordered_id, 0);
32643 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32645 se_dev_set_default_attribs(dev, dev_limits);
32647 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32648 * Used to determine when ORDERED commands should go from
32649 * Dormant to Active status.
32651 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32652 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32653 smp_mb__after_atomic_inc();
32654 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32655 cmd->se_ordered_id, cmd->sam_task_attr,
32656 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32657 " t_transport_active: %d t_transport_stop: %d"
32658 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32659 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32660 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32661 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32662 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32663 atomic_read(&T_TASK(cmd)->t_transport_active),
32664 atomic_read(&T_TASK(cmd)->t_transport_stop),
32665 @@ -2673,9 +2673,9 @@ check_depth:
32666 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32667 atomic_set(&task->task_active, 1);
32668 atomic_set(&task->task_sent, 1);
32669 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32670 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32672 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32673 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32674 T_TASK(cmd)->t_task_cdbs)
32675 atomic_set(&cmd->transport_sent, 1);
32677 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32678 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32680 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32681 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32682 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32685 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32686 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32690 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32691 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32692 if (!(send_status) ||
32693 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32695 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32697 if (cmd->data_direction == DMA_TO_DEVICE) {
32698 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32699 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32700 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32701 smp_mb__after_atomic_inc();
32702 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32703 transport_new_cmd_failure(cmd);
32704 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32705 CMD_TFO(cmd)->get_task_tag(cmd),
32706 T_TASK(cmd)->t_task_cdbs,
32707 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32708 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32709 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32710 atomic_read(&T_TASK(cmd)->t_transport_active),
32711 atomic_read(&T_TASK(cmd)->t_transport_stop),
32712 atomic_read(&T_TASK(cmd)->t_transport_sent));
32713 diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32714 --- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32715 +++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32716 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32720 + pax_track_stack();
32722 if (!SCI_Prepare(j))
32725 diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32726 --- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32727 +++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32729 #include <asm/hvcserver.h>
32730 #include <asm/uaccess.h>
32731 #include <asm/vio.h>
32732 +#include <asm/local.h>
32735 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32736 @@ -270,7 +271,7 @@ struct hvcs_struct {
32737 unsigned int index;
32739 struct tty_struct *tty;
32741 + local_t open_count;
32744 * Used to tell the driver kernel_thread what operations need to take
32745 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32747 spin_lock_irqsave(&hvcsd->lock, flags);
32749 - if (hvcsd->open_count > 0) {
32750 + if (local_read(&hvcsd->open_count) > 0) {
32751 spin_unlock_irqrestore(&hvcsd->lock, flags);
32752 printk(KERN_INFO "HVCS: vterm state unchanged. "
32753 "The hvcs device node is still in use.\n");
32754 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32755 if ((retval = hvcs_partner_connect(hvcsd)))
32756 goto error_release;
32758 - hvcsd->open_count = 1;
32759 + local_set(&hvcsd->open_count, 1);
32761 tty->driver_data = hvcsd;
32763 @@ -1179,7 +1180,7 @@ fast_open:
32765 spin_lock_irqsave(&hvcsd->lock, flags);
32766 kref_get(&hvcsd->kref);
32767 - hvcsd->open_count++;
32768 + local_inc(&hvcsd->open_count);
32769 hvcsd->todo_mask |= HVCS_SCHED_READ;
32770 spin_unlock_irqrestore(&hvcsd->lock, flags);
32772 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32773 hvcsd = tty->driver_data;
32775 spin_lock_irqsave(&hvcsd->lock, flags);
32776 - if (--hvcsd->open_count == 0) {
32777 + if (local_dec_and_test(&hvcsd->open_count)) {
32779 vio_disable_interrupts(hvcsd->vdev);
32781 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32782 free_irq(irq, hvcsd);
32783 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32785 - } else if (hvcsd->open_count < 0) {
32786 + } else if (local_read(&hvcsd->open_count) < 0) {
32787 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32788 " is missmanaged.\n",
32789 - hvcsd->vdev->unit_address, hvcsd->open_count);
32790 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32793 spin_unlock_irqrestore(&hvcsd->lock, flags);
32794 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32796 spin_lock_irqsave(&hvcsd->lock, flags);
32797 /* Preserve this so that we know how many kref refs to put */
32798 - temp_open_count = hvcsd->open_count;
32799 + temp_open_count = local_read(&hvcsd->open_count);
32802 * Don't kref put inside the spinlock because the destruction
32803 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32804 hvcsd->tty->driver_data = NULL;
32807 - hvcsd->open_count = 0;
32808 + local_set(&hvcsd->open_count, 0);
32810 /* This will drop any buffered data on the floor which is OK in a hangup
32812 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32813 * the middle of a write operation? This is a crummy place to do this
32814 * but we want to keep it all in the spinlock.
32816 - if (hvcsd->open_count <= 0) {
32817 + if (local_read(&hvcsd->open_count) <= 0) {
32818 spin_unlock_irqrestore(&hvcsd->lock, flags);
32821 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32823 struct hvcs_struct *hvcsd = tty->driver_data;
32825 - if (!hvcsd || hvcsd->open_count <= 0)
32826 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32829 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32830 diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32831 --- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32832 +++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32834 #include <linux/tty_driver.h>
32835 #include <linux/tty_flip.h>
32836 #include <linux/uaccess.h>
32837 +#include <asm/local.h>
32840 #include "network.h"
32841 @@ -51,7 +52,7 @@ struct ipw_tty {
32843 struct ipw_network *network;
32844 struct tty_struct *linux_tty;
32846 + local_t open_count;
32847 unsigned int control_lines;
32848 struct mutex ipw_tty_mutex;
32849 int tx_bytes_queued;
32850 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32851 mutex_unlock(&tty->ipw_tty_mutex);
32854 - if (tty->open_count == 0)
32855 + if (local_read(&tty->open_count) == 0)
32856 tty->tx_bytes_queued = 0;
32858 - tty->open_count++;
32859 + local_inc(&tty->open_count);
32861 tty->linux_tty = linux_tty;
32862 linux_tty->driver_data = tty;
32863 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32865 static void do_ipw_close(struct ipw_tty *tty)
32867 - tty->open_count--;
32869 - if (tty->open_count == 0) {
32870 + if (local_dec_return(&tty->open_count) == 0) {
32871 struct tty_struct *linux_tty = tty->linux_tty;
32873 if (linux_tty != NULL) {
32874 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32877 mutex_lock(&tty->ipw_tty_mutex);
32878 - if (tty->open_count == 0) {
32879 + if (local_read(&tty->open_count) == 0) {
32880 mutex_unlock(&tty->ipw_tty_mutex);
32883 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32887 - if (!tty->open_count) {
32888 + if (!local_read(&tty->open_count)) {
32889 mutex_unlock(&tty->ipw_tty_mutex);
32892 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32895 mutex_lock(&tty->ipw_tty_mutex);
32896 - if (!tty->open_count) {
32897 + if (!local_read(&tty->open_count)) {
32898 mutex_unlock(&tty->ipw_tty_mutex);
32901 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32905 - if (!tty->open_count)
32906 + if (!local_read(&tty->open_count))
32909 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32910 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32914 - if (!tty->open_count)
32915 + if (!local_read(&tty->open_count))
32918 return tty->tx_bytes_queued;
32919 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32923 - if (!tty->open_count)
32924 + if (!local_read(&tty->open_count))
32927 return get_control_lines(tty);
32928 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32932 - if (!tty->open_count)
32933 + if (!local_read(&tty->open_count))
32936 return set_control_lines(tty, set, clear);
32937 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32941 - if (!tty->open_count)
32942 + if (!local_read(&tty->open_count))
32945 /* FIXME: Exactly how is the tty object locked here .. */
32946 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32947 against a parallel ioctl etc */
32948 mutex_lock(&ttyj->ipw_tty_mutex);
32950 - while (ttyj->open_count)
32951 + while (local_read(&ttyj->open_count))
32952 do_ipw_close(ttyj);
32953 ipwireless_disassociate_network_ttys(network,
32954 ttyj->channel_idx);
32955 diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32956 --- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32957 +++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32958 @@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32960 spin_lock_init(&dlci->lock);
32961 dlci->fifo = &dlci->_fifo;
32962 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32963 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32967 diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32968 --- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32969 +++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32970 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32972 *ops = tty_ldisc_N_TTY;
32974 - ops->refcount = ops->flags = 0;
32975 + atomic_set(&ops->refcount, 0);
32978 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32979 diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32980 --- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
32981 +++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
32982 @@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
32983 register_sysctl_table(pty_root_table);
32985 /* Now create the /dev/ptmx special device */
32986 + pax_open_kernel();
32987 tty_default_fops(&ptmx_fops);
32988 - ptmx_fops.open = ptmx_open;
32989 + *(void **)&ptmx_fops.open = ptmx_open;
32990 + pax_close_kernel();
32992 cdev_init(&ptmx_cdev, &ptmx_fops);
32993 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32994 diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
32995 --- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
32996 +++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
32997 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32998 struct rocket_ports tmp;
33001 + pax_track_stack();
33005 memset(&tmp, 0, sizeof (tmp));
33006 diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
33007 --- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
33008 +++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
33010 #define MAX_CONFIG_LEN 40
33012 static struct kgdb_io kgdboc_io_ops;
33013 +static struct kgdb_io kgdboc_io_ops_console;
33015 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33016 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33017 static int configured = -1;
33019 static char config[MAX_CONFIG_LEN];
33020 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33021 kgdboc_unregister_kbd();
33022 if (configured == 1)
33023 kgdb_unregister_io_module(&kgdboc_io_ops);
33024 + else if (configured == 2)
33025 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
33028 static int configure_kgdboc(void)
33029 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33031 char *cptr = config;
33032 struct console *cons;
33033 + int is_console = 0;
33035 err = kgdboc_option_setup(config);
33036 if (err || !strlen(config) || isspace(config[0]))
33040 - kgdboc_io_ops.is_console = 0;
33041 kgdb_tty_driver = NULL;
33043 kgdboc_use_kms = 0;
33044 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33046 if (cons->device && cons->device(cons, &idx) == p &&
33048 - kgdboc_io_ops.is_console = 1;
33053 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33054 kgdb_tty_line = tty_line;
33057 - err = kgdb_register_io_module(&kgdboc_io_ops);
33058 + if (is_console) {
33059 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33062 + err = kgdb_register_io_module(&kgdboc_io_ops);
33073 @@ -212,7 +219,7 @@ noconfig:
33074 static int __init init_kgdboc(void)
33076 /* Already configured? */
33077 - if (configured == 1)
33078 + if (configured >= 1)
33081 return configure_kgdboc();
33082 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33083 if (config[len - 1] == '\n')
33084 config[len - 1] = '\0';
33086 - if (configured == 1)
33087 + if (configured >= 1)
33090 /* Go and configure with the new params. */
33091 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33092 .post_exception = kgdboc_post_exp_handler,
33095 +static struct kgdb_io kgdboc_io_ops_console = {
33096 + .name = "kgdboc",
33097 + .read_char = kgdboc_get_char,
33098 + .write_char = kgdboc_put_char,
33099 + .pre_exception = kgdboc_pre_exp_handler,
33100 + .post_exception = kgdboc_post_exp_handler,
33104 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33105 /* This is only available if kgdboc is a built in for early debugging */
33106 static int __init kgdboc_early_init(char *opt)
33107 diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33108 --- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33109 +++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33110 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33111 int loop = 1, num, total = 0;
33112 u8 recv_buf[512], *pbuf;
33114 + pax_track_stack();
33118 num = max3110_read_multi(max, pbuf);
33119 diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33120 --- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33121 +++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33122 @@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33124 void tty_default_fops(struct file_operations *fops)
33126 - *fops = tty_fops;
33127 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33131 diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33132 --- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33133 +++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33134 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33135 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33136 struct tty_ldisc_ops *ldo = ld->ops;
33139 + atomic_dec(&ldo->refcount);
33140 module_put(ldo->owner);
33141 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33143 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33144 spin_lock_irqsave(&tty_ldisc_lock, flags);
33145 tty_ldiscs[disc] = new_ldisc;
33146 new_ldisc->num = disc;
33147 - new_ldisc->refcount = 0;
33148 + atomic_set(&new_ldisc->refcount, 0);
33149 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33152 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33155 spin_lock_irqsave(&tty_ldisc_lock, flags);
33156 - if (tty_ldiscs[disc]->refcount)
33157 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33160 tty_ldiscs[disc] = NULL;
33161 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33163 ret = ERR_PTR(-EAGAIN);
33164 if (try_module_get(ldops->owner)) {
33165 - ldops->refcount++;
33166 + atomic_inc(&ldops->refcount);
33170 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33171 unsigned long flags;
33173 spin_lock_irqsave(&tty_ldisc_lock, flags);
33174 - ldops->refcount--;
33175 + atomic_dec(&ldops->refcount);
33176 module_put(ldops->owner);
33177 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33179 diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33180 --- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33181 +++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33182 @@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33183 kbd->kbdmode == VC_OFF) &&
33184 value != KVAL(K_SAK))
33185 return; /* SAK is allowed even in raw mode */
33187 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33189 + void *func = fn_handler[value];
33190 + if (func == fn_show_state || func == fn_show_ptregs ||
33191 + func == fn_show_mem)
33196 fn_handler[value](vc);
33199 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33200 --- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33201 +++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33202 @@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33204 static void notify_write(struct vc_data *vc, unsigned int unicode)
33206 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33207 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33208 	atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33211 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33212 --- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33213 +++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33214 @@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33215 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33218 - if (!capable(CAP_SYS_TTY_CONFIG))
33223 key_map = key_maps[s];
33224 @@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33225 val = (i ? K_HOLE : K_NOSUCHMAP);
33226 return put_user(val, &user_kbe->kb_value);
33228 + if (!capable(CAP_SYS_TTY_CONFIG))
33233 if (!i && v == K_NOSUCHMAP) {
33234 @@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33238 - if (!capable(CAP_SYS_TTY_CONFIG))
33241 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33244 @@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33246 return ((p && *p) ? -EOVERFLOW : 0);
33248 + if (!capable(CAP_SYS_TTY_CONFIG))
33254 diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33255 --- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33256 +++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33258 #include <linux/kobject.h>
33259 #include <linux/cdev.h>
33260 #include <linux/uio_driver.h>
33261 +#include <asm/local.h>
33263 #define UIO_MAX_DEVICES (1U << MINORBITS)
33265 @@ -32,10 +33,10 @@ struct uio_device {
33266 struct module *owner;
33267 struct device *dev;
33270 + atomic_unchecked_t event;
33271 struct fasync_struct *async_queue;
33272 wait_queue_head_t wait;
33274 + local_t vma_count;
33275 struct uio_info *info;
33276 struct kobject *map_dir;
33277 struct kobject *portio_dir;
33278 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33279 struct device_attribute *attr, char *buf)
33281 struct uio_device *idev = dev_get_drvdata(dev);
33282 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33283 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33286 static struct device_attribute uio_class_attributes[] = {
33287 @@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33289 struct uio_device *idev = info->uio_dev;
33291 - atomic_inc(&idev->event);
33292 + atomic_inc_unchecked(&idev->event);
33293 wake_up_interruptible(&idev->wait);
33294 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33296 @@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33299 listener->dev = idev;
33300 - listener->event_count = atomic_read(&idev->event);
33301 + listener->event_count = atomic_read_unchecked(&idev->event);
33302 filep->private_data = listener;
33304 if (idev->info->open) {
33305 @@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33308 poll_wait(filep, &idev->wait, wait);
33309 - if (listener->event_count != atomic_read(&idev->event))
33310 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33311 return POLLIN | POLLRDNORM;
33314 @@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33316 set_current_state(TASK_INTERRUPTIBLE);
33318 - event_count = atomic_read(&idev->event);
33319 + event_count = atomic_read_unchecked(&idev->event);
33320 if (event_count != listener->event_count) {
33321 if (copy_to_user(buf, &event_count, count))
33323 @@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33324 static void uio_vma_open(struct vm_area_struct *vma)
33326 struct uio_device *idev = vma->vm_private_data;
33327 - idev->vma_count++;
33328 + local_inc(&idev->vma_count);
33331 static void uio_vma_close(struct vm_area_struct *vma)
33333 struct uio_device *idev = vma->vm_private_data;
33334 - idev->vma_count--;
33335 + local_dec(&idev->vma_count);
33338 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33339 @@ -819,7 +820,7 @@ int __uio_register_device(struct module
33340 idev->owner = owner;
33342 init_waitqueue_head(&idev->wait);
33343 - atomic_set(&idev->event, 0);
33344 + atomic_set_unchecked(&idev->event, 0);
33346 ret = uio_get_minor(idev);
33348 diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33349 --- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33350 +++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33351 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33352 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33355 - if (index < 0 || index > 0x7f)
33356 + if (index > 0x7f)
33360 diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33361 --- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33362 +++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33363 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33364 if (printk_ratelimit())
33365 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33366 __func__, vpi, vci);
33367 - atomic_inc(&vcc->stats->rx_err);
33368 + atomic_inc_unchecked(&vcc->stats->rx_err);
33372 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33373 if (length > ATM_MAX_AAL5_PDU) {
33374 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33375 __func__, length, vcc);
33376 - atomic_inc(&vcc->stats->rx_err);
33377 + atomic_inc_unchecked(&vcc->stats->rx_err);
33381 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33382 if (sarb->len < pdu_length) {
33383 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33384 __func__, pdu_length, sarb->len, vcc);
33385 - atomic_inc(&vcc->stats->rx_err);
33386 + atomic_inc_unchecked(&vcc->stats->rx_err);
33390 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33391 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33393 - atomic_inc(&vcc->stats->rx_err);
33394 + atomic_inc_unchecked(&vcc->stats->rx_err);
33398 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33399 if (printk_ratelimit())
33400 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33402 - atomic_inc(&vcc->stats->rx_drop);
33403 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33407 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33409 vcc->push(vcc, skb);
33411 - atomic_inc(&vcc->stats->rx);
33412 + atomic_inc_unchecked(&vcc->stats->rx);
33416 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33417 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33419 usbatm_pop(vcc, skb);
33420 - atomic_inc(&vcc->stats->tx);
33421 + atomic_inc_unchecked(&vcc->stats->tx);
33423 skb = skb_dequeue(&instance->sndqueue);
33425 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33427 return sprintf(page,
33428 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33429 - atomic_read(&atm_dev->stats.aal5.tx),
33430 - atomic_read(&atm_dev->stats.aal5.tx_err),
33431 - atomic_read(&atm_dev->stats.aal5.rx),
33432 - atomic_read(&atm_dev->stats.aal5.rx_err),
33433 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33434 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33435 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33436 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33437 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33438 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33441 if (instance->disconnected)
33442 diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33443 --- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33444 +++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33445 @@ -126,7 +126,7 @@ static const char *format_endpt =
33446 * time it gets called.
33448 static struct device_connect_event {
33450 + atomic_unchecked_t count;
33451 wait_queue_head_t wait;
33453 .count = ATOMIC_INIT(1),
33454 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33456 void usbfs_conn_disc_event(void)
33458 - atomic_add(2, &device_event.count);
33459 + atomic_add_unchecked(2, &device_event.count);
33460 wake_up(&device_event.wait);
33463 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33465 poll_wait(file, &device_event.wait, wait);
33467 - event_count = atomic_read(&device_event.count);
33468 + event_count = atomic_read_unchecked(&device_event.count);
33469 if (file->f_version != event_count) {
33470 file->f_version = event_count;
33471 return POLLIN | POLLRDNORM;
33472 diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33473 --- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33474 +++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33475 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33476 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33478 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33480 - smallbuf = kmalloc(++len, GFP_NOIO);
33482 + smallbuf = kmalloc(len, GFP_NOIO);
33485 memcpy(smallbuf, buf, len);
33486 diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33487 --- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33488 +++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33489 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33492 static struct kgdb_io kgdbdbgp_io_ops;
33493 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33494 +static struct kgdb_io kgdbdbgp_io_ops_console;
33495 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33497 #define dbgp_kgdb_mode (0)
33499 @@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33500 .write_char = kgdbdbgp_write_char,
33503 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33504 + .name = "kgdbdbgp",
33505 + .read_char = kgdbdbgp_read_char,
33506 + .write_char = kgdbdbgp_write_char,
33510 static int kgdbdbgp_wait_time;
33512 static int __init kgdbdbgp_parse_config(char *str)
33513 @@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33515 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33517 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33518 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33519 + if (early_dbgp_console.index != -1)
33520 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33522 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33526 diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33527 --- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33528 +++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33529 @@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33530 unsigned int num_tests;
33533 + pax_track_stack();
33535 num_tests = ARRAY_SIZE(simple_test_vector);
33536 for (i = 0; i < num_tests; i++) {
33537 ret = xhci_test_trb_in_td(xhci,
33538 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33539 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33540 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33541 @@ -192,7 +192,7 @@ struct wahc {
33542 struct list_head xfer_delayed_list;
33543 spinlock_t xfer_list_lock;
33544 struct work_struct xfer_work;
33545 - atomic_t xfer_id_count;
33546 + atomic_unchecked_t xfer_id_count;
33550 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33551 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33552 spin_lock_init(&wa->xfer_list_lock);
33553 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33554 - atomic_set(&wa->xfer_id_count, 1);
33555 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33559 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33560 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33561 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33562 @@ -294,7 +294,7 @@ out:
33564 static void wa_xfer_id_init(struct wa_xfer *xfer)
33566 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33567 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33571 diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33572 --- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33573 +++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33574 @@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33575 return get_user(vq->last_used_idx, &used->idx);
33578 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33579 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33581 struct file *eventfp, *filep = NULL,
33582 *pollstart = NULL, *pollstop = NULL;
33583 diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33584 --- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33585 +++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33586 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33590 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33591 - !info->fbops->fb_setcmap)) {
33592 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33596 diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33597 --- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33598 +++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33599 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33600 image->dx += image->width + 8;
33602 } else if (rotate == FB_ROTATE_UD) {
33603 - for (x = 0; x < num && image->dx >= 0; x++) {
33604 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33605 info->fbops->fb_imageblit(info, image);
33606 image->dx -= image->width + 8;
33608 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33609 image->dy += image->height + 8;
33611 } else if (rotate == FB_ROTATE_CCW) {
33612 - for (x = 0; x < num && image->dy >= 0; x++) {
33613 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33614 info->fbops->fb_imageblit(info, image);
33615 image->dy -= image->height + 8;
33617 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33618 int flags = info->flags;
33621 + pax_track_stack();
33623 if (var->activate & FB_ACTIVATE_INV_MODE) {
33624 struct fb_videomode mode1, mode2;
33626 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33627 void __user *argp = (void __user *)arg;
33630 + pax_track_stack();
33633 case FBIOGET_VSCREENINFO:
33634 if (!lock_fb_info(info))
33635 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33637 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33639 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33640 + if (con2fb.framebuffer >= FB_MAX)
33642 if (!registered_fb[con2fb.framebuffer])
33643 request_module("fb%d", con2fb.framebuffer);
33644 diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33645 --- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33646 +++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33647 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33650 printk("ringbuffer lockup!!!\n");
33651 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33652 i810_report_error(mmio);
33653 par->dev_flags |= LOCKUP;
33654 info->pixmap.scan_align = 1;
33655 diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33656 --- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33657 +++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33658 @@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33659 dlfb_urb_completion(urb);
33662 - atomic_add(bytes_sent, &dev->bytes_sent);
33663 - atomic_add(bytes_identical, &dev->bytes_identical);
33664 - atomic_add(width*height*2, &dev->bytes_rendered);
33665 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33666 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33667 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33668 end_cycles = get_cycles();
33669 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33670 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33671 >> 10)), /* Kcycles */
33672 &dev->cpu_kcycles_used);
33674 @@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33675 dlfb_urb_completion(urb);
33678 - atomic_add(bytes_sent, &dev->bytes_sent);
33679 - atomic_add(bytes_identical, &dev->bytes_identical);
33680 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33681 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33682 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33683 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33684 end_cycles = get_cycles();
33685 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33686 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33687 >> 10)), /* Kcycles */
33688 &dev->cpu_kcycles_used);
33690 @@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33691 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33692 struct dlfb_data *dev = fb_info->par;
33693 return snprintf(buf, PAGE_SIZE, "%u\n",
33694 - atomic_read(&dev->bytes_rendered));
33695 + atomic_read_unchecked(&dev->bytes_rendered));
33698 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33699 @@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33700 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33701 struct dlfb_data *dev = fb_info->par;
33702 return snprintf(buf, PAGE_SIZE, "%u\n",
33703 - atomic_read(&dev->bytes_identical));
33704 + atomic_read_unchecked(&dev->bytes_identical));
33707 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33708 @@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33709 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33710 struct dlfb_data *dev = fb_info->par;
33711 return snprintf(buf, PAGE_SIZE, "%u\n",
33712 - atomic_read(&dev->bytes_sent));
33713 + atomic_read_unchecked(&dev->bytes_sent));
33716 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33717 @@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33718 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33719 struct dlfb_data *dev = fb_info->par;
33720 return snprintf(buf, PAGE_SIZE, "%u\n",
33721 - atomic_read(&dev->cpu_kcycles_used));
33722 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33725 static ssize_t edid_show(
33726 @@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33728 struct dlfb_data *dev = fb_info->par;
33730 - atomic_set(&dev->bytes_rendered, 0);
33731 - atomic_set(&dev->bytes_identical, 0);
33732 - atomic_set(&dev->bytes_sent, 0);
33733 - atomic_set(&dev->cpu_kcycles_used, 0);
33734 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33735 + atomic_set_unchecked(&dev->bytes_identical, 0);
33736 + atomic_set_unchecked(&dev->bytes_sent, 0);
33737 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33741 diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33742 --- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33743 +++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33745 #include <linux/io.h>
33746 #include <linux/mutex.h>
33747 #include <linux/slab.h>
33748 +#include <linux/moduleloader.h>
33749 #include <video/edid.h>
33750 #include <video/uvesafb.h>
33752 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33756 - return call_usermodehelper(v86d_path, argv, envp, 1);
33757 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33761 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33762 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33763 par->pmi_setpal = par->ypan = 0;
33766 +#ifdef CONFIG_PAX_KERNEXEC
33767 +#ifdef CONFIG_MODULES
33768 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33770 + if (!par->pmi_code) {
33771 + par->pmi_setpal = par->ypan = 0;
33776 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33777 + task->t.regs.edi);
33779 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33780 + pax_open_kernel();
33781 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33782 + pax_close_kernel();
33784 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33785 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33787 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33788 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33791 printk(KERN_INFO "uvesafb: protected mode interface info at "
33793 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33794 @@ -1821,6 +1844,11 @@ out:
33795 if (par->vbe_modes)
33796 kfree(par->vbe_modes);
33798 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33799 + if (par->pmi_code)
33800 + module_free_exec(NULL, par->pmi_code);
33803 framebuffer_release(info);
33806 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33807 kfree(par->vbe_state_orig);
33808 if (par->vbe_state_saved)
33809 kfree(par->vbe_state_saved);
33811 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33812 + if (par->pmi_code)
33813 + module_free_exec(NULL, par->pmi_code);
33818 framebuffer_release(info);
33819 diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33820 --- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33821 +++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33825 #include <linux/module.h>
33826 +#include <linux/moduleloader.h>
33827 #include <linux/kernel.h>
33828 #include <linux/errno.h>
33829 #include <linux/string.h>
33830 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33831 static int vram_total __initdata; /* Set total amount of memory */
33832 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33833 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33834 -static void (*pmi_start)(void) __read_mostly;
33835 -static void (*pmi_pal) (void) __read_mostly;
33836 +static void (*pmi_start)(void) __read_only;
33837 +static void (*pmi_pal) (void) __read_only;
33838 static int depth __read_mostly;
33839 static int vga_compat __read_mostly;
33840 /* --------------------------------------------------------------------- */
33841 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33842 unsigned int size_vmode;
33843 unsigned int size_remap;
33844 unsigned int size_total;
33845 + void *pmi_code = NULL;
33847 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33849 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33850 size_remap = size_total;
33851 vesafb_fix.smem_len = size_remap;
33854 - screen_info.vesapm_seg = 0;
33857 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33858 printk(KERN_WARNING
33859 "vesafb: cannot reserve video memory at 0x%lx\n",
33860 @@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33861 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33862 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33866 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33867 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33869 +#elif !defined(CONFIG_PAX_KERNEXEC)
33874 + screen_info.vesapm_seg = 0;
33876 if (screen_info.vesapm_seg) {
33877 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33878 - screen_info.vesapm_seg,screen_info.vesapm_off);
33879 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33880 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33883 if (screen_info.vesapm_seg < 0xc000)
33884 @@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33886 if (ypan || pmi_setpal) {
33887 unsigned short *pmi_base;
33889 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33890 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33891 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33893 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33894 + pax_open_kernel();
33895 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33897 + pmi_code = pmi_base;
33900 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33901 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33903 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33904 + pmi_start = ktva_ktla(pmi_start);
33905 + pmi_pal = ktva_ktla(pmi_pal);
33906 + pax_close_kernel();
33909 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33911 printk(KERN_INFO "vesafb: pmi: ports = ");
33912 @@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33913 info->node, info->fix.id);
33917 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33918 + module_free_exec(NULL, pmi_code);
33921 if (info->screen_base)
33922 iounmap(info->screen_base);
33923 framebuffer_release(info);
33924 diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33925 --- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33926 +++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33927 @@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33931 + pax_track_stack();
33933 all_vm_events(events);
33936 diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33937 --- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33938 +++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33939 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33941 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33943 - char *s = nd_get_link(nd);
33944 + const char *s = nd_get_link(nd);
33946 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33947 IS_ERR(s) ? "<error>" : s);
33948 diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33949 --- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33950 +++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33951 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33952 size += sizeof(struct io_event) * nr_events;
33953 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33955 - if (nr_pages < 0)
33956 + if (nr_pages <= 0)
33959 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33960 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33961 struct aio_timeout to;
33964 + pax_track_stack();
33966 /* needed to zero any padding within an entry (there shouldn't be
33967 * any, but C is fun!
33969 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33970 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33973 + struct iovec iovstack;
33975 #ifdef CONFIG_COMPAT
33977 ret = compat_rw_copy_check_uvector(type,
33978 (struct compat_iovec __user *)kiocb->ki_buf,
33979 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33980 + kiocb->ki_nbytes, 1, &iovstack,
33984 ret = rw_copy_check_uvector(type,
33985 (struct iovec __user *)kiocb->ki_buf,
33986 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33987 + kiocb->ki_nbytes, 1, &iovstack,
33992 + if (kiocb->ki_iovec == &iovstack) {
33993 + kiocb->ki_inline_vec = iovstack;
33994 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33996 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33997 kiocb->ki_cur_seg = 0;
33998 /* ki_nbytes/left now reflect bytes instead of segs */
33999 diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
34000 --- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
34001 +++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
34002 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
34003 unsigned long limit;
34005 limit = rlimit(RLIMIT_FSIZE);
34006 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
34007 if (limit != RLIM_INFINITY && offset > limit)
34009 if (offset > inode->i_sb->s_maxbytes)
34010 diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
34011 --- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
34012 +++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
34013 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
34015 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
34016 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
34017 - char *link = nd_get_link(nd);
34018 + const char *link = nd_get_link(nd);
34022 diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
34023 --- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
34024 +++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
34026 #include <linux/string.h>
34027 #include <linux/fs.h>
34028 #include <linux/file.h>
34029 +#include <linux/security.h>
34030 #include <linux/stat.h>
34031 #include <linux/fcntl.h>
34032 #include <linux/ptrace.h>
34033 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
34035 # define START_STACK(u) ((void __user *)u.start_stack)
34037 + memset(&dump, 0, sizeof(dump));
34042 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
34044 /* If the size of the dump file exceeds the rlimit, then see what would happen
34045 if we wrote the stack, but not the data area. */
34046 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
34047 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
34050 /* Make sure we have enough room to write the stack and data areas. */
34051 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
34052 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34055 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34056 rlim = rlimit(RLIMIT_DATA);
34057 if (rlim >= RLIM_INFINITY)
34060 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34061 if (ex.a_data + ex.a_bss > rlim)
34064 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34065 install_exec_creds(bprm);
34066 current->flags &= ~PF_FORKNOEXEC;
34068 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34069 + current->mm->pax_flags = 0UL;
34072 +#ifdef CONFIG_PAX_PAGEEXEC
34073 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34074 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34076 +#ifdef CONFIG_PAX_EMUTRAMP
34077 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34078 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34081 +#ifdef CONFIG_PAX_MPROTECT
34082 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34083 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34089 if (N_MAGIC(ex) == OMAGIC) {
34090 unsigned long text_addr, map_size;
34092 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34094 down_write(&current->mm->mmap_sem);
34095 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34096 - PROT_READ | PROT_WRITE | PROT_EXEC,
34097 + PROT_READ | PROT_WRITE,
34098 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34099 fd_offset + ex.a_text);
34100 up_write(&current->mm->mmap_sem);
34101 diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34102 --- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34103 +++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34104 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34105 #define elf_core_dump NULL
34108 +#ifdef CONFIG_PAX_MPROTECT
34109 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34112 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34113 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34115 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34116 .load_binary = load_elf_binary,
34117 .load_shlib = load_elf_library,
34118 .core_dump = elf_core_dump,
34120 +#ifdef CONFIG_PAX_MPROTECT
34121 + .handle_mprotect= elf_handle_mprotect,
34124 .min_coredump = ELF_EXEC_PAGESIZE,
34127 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34129 static int set_brk(unsigned long start, unsigned long end)
34131 + unsigned long e = end;
34133 start = ELF_PAGEALIGN(start);
34134 end = ELF_PAGEALIGN(end);
34136 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34137 if (BAD_ADDR(addr))
34140 - current->mm->start_brk = current->mm->brk = end;
34141 + current->mm->start_brk = current->mm->brk = e;
34145 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34146 elf_addr_t __user *u_rand_bytes;
34147 const char *k_platform = ELF_PLATFORM;
34148 const char *k_base_platform = ELF_BASE_PLATFORM;
34149 - unsigned char k_rand_bytes[16];
34150 + u32 k_rand_bytes[4];
34152 elf_addr_t *elf_info;
34154 const struct cred *cred = current_cred();
34155 struct vm_area_struct *vma;
34156 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34158 + pax_track_stack();
34161 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34162 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34163 * Generate 16 random bytes for userspace PRNG seeding.
34165 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34166 - u_rand_bytes = (elf_addr_t __user *)
34167 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34168 + srandom32(k_rand_bytes[0] ^ random32());
34169 + srandom32(k_rand_bytes[1] ^ random32());
34170 + srandom32(k_rand_bytes[2] ^ random32());
34171 + srandom32(k_rand_bytes[3] ^ random32());
34172 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34173 + u_rand_bytes = (elf_addr_t __user *) p;
34174 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34177 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34179 current->mm->env_end = p;
34181 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34183 /* Put the elf_info on the stack in the right place. */
34184 sp = (elf_addr_t __user *)envp + 1;
34185 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34186 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34190 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34192 struct elf_phdr *elf_phdata;
34193 struct elf_phdr *eppnt;
34194 - unsigned long load_addr = 0;
34195 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34196 int load_addr_set = 0;
34197 unsigned long last_bss = 0, elf_bss = 0;
34198 - unsigned long error = ~0UL;
34199 + unsigned long error = -EINVAL;
34200 unsigned long total_size;
34201 int retval, i, size;
34203 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34207 +#ifdef CONFIG_PAX_SEGMEXEC
34208 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34209 + pax_task_size = SEGMEXEC_TASK_SIZE;
34212 eppnt = elf_phdata;
34213 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34214 if (eppnt->p_type == PT_LOAD) {
34215 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34216 k = load_addr + eppnt->p_vaddr;
34218 eppnt->p_filesz > eppnt->p_memsz ||
34219 - eppnt->p_memsz > TASK_SIZE ||
34220 - TASK_SIZE - eppnt->p_memsz < k) {
34221 + eppnt->p_memsz > pax_task_size ||
34222 + pax_task_size - eppnt->p_memsz < k) {
34226 @@ -528,6 +553,193 @@ out:
34230 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34231 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34233 + unsigned long pax_flags = 0UL;
34235 +#ifdef CONFIG_PAX_PAGEEXEC
34236 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34237 + pax_flags |= MF_PAX_PAGEEXEC;
34240 +#ifdef CONFIG_PAX_SEGMEXEC
34241 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34242 + pax_flags |= MF_PAX_SEGMEXEC;
34245 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34246 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34247 + if ((__supported_pte_mask & _PAGE_NX))
34248 + pax_flags &= ~MF_PAX_SEGMEXEC;
34250 + pax_flags &= ~MF_PAX_PAGEEXEC;
34254 +#ifdef CONFIG_PAX_EMUTRAMP
34255 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34256 + pax_flags |= MF_PAX_EMUTRAMP;
34259 +#ifdef CONFIG_PAX_MPROTECT
34260 + if (elf_phdata->p_flags & PF_MPROTECT)
34261 + pax_flags |= MF_PAX_MPROTECT;
34264 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34265 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34266 + pax_flags |= MF_PAX_RANDMMAP;
34269 + return pax_flags;
34273 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34274 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34276 + unsigned long pax_flags = 0UL;
34278 +#ifdef CONFIG_PAX_PAGEEXEC
34279 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34280 + pax_flags |= MF_PAX_PAGEEXEC;
34283 +#ifdef CONFIG_PAX_SEGMEXEC
34284 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34285 + pax_flags |= MF_PAX_SEGMEXEC;
34288 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34289 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34290 + if ((__supported_pte_mask & _PAGE_NX))
34291 + pax_flags &= ~MF_PAX_SEGMEXEC;
34293 + pax_flags &= ~MF_PAX_PAGEEXEC;
34297 +#ifdef CONFIG_PAX_EMUTRAMP
34298 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34299 + pax_flags |= MF_PAX_EMUTRAMP;
34302 +#ifdef CONFIG_PAX_MPROTECT
34303 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34304 + pax_flags |= MF_PAX_MPROTECT;
34307 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34308 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34309 + pax_flags |= MF_PAX_RANDMMAP;
34312 + return pax_flags;
34316 +#ifdef CONFIG_PAX_EI_PAX
34317 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34319 + unsigned long pax_flags = 0UL;
34321 +#ifdef CONFIG_PAX_PAGEEXEC
34322 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34323 + pax_flags |= MF_PAX_PAGEEXEC;
34326 +#ifdef CONFIG_PAX_SEGMEXEC
34327 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34328 + pax_flags |= MF_PAX_SEGMEXEC;
34331 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34332 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34333 + if ((__supported_pte_mask & _PAGE_NX))
34334 + pax_flags &= ~MF_PAX_SEGMEXEC;
34336 + pax_flags &= ~MF_PAX_PAGEEXEC;
34340 +#ifdef CONFIG_PAX_EMUTRAMP
34341 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34342 + pax_flags |= MF_PAX_EMUTRAMP;
34345 +#ifdef CONFIG_PAX_MPROTECT
34346 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34347 + pax_flags |= MF_PAX_MPROTECT;
34350 +#ifdef CONFIG_PAX_ASLR
34351 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34352 + pax_flags |= MF_PAX_RANDMMAP;
34355 + return pax_flags;
34359 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34360 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34362 + unsigned long pax_flags = 0UL;
34364 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34366 + int found_flags = 0;
34369 +#ifdef CONFIG_PAX_EI_PAX
34370 + pax_flags = pax_parse_ei_pax(elf_ex);
34373 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34374 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34375 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34376 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34377 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34378 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34379 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34380 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34383 +#ifdef CONFIG_PAX_SOFTMODE
34384 + if (pax_softmode)
34385 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34389 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34395 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34396 + if (found_flags == 0) {
34397 + struct elf_phdr phdr;
34398 + memset(&phdr, 0, sizeof(phdr));
34399 + phdr.p_flags = PF_NOEMUTRAMP;
34400 +#ifdef CONFIG_PAX_SOFTMODE
34401 + if (pax_softmode)
34402 + pax_flags = pax_parse_softmode(&phdr);
34405 + pax_flags = pax_parse_hardmode(&phdr);
34409 + if (0 > pax_check_flags(&pax_flags))
34412 + current->mm->pax_flags = pax_flags;
34418 * These are the functions used to load ELF style executables and shared
34419 * libraries. There is no binary dependent code anywhere else.
34420 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34422 unsigned int random_variable = 0;
34424 +#ifdef CONFIG_PAX_RANDUSTACK
34425 + if (randomize_va_space)
34426 + return stack_top - current->mm->delta_stack;
34429 if ((current->flags & PF_RANDOMIZE) &&
34430 !(current->personality & ADDR_NO_RANDOMIZE)) {
34431 random_variable = get_random_int() & STACK_RND_MASK;
34432 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34433 unsigned long load_addr = 0, load_bias = 0;
34434 int load_addr_set = 0;
34435 char * elf_interpreter = NULL;
34436 - unsigned long error;
34437 + unsigned long error = 0;
34438 struct elf_phdr *elf_ppnt, *elf_phdata;
34439 unsigned long elf_bss, elf_brk;
34441 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34442 unsigned long start_code, end_code, start_data, end_data;
34443 unsigned long reloc_func_desc __maybe_unused = 0;
34444 int executable_stack = EXSTACK_DEFAULT;
34445 - unsigned long def_flags = 0;
34447 struct elfhdr elf_ex;
34448 struct elfhdr interp_elf_ex;
34450 + unsigned long pax_task_size = TASK_SIZE;
34452 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34454 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34456 /* OK, This is the point of no return */
34457 current->flags &= ~PF_FORKNOEXEC;
34458 - current->mm->def_flags = def_flags;
34460 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34461 + current->mm->pax_flags = 0UL;
34464 +#ifdef CONFIG_PAX_DLRESOLVE
34465 + current->mm->call_dl_resolve = 0UL;
34468 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34469 + current->mm->call_syscall = 0UL;
34472 +#ifdef CONFIG_PAX_ASLR
34473 + current->mm->delta_mmap = 0UL;
34474 + current->mm->delta_stack = 0UL;
34477 + current->mm->def_flags = 0;
34479 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34480 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34481 + send_sig(SIGKILL, current, 0);
34482 + goto out_free_dentry;
34486 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34487 + pax_set_initial_flags(bprm);
34488 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34489 + if (pax_set_initial_flags_func)
34490 + (pax_set_initial_flags_func)(bprm);
34493 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34494 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34495 + current->mm->context.user_cs_limit = PAGE_SIZE;
34496 + current->mm->def_flags |= VM_PAGEEXEC;
34500 +#ifdef CONFIG_PAX_SEGMEXEC
34501 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34502 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34503 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34504 + pax_task_size = SEGMEXEC_TASK_SIZE;
34505 + current->mm->def_flags |= VM_NOHUGEPAGE;
34509 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34510 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34511 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34516 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34517 may depend on the personality. */
34518 SET_PERSONALITY(loc->elf_ex);
34520 +#ifdef CONFIG_PAX_ASLR
34521 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34522 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34523 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34527 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34528 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34529 + executable_stack = EXSTACK_DISABLE_X;
34530 + current->personality &= ~READ_IMPLIES_EXEC;
34534 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34535 current->personality |= READ_IMPLIES_EXEC;
34537 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34539 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34542 +#ifdef CONFIG_PAX_RANDMMAP
34543 + /* PaX: randomize base address at the default exe base if requested */
34544 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34545 +#ifdef CONFIG_SPARC64
34546 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34548 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34550 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34551 + elf_flags |= MAP_FIXED;
34557 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34558 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34559 * allowed task size. Note that p_filesz must always be
34560 * <= p_memsz so it is only necessary to check p_memsz.
34562 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34563 - elf_ppnt->p_memsz > TASK_SIZE ||
34564 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34565 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34566 + elf_ppnt->p_memsz > pax_task_size ||
34567 + pax_task_size - elf_ppnt->p_memsz < k) {
34568 /* set_brk can never work. Avoid overflows. */
34569 send_sig(SIGKILL, current, 0);
34571 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34572 start_data += load_bias;
34573 end_data += load_bias;
34575 +#ifdef CONFIG_PAX_RANDMMAP
34576 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34577 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34580 /* Calling set_brk effectively mmaps the pages that we need
34581 * for the bss and break sections. We must do this before
34582 * mapping in the interpreter, to make sure it doesn't wind
34583 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34584 goto out_free_dentry;
34586 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34587 - send_sig(SIGSEGV, current, 0);
34588 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34589 - goto out_free_dentry;
34591 + * This bss-zeroing can fail if the ELF
34592 + * file specifies odd protections. So
34593 + * we don't check the return value
34597 if (elf_interpreter) {
34598 @@ -1090,7 +1398,7 @@ out:
34599 * Decide what to dump of a segment, part, all or none.
34601 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34602 - unsigned long mm_flags)
34603 + unsigned long mm_flags, long signr)
34605 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34607 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34608 if (vma->vm_file == NULL)
34611 - if (FILTER(MAPPED_PRIVATE))
34612 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34616 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34618 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34623 - while (auxv[i - 2] != AT_NULL);
34624 + } while (auxv[i - 2] != AT_NULL);
34625 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34628 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34631 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34632 - unsigned long mm_flags)
34633 + struct coredump_params *cprm)
34635 struct vm_area_struct *vma;
34638 for (vma = first_vma(current, gate_vma); vma != NULL;
34639 vma = next_vma(vma, gate_vma))
34640 - size += vma_dump_size(vma, mm_flags);
34641 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34645 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34647 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34649 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34650 + offset += elf_core_vma_data_size(gate_vma, cprm);
34651 offset += elf_core_extra_data_size();
34654 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34657 size += sizeof(*elf);
34658 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34659 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34662 size += sizeof(*phdr4note);
34663 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34664 if (size > cprm->limit
34665 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34667 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34668 phdr.p_offset = offset;
34669 phdr.p_vaddr = vma->vm_start;
34671 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34672 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34673 phdr.p_memsz = vma->vm_end - vma->vm_start;
34674 offset += phdr.p_filesz;
34675 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34676 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34677 phdr.p_align = ELF_EXEC_PAGESIZE;
34679 size += sizeof(phdr);
34680 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34681 if (size > cprm->limit
34682 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34684 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34685 unsigned long addr;
34688 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34689 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34691 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34693 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34694 page = get_dump_page(addr);
34696 void *kaddr = kmap(page);
34697 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34698 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34699 !dump_write(cprm->file, kaddr,
34701 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34703 if (e_phnum == PN_XNUM) {
34704 size += sizeof(*shdr4extnum);
34705 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34706 if (size > cprm->limit
34707 || !dump_write(cprm->file, shdr4extnum,
34708 sizeof(*shdr4extnum)))
34709 @@ -2067,6 +2380,97 @@ out:
34711 #endif /* CONFIG_ELF_CORE */
34713 +#ifdef CONFIG_PAX_MPROTECT
34714 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34715 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34716 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34718 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34719 + * basis because we want to allow the common case and not the special ones.
34721 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34723 + struct elfhdr elf_h;
34724 + struct elf_phdr elf_p;
34726 + unsigned long oldflags;
34727 + bool is_textrel_rw, is_textrel_rx, is_relro;
34729 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34732 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34733 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34735 +#ifdef CONFIG_PAX_ELFRELOCS
34736 + /* possible TEXTREL */
34737 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34738 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34740 + is_textrel_rw = false;
34741 + is_textrel_rx = false;
34744 + /* possible RELRO */
34745 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34747 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34750 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34751 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34753 +#ifdef CONFIG_PAX_ETEXECRELOCS
34754 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34756 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34759 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34760 + !elf_check_arch(&elf_h) ||
34761 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34762 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34765 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34766 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34768 + switch (elf_p.p_type) {
34770 + if (!is_textrel_rw && !is_textrel_rx)
34773 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34776 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34778 + if (dyn.d_tag == DT_NULL)
34780 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34781 + gr_log_textrel(vma);
34782 + if (is_textrel_rw)
34783 + vma->vm_flags |= VM_MAYWRITE;
34785 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34786 + vma->vm_flags &= ~VM_MAYWRITE;
34793 + case PT_GNU_RELRO:
34796 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34797 + vma->vm_flags &= ~VM_MAYWRITE;
34804 static int __init init_elf_binfmt(void)
34806 return register_binfmt(&elf_format);
34807 diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34808 --- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34809 +++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34810 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34811 realdatastart = (unsigned long) -ENOMEM;
34812 printk("Unable to allocate RAM for process data, errno %d\n",
34813 (int)-realdatastart);
34814 + down_write(&current->mm->mmap_sem);
34815 do_munmap(current->mm, textpos, text_len);
34816 + up_write(&current->mm->mmap_sem);
34817 ret = realdatastart;
34820 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34822 if (IS_ERR_VALUE(result)) {
34823 printk("Unable to read data+bss, errno %d\n", (int)-result);
34824 + down_write(&current->mm->mmap_sem);
34825 do_munmap(current->mm, textpos, text_len);
34826 do_munmap(current->mm, realdatastart, len);
34827 + up_write(&current->mm->mmap_sem);
34831 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34833 if (IS_ERR_VALUE(result)) {
34834 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34835 + down_write(&current->mm->mmap_sem);
34836 do_munmap(current->mm, textpos, text_len + data_len + extra +
34837 MAX_SHARED_LIBS * sizeof(unsigned long));
34838 + up_write(&current->mm->mmap_sem);
34842 diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34843 --- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34844 +++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34845 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34846 const int read = bio_data_dir(bio) == READ;
34847 struct bio_map_data *bmd = bio->bi_private;
34849 - char *p = bmd->sgvecs[0].iov_base;
34850 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34852 __bio_for_each_segment(bvec, bio, i, 0) {
34853 char *addr = page_address(bvec->bv_page);
34854 diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34855 --- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34856 +++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34857 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34858 else if (bdev->bd_contains == bdev)
34859 return true; /* is a whole device which isn't held */
34861 - else if (whole->bd_holder == bd_may_claim)
34862 + else if (whole->bd_holder == (void *)bd_may_claim)
34863 return true; /* is a partition of a device that is being partitioned */
34864 else if (whole->bd_holder != NULL)
34865 return false; /* is a partition of a held device */
34866 diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34867 --- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34868 +++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34869 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34870 free_extent_buffer(buf);
34871 add_root_to_dirty_list(root);
34873 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34874 - parent_start = parent->start;
34876 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34878 + parent_start = parent->start;
34880 + parent_start = 0;
34884 WARN_ON(trans->transid != btrfs_header_generation(parent));
34885 @@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34889 - struct btrfs_disk_key disk_key;
34890 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34891 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34893 diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34894 --- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34895 +++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34896 @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34898 if (entry->bytes < bytes ||
34899 (!entry->bitmap && entry->offset < min_start)) {
34900 - struct rb_node *node;
34902 node = rb_next(&entry->offset_index);
34905 @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34906 cluster, entry, bytes,
34909 - struct rb_node *node;
34910 node = rb_next(&entry->offset_index);
34913 diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34914 --- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34915 +++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34916 @@ -6947,7 +6947,7 @@ fail:
34920 -static int btrfs_getattr(struct vfsmount *mnt,
34921 +int btrfs_getattr(struct vfsmount *mnt,
34922 struct dentry *dentry, struct kstat *stat)
34924 struct inode *inode = dentry->d_inode;
34925 @@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34929 +EXPORT_SYMBOL(btrfs_getattr);
34931 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34933 + return BTRFS_I(inode)->root->anon_super.s_dev;
34935 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34938 * If a file is moved, it will inherit the cow and compression flags of the new
34940 diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34941 --- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34942 +++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34943 @@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34944 for (i = 0; i < num_types; i++) {
34945 struct btrfs_space_info *tmp;
34947 + /* Don't copy in more than we allocated */
34955 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34956 @@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34957 memcpy(dest, &space, sizeof(space));
34959 space_args.total_spaces++;
34965 up_read(&info->groups_sem);
34967 diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34968 --- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34969 +++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34970 @@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34972 spin_unlock(&rc->reloc_root_tree.lock);
34974 - BUG_ON((struct btrfs_root *)node->data != root);
34975 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34978 spin_lock(&rc->reloc_root_tree.lock);
34979 diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34980 --- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
34981 +++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
34982 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34985 /* start by checking things over */
34986 - ASSERT(cache->fstop_percent >= 0 &&
34987 - cache->fstop_percent < cache->fcull_percent &&
34988 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34989 cache->fcull_percent < cache->frun_percent &&
34990 cache->frun_percent < 100);
34992 - ASSERT(cache->bstop_percent >= 0 &&
34993 - cache->bstop_percent < cache->bcull_percent &&
34994 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34995 cache->bcull_percent < cache->brun_percent &&
34996 cache->brun_percent < 100);
34998 diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
34999 --- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
35000 +++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
35001 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
35005 - if (copy_to_user(_buffer, buffer, n) != 0)
35006 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
35010 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
35011 if (test_bit(CACHEFILES_DEAD, &cache->flags))
35014 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
35015 + if (datalen > PAGE_SIZE - 1)
35016 return -EOPNOTSUPP;
35018 /* drag the command string into the kernel so we can parse it */
35019 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
35020 if (args[0] != '%' || args[1] != '\0')
35023 - if (fstop < 0 || fstop >= cache->fcull_percent)
35024 + if (fstop >= cache->fcull_percent)
35025 return cachefiles_daemon_range_error(cache, args);
35027 cache->fstop_percent = fstop;
35028 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
35029 if (args[0] != '%' || args[1] != '\0')
35032 - if (bstop < 0 || bstop >= cache->bcull_percent)
35033 + if (bstop >= cache->bcull_percent)
35034 return cachefiles_daemon_range_error(cache, args);
35036 cache->bstop_percent = bstop;
35037 diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
35038 --- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
35039 +++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
35040 @@ -57,7 +57,7 @@ struct cachefiles_cache {
35041 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
35042 struct rb_root active_nodes; /* active nodes (can't be culled) */
35043 rwlock_t active_lock; /* lock for active_nodes */
35044 - atomic_t gravecounter; /* graveyard uniquifier */
35045 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
35046 unsigned frun_percent; /* when to stop culling (% files) */
35047 unsigned fcull_percent; /* when to start culling (% files) */
35048 unsigned fstop_percent; /* when to stop allocating (% files) */
35049 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
35052 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35053 -extern atomic_t cachefiles_lookup_histogram[HZ];
35054 -extern atomic_t cachefiles_mkdir_histogram[HZ];
35055 -extern atomic_t cachefiles_create_histogram[HZ];
35056 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35057 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35058 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35060 extern int __init cachefiles_proc_init(void);
35061 extern void cachefiles_proc_cleanup(void);
35063 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35064 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35066 unsigned long jif = jiffies - start_jif;
35069 - atomic_inc(&histogram[jif]);
35070 + atomic_inc_unchecked(&histogram[jif]);
35074 diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35075 --- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35076 +++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35077 @@ -318,7 +318,7 @@ try_again:
35078 /* first step is to make up a grave dentry in the graveyard */
35079 sprintf(nbuffer, "%08x%08x",
35080 (uint32_t) get_seconds(),
35081 - (uint32_t) atomic_inc_return(&cache->gravecounter));
35082 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35084 /* do the multiway lock magic */
35085 trap = lock_rename(cache->graveyard, dir);
35086 diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35087 --- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35088 +++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35090 #include <linux/seq_file.h>
35091 #include "internal.h"
35093 -atomic_t cachefiles_lookup_histogram[HZ];
35094 -atomic_t cachefiles_mkdir_histogram[HZ];
35095 -atomic_t cachefiles_create_histogram[HZ];
35096 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35097 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35098 +atomic_unchecked_t cachefiles_create_histogram[HZ];
35101 * display the latency histogram
35102 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35105 index = (unsigned long) v - 3;
35106 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35107 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35108 - z = atomic_read(&cachefiles_create_histogram[index]);
35109 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35110 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35111 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35112 if (x == 0 && y == 0 && z == 0)
35115 diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35116 --- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35117 +++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35118 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35121 ret = file->f_op->write(
35122 - file, (const void __user *) data, len, &pos);
35123 + file, (__force const void __user *) data, len, &pos);
35127 diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35128 --- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35129 +++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35130 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35131 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35132 struct ceph_mds_client *mdsc = fsc->mdsc;
35133 unsigned frag = fpos_frag(filp->f_pos);
35134 - int off = fpos_off(filp->f_pos);
35135 + unsigned int off = fpos_off(filp->f_pos);
35138 struct ceph_mds_reply_info_parsed *rinfo;
35139 @@ -360,7 +360,7 @@ more:
35140 rinfo = &fi->last_readdir->r_reply_info;
35141 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35142 rinfo->dir_nr, off, fi->offset);
35143 - while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35144 + while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35145 u64 pos = ceph_make_fpos(frag, off);
35146 struct ceph_mds_reply_inode *in =
35147 rinfo->dir_in[off - fi->offset].in;
35148 diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35149 --- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35150 +++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35151 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35152 tcon = list_entry(tmp3,
35153 struct cifsTconInfo,
35155 - atomic_set(&tcon->num_smbs_sent, 0);
35156 - atomic_set(&tcon->num_writes, 0);
35157 - atomic_set(&tcon->num_reads, 0);
35158 - atomic_set(&tcon->num_oplock_brks, 0);
35159 - atomic_set(&tcon->num_opens, 0);
35160 - atomic_set(&tcon->num_posixopens, 0);
35161 - atomic_set(&tcon->num_posixmkdirs, 0);
35162 - atomic_set(&tcon->num_closes, 0);
35163 - atomic_set(&tcon->num_deletes, 0);
35164 - atomic_set(&tcon->num_mkdirs, 0);
35165 - atomic_set(&tcon->num_rmdirs, 0);
35166 - atomic_set(&tcon->num_renames, 0);
35167 - atomic_set(&tcon->num_t2renames, 0);
35168 - atomic_set(&tcon->num_ffirst, 0);
35169 - atomic_set(&tcon->num_fnext, 0);
35170 - atomic_set(&tcon->num_fclose, 0);
35171 - atomic_set(&tcon->num_hardlinks, 0);
35172 - atomic_set(&tcon->num_symlinks, 0);
35173 - atomic_set(&tcon->num_locks, 0);
35174 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35175 + atomic_set_unchecked(&tcon->num_writes, 0);
35176 + atomic_set_unchecked(&tcon->num_reads, 0);
35177 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35178 + atomic_set_unchecked(&tcon->num_opens, 0);
35179 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35180 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35181 + atomic_set_unchecked(&tcon->num_closes, 0);
35182 + atomic_set_unchecked(&tcon->num_deletes, 0);
35183 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35184 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35185 + atomic_set_unchecked(&tcon->num_renames, 0);
35186 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35187 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35188 + atomic_set_unchecked(&tcon->num_fnext, 0);
35189 + atomic_set_unchecked(&tcon->num_fclose, 0);
35190 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35191 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35192 + atomic_set_unchecked(&tcon->num_locks, 0);
35196 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35197 if (tcon->need_reconnect)
35198 seq_puts(m, "\tDISCONNECTED ");
35199 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35200 - atomic_read(&tcon->num_smbs_sent),
35201 - atomic_read(&tcon->num_oplock_brks));
35202 + atomic_read_unchecked(&tcon->num_smbs_sent),
35203 + atomic_read_unchecked(&tcon->num_oplock_brks));
35204 seq_printf(m, "\nReads: %d Bytes: %lld",
35205 - atomic_read(&tcon->num_reads),
35206 + atomic_read_unchecked(&tcon->num_reads),
35207 (long long)(tcon->bytes_read));
35208 seq_printf(m, "\nWrites: %d Bytes: %lld",
35209 - atomic_read(&tcon->num_writes),
35210 + atomic_read_unchecked(&tcon->num_writes),
35211 (long long)(tcon->bytes_written));
35212 seq_printf(m, "\nFlushes: %d",
35213 - atomic_read(&tcon->num_flushes));
35214 + atomic_read_unchecked(&tcon->num_flushes));
35215 seq_printf(m, "\nLocks: %d HardLinks: %d "
35217 - atomic_read(&tcon->num_locks),
35218 - atomic_read(&tcon->num_hardlinks),
35219 - atomic_read(&tcon->num_symlinks));
35220 + atomic_read_unchecked(&tcon->num_locks),
35221 + atomic_read_unchecked(&tcon->num_hardlinks),
35222 + atomic_read_unchecked(&tcon->num_symlinks));
35223 seq_printf(m, "\nOpens: %d Closes: %d "
35225 - atomic_read(&tcon->num_opens),
35226 - atomic_read(&tcon->num_closes),
35227 - atomic_read(&tcon->num_deletes));
35228 + atomic_read_unchecked(&tcon->num_opens),
35229 + atomic_read_unchecked(&tcon->num_closes),
35230 + atomic_read_unchecked(&tcon->num_deletes));
35231 seq_printf(m, "\nPosix Opens: %d "
35232 "Posix Mkdirs: %d",
35233 - atomic_read(&tcon->num_posixopens),
35234 - atomic_read(&tcon->num_posixmkdirs));
35235 + atomic_read_unchecked(&tcon->num_posixopens),
35236 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35237 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35238 - atomic_read(&tcon->num_mkdirs),
35239 - atomic_read(&tcon->num_rmdirs));
35240 + atomic_read_unchecked(&tcon->num_mkdirs),
35241 + atomic_read_unchecked(&tcon->num_rmdirs));
35242 seq_printf(m, "\nRenames: %d T2 Renames %d",
35243 - atomic_read(&tcon->num_renames),
35244 - atomic_read(&tcon->num_t2renames));
35245 + atomic_read_unchecked(&tcon->num_renames),
35246 + atomic_read_unchecked(&tcon->num_t2renames));
35247 seq_printf(m, "\nFindFirst: %d FNext %d "
35249 - atomic_read(&tcon->num_ffirst),
35250 - atomic_read(&tcon->num_fnext),
35251 - atomic_read(&tcon->num_fclose));
35252 + atomic_read_unchecked(&tcon->num_ffirst),
35253 + atomic_read_unchecked(&tcon->num_fnext),
35254 + atomic_read_unchecked(&tcon->num_fclose));
35258 diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35259 --- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35260 +++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35261 @@ -305,28 +305,28 @@ struct cifsTconInfo {
35262 __u16 Flags; /* optional support bits */
35263 enum statusEnum tidStatus;
35264 #ifdef CONFIG_CIFS_STATS
35265 - atomic_t num_smbs_sent;
35266 - atomic_t num_writes;
35267 - atomic_t num_reads;
35268 - atomic_t num_flushes;
35269 - atomic_t num_oplock_brks;
35270 - atomic_t num_opens;
35271 - atomic_t num_closes;
35272 - atomic_t num_deletes;
35273 - atomic_t num_mkdirs;
35274 - atomic_t num_posixopens;
35275 - atomic_t num_posixmkdirs;
35276 - atomic_t num_rmdirs;
35277 - atomic_t num_renames;
35278 - atomic_t num_t2renames;
35279 - atomic_t num_ffirst;
35280 - atomic_t num_fnext;
35281 - atomic_t num_fclose;
35282 - atomic_t num_hardlinks;
35283 - atomic_t num_symlinks;
35284 - atomic_t num_locks;
35285 - atomic_t num_acl_get;
35286 - atomic_t num_acl_set;
35287 + atomic_unchecked_t num_smbs_sent;
35288 + atomic_unchecked_t num_writes;
35289 + atomic_unchecked_t num_reads;
35290 + atomic_unchecked_t num_flushes;
35291 + atomic_unchecked_t num_oplock_brks;
35292 + atomic_unchecked_t num_opens;
35293 + atomic_unchecked_t num_closes;
35294 + atomic_unchecked_t num_deletes;
35295 + atomic_unchecked_t num_mkdirs;
35296 + atomic_unchecked_t num_posixopens;
35297 + atomic_unchecked_t num_posixmkdirs;
35298 + atomic_unchecked_t num_rmdirs;
35299 + atomic_unchecked_t num_renames;
35300 + atomic_unchecked_t num_t2renames;
35301 + atomic_unchecked_t num_ffirst;
35302 + atomic_unchecked_t num_fnext;
35303 + atomic_unchecked_t num_fclose;
35304 + atomic_unchecked_t num_hardlinks;
35305 + atomic_unchecked_t num_symlinks;
35306 + atomic_unchecked_t num_locks;
35307 + atomic_unchecked_t num_acl_get;
35308 + atomic_unchecked_t num_acl_set;
35309 #ifdef CONFIG_CIFS_STATS2
35310 unsigned long long time_writes;
35311 unsigned long long time_reads;
35312 @@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35315 #ifdef CONFIG_CIFS_STATS
35316 -#define cifs_stats_inc atomic_inc
35317 +#define cifs_stats_inc atomic_inc_unchecked
35319 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35320 unsigned int bytes)
35321 diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35322 --- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35323 +++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35324 @@ -577,7 +577,7 @@ symlink_exit:
35326 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35328 - char *p = nd_get_link(nd);
35329 + const char *p = nd_get_link(nd);
35333 diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35334 --- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35335 +++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35337 #include "coda_linux.h"
35338 #include "coda_cache.h"
35340 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35341 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35343 /* replace or extend an acl cache hit */
35344 void coda_cache_enter(struct inode *inode, int mask)
35345 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35346 struct coda_inode_info *cii = ITOC(inode);
35348 spin_lock(&cii->c_lock);
35349 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35350 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35351 if (cii->c_uid != current_fsuid()) {
35352 cii->c_uid = current_fsuid();
35353 cii->c_cached_perm = mask;
35354 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35356 struct coda_inode_info *cii = ITOC(inode);
35357 spin_lock(&cii->c_lock);
35358 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35359 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35360 spin_unlock(&cii->c_lock);
35363 /* remove all acl caches */
35364 void coda_cache_clear_all(struct super_block *sb)
35366 - atomic_inc(&permission_epoch);
35367 + atomic_inc_unchecked(&permission_epoch);
35371 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35372 spin_lock(&cii->c_lock);
35373 hit = (mask & cii->c_cached_perm) == mask &&
35374 cii->c_uid == current_fsuid() &&
35375 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35376 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35377 spin_unlock(&cii->c_lock);
35380 diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35381 --- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35382 +++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35383 @@ -30,11 +30,13 @@
35389 #define elfhdr elf32_hdr
35390 #define elf_phdr elf32_phdr
35391 #define elf_shdr elf32_shdr
35392 #define elf_note elf32_note
35393 +#define elf_dyn Elf32_Dyn
35394 #define elf_addr_t Elf32_Addr
35397 diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35398 --- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35399 +++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35400 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35404 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35405 + if (nr_segs > UIO_MAXIOV)
35407 if (nr_segs > fast_segs) {
35409 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35411 struct compat_readdir_callback {
35412 struct compat_old_linux_dirent __user *dirent;
35413 + struct file * file;
35417 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35418 buf->result = -EOVERFLOW;
35422 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35426 dirent = buf->dirent;
35427 if (!access_ok(VERIFY_WRITE, dirent,
35428 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35431 buf.dirent = dirent;
35434 error = vfs_readdir(file, compat_fillonedir, &buf);
35436 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35437 struct compat_getdents_callback {
35438 struct compat_linux_dirent __user *current_dir;
35439 struct compat_linux_dirent __user *previous;
35440 + struct file * file;
35444 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35445 buf->error = -EOVERFLOW;
35449 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35452 dirent = buf->previous;
35454 if (__put_user(offset, &dirent->d_off))
35455 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35456 buf.previous = NULL;
35461 error = vfs_readdir(file, compat_filldir, &buf);
35463 @@ -1006,6 +1018,7 @@ out:
35464 struct compat_getdents_callback64 {
35465 struct linux_dirent64 __user *current_dir;
35466 struct linux_dirent64 __user *previous;
35467 + struct file * file;
35471 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35472 buf->error = -EINVAL; /* only used if we fail.. */
35473 if (reclen > buf->count)
35476 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35479 dirent = buf->previous;
35482 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35483 buf.previous = NULL;
35488 error = vfs_readdir(file, compat_filldir64, &buf);
35490 @@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35491 compat_uptr_t __user *envp,
35492 struct pt_regs * regs)
35494 +#ifdef CONFIG_GRKERNSEC
35495 + struct file *old_exec_file;
35496 + struct acl_subject_label *old_acl;
35497 + struct rlimit old_rlim[RLIM_NLIMITS];
35499 struct linux_binprm *bprm;
35501 struct files_struct *displaced;
35502 @@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35503 bprm->filename = filename;
35504 bprm->interp = filename;
35506 + if (gr_process_user_ban()) {
35511 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1);
35512 + retval = -EAGAIN;
35513 + if (gr_handle_nproc())
35515 + retval = -EACCES;
35516 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35519 retval = bprm_mm_init(bprm);
35522 @@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35526 + if (!gr_tpe_allow(file)) {
35527 + retval = -EACCES;
35531 + if (gr_check_crash_exec(file)) {
35532 + retval = -EACCES;
35536 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35538 + gr_handle_exec_args_compat(bprm, argv);
35540 +#ifdef CONFIG_GRKERNSEC
35541 + old_acl = current->acl;
35542 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35543 + old_exec_file = current->exec_file;
35545 + current->exec_file = file;
35548 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35549 + bprm->unsafe & LSM_UNSAFE_SHARE);
35553 retval = search_binary_handler(bprm, regs);
35557 +#ifdef CONFIG_GRKERNSEC
35558 + if (old_exec_file)
35559 + fput(old_exec_file);
35562 /* execve succeeded */
35563 current->fs->in_exec = 0;
35564 @@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35565 put_files_struct(displaced);
35569 +#ifdef CONFIG_GRKERNSEC
35570 + current->acl = old_acl;
35571 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35572 + fput(current->exec_file);
35573 + current->exec_file = old_exec_file;
35578 acct_arg_size(bprm, 0);
35579 @@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35580 struct fdtable *fdt;
35581 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35583 + pax_track_stack();
35588 diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35589 --- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35590 +++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35591 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35593 err = get_user(palp, &up->palette);
35594 err |= get_user(length, &up->length);
35598 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35599 err = put_user(compat_ptr(palp), &up_native->palette);
35600 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35601 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35604 - a = *(unsigned int *)p;
35605 - b = *(unsigned int *)q;
35606 + a = *(const unsigned int *)p;
35607 + b = *(const unsigned int *)q;
35611 diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35612 --- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35613 +++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35614 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35616 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35617 struct configfs_dirent *next;
35618 - const char * name;
35619 + const unsigned char * name;
35620 + char d_name[sizeof(next->s_dentry->d_iname)];
35622 struct inode *inode = NULL;
35624 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35627 name = configfs_get_name(next);
35628 - len = strlen(name);
35629 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35630 + len = next->s_dentry->d_name.len;
35631 + memcpy(d_name, name, len);
35634 + len = strlen(name);
35637 * We'll have a dentry and an inode for
35638 diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35639 --- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35640 +++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35641 @@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35642 mempages -= reserve;
35644 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35645 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35646 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35650 diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35651 --- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35652 +++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35653 @@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35656 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35657 - (char __user *)lower_buf,
35658 + (__force char __user *)lower_buf,
35662 @@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35666 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35667 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35671 @@ -684,7 +684,7 @@ out:
35673 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35675 - char *buf = nd_get_link(nd);
35676 + const char *buf = nd_get_link(nd);
35677 if (!IS_ERR(buf)) {
35678 /* Free the char* */
35680 diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35681 --- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35682 +++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35683 @@ -328,7 +328,7 @@ check_list:
35684 goto out_unlock_msg_ctx;
35686 if (msg_ctx->msg) {
35687 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35688 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35689 goto out_unlock_msg_ctx;
35690 i += packet_length_size;
35691 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35692 diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35693 --- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35694 +++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35695 @@ -55,12 +55,24 @@
35696 #include <linux/fs_struct.h>
35697 #include <linux/pipe_fs_i.h>
35698 #include <linux/oom.h>
35699 +#include <linux/random.h>
35700 +#include <linux/seq_file.h>
35702 +#ifdef CONFIG_PAX_REFCOUNT
35703 +#include <linux/kallsyms.h>
35704 +#include <linux/kdebug.h>
35707 #include <asm/uaccess.h>
35708 #include <asm/mmu_context.h>
35709 #include <asm/tlb.h>
35710 #include "internal.h"
35712 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35713 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35714 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35718 char core_pattern[CORENAME_MAX_SIZE] = "core";
35719 unsigned int core_pipe_limit;
35720 @@ -70,7 +82,7 @@ struct core_name {
35724 -static atomic_t call_count = ATOMIC_INIT(1);
35725 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35727 /* The maximal length of core_pattern is also specified in sysctl.c */
35729 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35730 char *tmp = getname(library);
35731 int error = PTR_ERR(tmp);
35732 static const struct open_flags uselib_flags = {
35733 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35734 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35735 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35736 .intent = LOOKUP_OPEN
35738 @@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35744 -#ifdef CONFIG_STACK_GROWSUP
35746 - ret = expand_stack_downwards(bprm->vma, pos);
35751 - ret = get_user_pages(current, bprm->mm, pos,
35752 - 1, write, 1, &page, NULL);
35754 + if (0 > expand_stack_downwards(bprm->vma, pos))
35756 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35760 @@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35761 vma->vm_end = STACK_TOP_MAX;
35762 vma->vm_start = vma->vm_end - PAGE_SIZE;
35763 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35765 +#ifdef CONFIG_PAX_SEGMEXEC
35766 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35769 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35770 INIT_LIST_HEAD(&vma->anon_vma_chain);
35772 @@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35773 mm->stack_vm = mm->total_vm = 1;
35774 up_write(&mm->mmap_sem);
35775 bprm->p = vma->vm_end - sizeof(void *);
35777 +#ifdef CONFIG_PAX_RANDUSTACK
35778 + if (randomize_va_space)
35779 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35784 up_write(&mm->mmap_sem);
35785 @@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35787 mm_segment_t oldfs = get_fs();
35789 - r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35790 + r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35794 @@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35795 unsigned long new_end = old_end - shift;
35796 struct mmu_gather *tlb;
35798 - BUG_ON(new_start > new_end);
35799 + if (new_start >= new_end || new_start < mmap_min_addr)
35803 * ensure there are no vmas between where we want to go
35804 @@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35805 if (vma != find_vma(mm, new_start))
35808 +#ifdef CONFIG_PAX_SEGMEXEC
35809 + BUG_ON(pax_find_mirror_vma(vma));
35813 * cover the whole range: [new_start, old_end)
35815 @@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35816 stack_top = arch_align_stack(stack_top);
35817 stack_top = PAGE_ALIGN(stack_top);
35819 - if (unlikely(stack_top < mmap_min_addr) ||
35820 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35823 stack_shift = vma->vm_end - stack_top;
35825 bprm->p -= stack_shift;
35826 @@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35827 bprm->exec -= stack_shift;
35829 down_write(&mm->mmap_sem);
35831 + /* Move stack pages down in memory. */
35832 + if (stack_shift) {
35833 + ret = shift_arg_pages(vma, stack_shift);
35838 vm_flags = VM_STACK_FLAGS;
35840 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35841 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35842 + vm_flags &= ~VM_EXEC;
35844 +#ifdef CONFIG_PAX_MPROTECT
35845 + if (mm->pax_flags & MF_PAX_MPROTECT)
35846 + vm_flags &= ~VM_MAYEXEC;
35853 * Adjust stack execute permissions; explicitly enable for
35854 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35855 @@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35857 BUG_ON(prev != vma);
35859 - /* Move stack pages down in memory. */
35860 - if (stack_shift) {
35861 - ret = shift_arg_pages(vma, stack_shift);
35866 /* mprotect_fixup is overkill to remove the temporary stack flags */
35867 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35869 @@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35872 static const struct open_flags open_exec_flags = {
35873 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35874 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35875 .acc_mode = MAY_EXEC | MAY_OPEN,
35876 .intent = LOOKUP_OPEN
35878 @@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35881 /* The cast to a user pointer is valid due to the set_fs() */
35882 - result = vfs_read(file, (void __user *)addr, count, &pos);
35883 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35887 @@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35891 - if (p->fs->users > n_fs) {
35892 + if (atomic_read(&p->fs->users) > n_fs) {
35893 bprm->unsafe |= LSM_UNSAFE_SHARE;
35896 @@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35897 const char __user *const __user *envp,
35898 struct pt_regs * regs)
35900 +#ifdef CONFIG_GRKERNSEC
35901 + struct file *old_exec_file;
35902 + struct acl_subject_label *old_acl;
35903 + struct rlimit old_rlim[RLIM_NLIMITS];
35905 struct linux_binprm *bprm;
35907 struct files_struct *displaced;
35908 @@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35909 bprm->filename = filename;
35910 bprm->interp = filename;
35912 + if (gr_process_user_ban()) {
35917 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1);
35919 + if (gr_handle_nproc()) {
35920 + retval = -EAGAIN;
35924 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35925 + retval = -EACCES;
35929 retval = bprm_mm_init(bprm);
35932 @@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35936 + if (!gr_tpe_allow(file)) {
35937 + retval = -EACCES;
35941 + if (gr_check_crash_exec(file)) {
35942 + retval = -EACCES;
35946 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35948 + gr_handle_exec_args(bprm, argv);
35950 +#ifdef CONFIG_GRKERNSEC
35951 + old_acl = current->acl;
35952 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35953 + old_exec_file = current->exec_file;
35955 + current->exec_file = file;
35958 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35959 + bprm->unsafe & LSM_UNSAFE_SHARE);
35963 retval = search_binary_handler(bprm,regs);
35967 +#ifdef CONFIG_GRKERNSEC
35968 + if (old_exec_file)
35969 + fput(old_exec_file);
35972 /* execve succeeded */
35973 current->fs->in_exec = 0;
35974 @@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35975 put_files_struct(displaced);
35979 +#ifdef CONFIG_GRKERNSEC
35980 + current->acl = old_acl;
35981 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35982 + fput(current->exec_file);
35983 + current->exec_file = old_exec_file;
35988 acct_arg_size(bprm, 0);
35989 @@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
35991 char *old_corename = cn->corename;
35993 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35994 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35995 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35997 if (!cn->corename) {
35998 @@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
35999 int pid_in_pattern = 0;
36002 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
36003 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
36004 cn->corename = kmalloc(cn->size, GFP_KERNEL);
36007 @@ -1645,6 +1735,219 @@ out:
36011 +int pax_check_flags(unsigned long *flags)
36015 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
36016 + if (*flags & MF_PAX_SEGMEXEC)
36018 + *flags &= ~MF_PAX_SEGMEXEC;
36019 + retval = -EINVAL;
36023 + if ((*flags & MF_PAX_PAGEEXEC)
36025 +#ifdef CONFIG_PAX_PAGEEXEC
36026 + && (*flags & MF_PAX_SEGMEXEC)
36031 + *flags &= ~MF_PAX_PAGEEXEC;
36032 + retval = -EINVAL;
36035 + if ((*flags & MF_PAX_MPROTECT)
36037 +#ifdef CONFIG_PAX_MPROTECT
36038 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36043 + *flags &= ~MF_PAX_MPROTECT;
36044 + retval = -EINVAL;
36047 + if ((*flags & MF_PAX_EMUTRAMP)
36049 +#ifdef CONFIG_PAX_EMUTRAMP
36050 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36055 + *flags &= ~MF_PAX_EMUTRAMP;
36056 + retval = -EINVAL;
36062 +EXPORT_SYMBOL(pax_check_flags);
36064 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36065 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36067 + struct task_struct *tsk = current;
36068 + struct mm_struct *mm = current->mm;
36069 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36070 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36071 + char *path_exec = NULL;
36072 + char *path_fault = NULL;
36073 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
36075 + if (buffer_exec && buffer_fault) {
36076 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36078 + down_read(&mm->mmap_sem);
36080 + while (vma && (!vma_exec || !vma_fault)) {
36081 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36083 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36085 + vma = vma->vm_next;
36088 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36089 + if (IS_ERR(path_exec))
36090 + path_exec = "<path too long>";
36092 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36095 + path_exec = buffer_exec;
36097 + path_exec = "<path too long>";
36101 + start = vma_fault->vm_start;
36102 + end = vma_fault->vm_end;
36103 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36104 + if (vma_fault->vm_file) {
36105 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36106 + if (IS_ERR(path_fault))
36107 + path_fault = "<path too long>";
36109 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36110 + if (path_fault) {
36112 + path_fault = buffer_fault;
36114 + path_fault = "<path too long>";
36117 + path_fault = "<anonymous mapping>";
36119 + up_read(&mm->mmap_sem);
36121 + if (tsk->signal->curr_ip)
36122 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36124 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36125 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36126 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36127 + task_uid(tsk), task_euid(tsk), pc, sp);
36128 + free_page((unsigned long)buffer_exec);
36129 + free_page((unsigned long)buffer_fault);
36130 + pax_report_insns(pc, sp);
36131 + do_coredump(SIGKILL, SIGKILL, regs);
36135 +#ifdef CONFIG_PAX_REFCOUNT
36136 +void pax_report_refcount_overflow(struct pt_regs *regs)
36138 + if (current->signal->curr_ip)
36139 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36140 + ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36142 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36143 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36144 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36146 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36150 +#ifdef CONFIG_PAX_USERCOPY
36151 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36152 +int object_is_on_stack(const void *obj, unsigned long len)
36154 + const void * const stack = task_stack_page(current);
36155 + const void * const stackend = stack + THREAD_SIZE;
36157 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36158 + const void *frame = NULL;
36159 + const void *oldframe;
36162 + if (obj + len < obj)
36165 + if (obj + len <= stack || stackend <= obj)
36168 + if (obj < stack || stackend < obj + len)
36171 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36172 + oldframe = __builtin_frame_address(1);
36174 + frame = __builtin_frame_address(2);
36176 + low ----------------------------------------------> high
36177 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36178 + ^----------------^
36179 + allow copies only within here
36181 + while (stack <= frame && frame < stackend) {
36182 + /* if obj + len extends past the last frame, this
36183 + check won't pass and the next frame will be 0,
36184 + causing us to bail out and correctly report
36185 + the copy as invalid
36187 + if (obj + len <= frame)
36188 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36189 + oldframe = frame;
36190 + frame = *(const void * const *)frame;
36199 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36201 + if (current->signal->curr_ip)
36202 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36203 + ¤t->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36205 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36206 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36208 + gr_handle_kernel_exploit();
36209 + do_group_exit(SIGKILL);
36213 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36214 +void pax_track_stack(void)
36216 + unsigned long sp = (unsigned long)&sp;
36217 + if (sp < current_thread_info()->lowest_stack &&
36218 + sp > (unsigned long)task_stack_page(current))
36219 + current_thread_info()->lowest_stack = sp;
36221 +EXPORT_SYMBOL(pax_track_stack);
36224 static int zap_process(struct task_struct *start, int exit_code)
36226 struct task_struct *t;
36227 @@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36228 pipe = file->f_path.dentry->d_inode->i_pipe;
36233 + atomic_inc(&pipe->readers);
36234 + atomic_dec(&pipe->writers);
36236 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36237 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36238 wake_up_interruptible_sync(&pipe->wait);
36239 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36245 + atomic_dec(&pipe->readers);
36246 + atomic_inc(&pipe->writers);
36250 @@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36254 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36255 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36256 struct coredump_params cprm = {
36259 @@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36261 audit_core_dumps(signr);
36263 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36264 + gr_handle_brute_attach(current, cprm.mm_flags);
36266 binfmt = mm->binfmt;
36267 if (!binfmt || !binfmt->core_dump)
36269 @@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36270 goto fail_corename;
36273 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36277 char **helper_argv;
36278 @@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36280 cprm.limit = RLIM_INFINITY;
36282 - dump_count = atomic_inc_return(&core_dump_count);
36283 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36284 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36285 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36286 task_tgid_vnr(current), current->comm);
36287 @@ -2078,7 +2386,7 @@ close_fail:
36288 filp_close(cprm.file, NULL);
36291 - atomic_dec(&core_dump_count);
36292 + atomic_dec_unchecked(&core_dump_count);
36294 kfree(cn.corename);
36296 diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36297 --- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36298 +++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36299 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36301 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36302 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36303 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36304 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36305 sbi->s_resuid != current_fsuid() &&
36306 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36308 diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36309 --- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36310 +++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36311 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36313 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36314 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36315 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36316 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36317 sbi->s_resuid != current_fsuid() &&
36318 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36320 diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36321 --- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36322 +++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36323 @@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36324 /* Hm, nope. Are (enough) root reserved blocks available? */
36325 if (sbi->s_resuid == current_fsuid() ||
36326 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36327 - capable(CAP_SYS_RESOURCE)) {
36328 + capable_nolog(CAP_SYS_RESOURCE)) {
36329 if (free_blocks >= (nblocks + dirty_blocks))
36332 diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36333 --- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36334 +++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36335 @@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36336 unsigned long s_mb_last_start;
36338 /* stats for buddy allocator */
36339 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36340 - atomic_t s_bal_success; /* we found long enough chunks */
36341 - atomic_t s_bal_allocated; /* in blocks */
36342 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36343 - atomic_t s_bal_goals; /* goal hits */
36344 - atomic_t s_bal_breaks; /* too long searches */
36345 - atomic_t s_bal_2orders; /* 2^order hits */
36346 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36347 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36348 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36349 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36350 + atomic_unchecked_t s_bal_goals; /* goal hits */
36351 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36352 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36353 spinlock_t s_bal_lock;
36354 unsigned long s_mb_buddies_generated;
36355 unsigned long long s_mb_generation_time;
36356 - atomic_t s_mb_lost_chunks;
36357 - atomic_t s_mb_preallocated;
36358 - atomic_t s_mb_discarded;
36359 + atomic_unchecked_t s_mb_lost_chunks;
36360 + atomic_unchecked_t s_mb_preallocated;
36361 + atomic_unchecked_t s_mb_discarded;
36362 atomic_t s_lock_busy;
36364 /* locality groups */
36365 diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36366 --- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36367 +++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36368 @@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36369 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36371 if (EXT4_SB(sb)->s_mb_stats)
36372 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36373 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36377 @@ -2147,7 +2147,7 @@ repeat:
36378 ac->ac_status = AC_STATUS_CONTINUE;
36379 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36381 - atomic_inc(&sbi->s_mb_lost_chunks);
36382 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36386 @@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36387 ext4_grpblk_t counters[16];
36390 + pax_track_stack();
36394 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36395 @@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36396 if (sbi->s_mb_stats) {
36398 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36399 - atomic_read(&sbi->s_bal_allocated),
36400 - atomic_read(&sbi->s_bal_reqs),
36401 - atomic_read(&sbi->s_bal_success));
36402 + atomic_read_unchecked(&sbi->s_bal_allocated),
36403 + atomic_read_unchecked(&sbi->s_bal_reqs),
36404 + atomic_read_unchecked(&sbi->s_bal_success));
36406 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36407 "%u 2^N hits, %u breaks, %u lost\n",
36408 - atomic_read(&sbi->s_bal_ex_scanned),
36409 - atomic_read(&sbi->s_bal_goals),
36410 - atomic_read(&sbi->s_bal_2orders),
36411 - atomic_read(&sbi->s_bal_breaks),
36412 - atomic_read(&sbi->s_mb_lost_chunks));
36413 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36414 + atomic_read_unchecked(&sbi->s_bal_goals),
36415 + atomic_read_unchecked(&sbi->s_bal_2orders),
36416 + atomic_read_unchecked(&sbi->s_bal_breaks),
36417 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36419 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36420 sbi->s_mb_buddies_generated++,
36421 sbi->s_mb_generation_time);
36423 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36424 - atomic_read(&sbi->s_mb_preallocated),
36425 - atomic_read(&sbi->s_mb_discarded));
36426 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36427 + atomic_read_unchecked(&sbi->s_mb_discarded));
36430 free_percpu(sbi->s_locality_groups);
36431 @@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36432 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36434 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36435 - atomic_inc(&sbi->s_bal_reqs);
36436 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36437 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36438 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36439 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36440 - atomic_inc(&sbi->s_bal_success);
36441 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36442 + atomic_inc_unchecked(&sbi->s_bal_success);
36443 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36444 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36445 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36446 - atomic_inc(&sbi->s_bal_goals);
36447 + atomic_inc_unchecked(&sbi->s_bal_goals);
36448 if (ac->ac_found > sbi->s_mb_max_to_scan)
36449 - atomic_inc(&sbi->s_bal_breaks);
36450 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36453 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36454 @@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36455 trace_ext4_mb_new_inode_pa(ac, pa);
36457 ext4_mb_use_inode_pa(ac, pa);
36458 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36459 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36461 ei = EXT4_I(ac->ac_inode);
36462 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36463 @@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36464 trace_ext4_mb_new_group_pa(ac, pa);
36466 ext4_mb_use_group_pa(ac, pa);
36467 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36468 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36470 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36472 @@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36473 * from the bitmap and continue.
36476 - atomic_add(free, &sbi->s_mb_discarded);
36477 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36481 @@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36482 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36483 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36484 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36485 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36486 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36487 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36490 diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36491 --- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36492 +++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36493 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36497 + if (gr_handle_chroot_fowner(pid, type))
36499 + if (gr_check_protected_task_fowner(pid, type))
36502 f_modown(filp, pid, type, force);
36505 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36508 case F_DUPFD_CLOEXEC:
36509 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36510 if (arg >= rlimit(RLIMIT_NOFILE))
36512 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36513 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36514 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36515 * is defined as O_NONBLOCK on some platforms and not on others.
36517 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36518 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36519 O_RDONLY | O_WRONLY | O_RDWR |
36520 O_CREAT | O_EXCL | O_NOCTTY |
36521 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36522 __O_SYNC | O_DSYNC | FASYNC |
36523 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36524 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36525 - __FMODE_EXEC | O_PATH
36526 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36529 fasync_cache = kmem_cache_create("fasync_cache",
36530 diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36531 --- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36532 +++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36533 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36535 filp->f_op = &read_pipefifo_fops;
36537 - if (pipe->readers++ == 0)
36538 + if (atomic_inc_return(&pipe->readers) == 1)
36539 wake_up_partner(inode);
36541 - if (!pipe->writers) {
36542 + if (!atomic_read(&pipe->writers)) {
36543 if ((filp->f_flags & O_NONBLOCK)) {
36544 /* suppress POLLHUP until we have
36546 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36547 * errno=ENXIO when there is no process reading the FIFO.
36550 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36551 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36554 filp->f_op = &write_pipefifo_fops;
36556 - if (!pipe->writers++)
36557 + if (atomic_inc_return(&pipe->writers) == 1)
36558 wake_up_partner(inode);
36560 - if (!pipe->readers) {
36561 + if (!atomic_read(&pipe->readers)) {
36562 wait_for_partner(inode, &pipe->r_counter);
36563 if (signal_pending(current))
36565 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36567 filp->f_op = &rdwr_pipefifo_fops;
36571 + atomic_inc(&pipe->readers);
36572 + atomic_inc(&pipe->writers);
36575 - if (pipe->readers == 1 || pipe->writers == 1)
36576 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36577 wake_up_partner(inode);
36580 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36584 - if (!--pipe->readers)
36585 + if (atomic_dec_and_test(&pipe->readers))
36586 wake_up_interruptible(&pipe->wait);
36587 ret = -ERESTARTSYS;
36591 - if (!--pipe->writers)
36592 + if (atomic_dec_and_test(&pipe->writers))
36593 wake_up_interruptible(&pipe->wait);
36594 ret = -ERESTARTSYS;
36598 - if (!pipe->readers && !pipe->writers)
36599 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36600 free_pipe_info(inode);
36603 diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36604 --- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36605 +++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36607 #include <linux/slab.h>
36608 #include <linux/vmalloc.h>
36609 #include <linux/file.h>
36610 +#include <linux/security.h>
36611 #include <linux/fdtable.h>
36612 #include <linux/bitops.h>
36613 #include <linux/interrupt.h>
36614 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36615 * N.B. For clone tasks sharing a files structure, this test
36616 * will limit the total number of files that can be opened.
36618 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36619 if (nr >= rlimit(RLIMIT_NOFILE))
36622 diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36623 --- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36624 +++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36625 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36626 int len = dot ? dot - name : strlen(name);
36628 fs = __get_fs_type(name, len);
36630 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36631 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36633 if (!fs && (request_module("%.*s", len, name) == 0))
36635 fs = __get_fs_type(name, len);
36637 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36638 diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36639 --- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36640 +++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36641 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36642 parent ? (char *) parent->def->name : "<no-parent>",
36643 def->name, netfs_data);
36645 - fscache_stat(&fscache_n_acquires);
36646 + fscache_stat_unchecked(&fscache_n_acquires);
36648 /* if there's no parent cookie, then we don't create one here either */
36650 - fscache_stat(&fscache_n_acquires_null);
36651 + fscache_stat_unchecked(&fscache_n_acquires_null);
36652 _leave(" [no parent]");
36655 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36656 /* allocate and initialise a cookie */
36657 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36659 - fscache_stat(&fscache_n_acquires_oom);
36660 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36661 _leave(" [ENOMEM]");
36664 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36666 switch (cookie->def->type) {
36667 case FSCACHE_COOKIE_TYPE_INDEX:
36668 - fscache_stat(&fscache_n_cookie_index);
36669 + fscache_stat_unchecked(&fscache_n_cookie_index);
36671 case FSCACHE_COOKIE_TYPE_DATAFILE:
36672 - fscache_stat(&fscache_n_cookie_data);
36673 + fscache_stat_unchecked(&fscache_n_cookie_data);
36676 - fscache_stat(&fscache_n_cookie_special);
36677 + fscache_stat_unchecked(&fscache_n_cookie_special);
36681 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36682 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36683 atomic_dec(&parent->n_children);
36684 __fscache_cookie_put(cookie);
36685 - fscache_stat(&fscache_n_acquires_nobufs);
36686 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36692 - fscache_stat(&fscache_n_acquires_ok);
36693 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36694 _leave(" = %p", cookie);
36697 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36698 cache = fscache_select_cache_for_object(cookie->parent);
36700 up_read(&fscache_addremove_sem);
36701 - fscache_stat(&fscache_n_acquires_no_cache);
36702 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36703 _leave(" = -ENOMEDIUM [no cache]");
36706 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36707 object = cache->ops->alloc_object(cache, cookie);
36708 fscache_stat_d(&fscache_n_cop_alloc_object);
36709 if (IS_ERR(object)) {
36710 - fscache_stat(&fscache_n_object_no_alloc);
36711 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36712 ret = PTR_ERR(object);
36716 - fscache_stat(&fscache_n_object_alloc);
36717 + fscache_stat_unchecked(&fscache_n_object_alloc);
36719 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36721 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36722 struct fscache_object *object;
36723 struct hlist_node *_p;
36725 - fscache_stat(&fscache_n_updates);
36726 + fscache_stat_unchecked(&fscache_n_updates);
36729 - fscache_stat(&fscache_n_updates_null);
36730 + fscache_stat_unchecked(&fscache_n_updates_null);
36731 _leave(" [no cookie]");
36734 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36735 struct fscache_object *object;
36736 unsigned long event;
36738 - fscache_stat(&fscache_n_relinquishes);
36739 + fscache_stat_unchecked(&fscache_n_relinquishes);
36741 - fscache_stat(&fscache_n_relinquishes_retire);
36742 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36745 - fscache_stat(&fscache_n_relinquishes_null);
36746 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36747 _leave(" [no cookie]");
36750 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36752 /* wait for the cookie to finish being instantiated (or to fail) */
36753 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36754 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36755 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36756 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36757 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36759 diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36760 --- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36761 +++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36762 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36763 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36764 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36766 -extern atomic_t fscache_n_op_pend;
36767 -extern atomic_t fscache_n_op_run;
36768 -extern atomic_t fscache_n_op_enqueue;
36769 -extern atomic_t fscache_n_op_deferred_release;
36770 -extern atomic_t fscache_n_op_release;
36771 -extern atomic_t fscache_n_op_gc;
36772 -extern atomic_t fscache_n_op_cancelled;
36773 -extern atomic_t fscache_n_op_rejected;
36775 -extern atomic_t fscache_n_attr_changed;
36776 -extern atomic_t fscache_n_attr_changed_ok;
36777 -extern atomic_t fscache_n_attr_changed_nobufs;
36778 -extern atomic_t fscache_n_attr_changed_nomem;
36779 -extern atomic_t fscache_n_attr_changed_calls;
36781 -extern atomic_t fscache_n_allocs;
36782 -extern atomic_t fscache_n_allocs_ok;
36783 -extern atomic_t fscache_n_allocs_wait;
36784 -extern atomic_t fscache_n_allocs_nobufs;
36785 -extern atomic_t fscache_n_allocs_intr;
36786 -extern atomic_t fscache_n_allocs_object_dead;
36787 -extern atomic_t fscache_n_alloc_ops;
36788 -extern atomic_t fscache_n_alloc_op_waits;
36790 -extern atomic_t fscache_n_retrievals;
36791 -extern atomic_t fscache_n_retrievals_ok;
36792 -extern atomic_t fscache_n_retrievals_wait;
36793 -extern atomic_t fscache_n_retrievals_nodata;
36794 -extern atomic_t fscache_n_retrievals_nobufs;
36795 -extern atomic_t fscache_n_retrievals_intr;
36796 -extern atomic_t fscache_n_retrievals_nomem;
36797 -extern atomic_t fscache_n_retrievals_object_dead;
36798 -extern atomic_t fscache_n_retrieval_ops;
36799 -extern atomic_t fscache_n_retrieval_op_waits;
36801 -extern atomic_t fscache_n_stores;
36802 -extern atomic_t fscache_n_stores_ok;
36803 -extern atomic_t fscache_n_stores_again;
36804 -extern atomic_t fscache_n_stores_nobufs;
36805 -extern atomic_t fscache_n_stores_oom;
36806 -extern atomic_t fscache_n_store_ops;
36807 -extern atomic_t fscache_n_store_calls;
36808 -extern atomic_t fscache_n_store_pages;
36809 -extern atomic_t fscache_n_store_radix_deletes;
36810 -extern atomic_t fscache_n_store_pages_over_limit;
36812 -extern atomic_t fscache_n_store_vmscan_not_storing;
36813 -extern atomic_t fscache_n_store_vmscan_gone;
36814 -extern atomic_t fscache_n_store_vmscan_busy;
36815 -extern atomic_t fscache_n_store_vmscan_cancelled;
36817 -extern atomic_t fscache_n_marks;
36818 -extern atomic_t fscache_n_uncaches;
36820 -extern atomic_t fscache_n_acquires;
36821 -extern atomic_t fscache_n_acquires_null;
36822 -extern atomic_t fscache_n_acquires_no_cache;
36823 -extern atomic_t fscache_n_acquires_ok;
36824 -extern atomic_t fscache_n_acquires_nobufs;
36825 -extern atomic_t fscache_n_acquires_oom;
36827 -extern atomic_t fscache_n_updates;
36828 -extern atomic_t fscache_n_updates_null;
36829 -extern atomic_t fscache_n_updates_run;
36831 -extern atomic_t fscache_n_relinquishes;
36832 -extern atomic_t fscache_n_relinquishes_null;
36833 -extern atomic_t fscache_n_relinquishes_waitcrt;
36834 -extern atomic_t fscache_n_relinquishes_retire;
36836 -extern atomic_t fscache_n_cookie_index;
36837 -extern atomic_t fscache_n_cookie_data;
36838 -extern atomic_t fscache_n_cookie_special;
36840 -extern atomic_t fscache_n_object_alloc;
36841 -extern atomic_t fscache_n_object_no_alloc;
36842 -extern atomic_t fscache_n_object_lookups;
36843 -extern atomic_t fscache_n_object_lookups_negative;
36844 -extern atomic_t fscache_n_object_lookups_positive;
36845 -extern atomic_t fscache_n_object_lookups_timed_out;
36846 -extern atomic_t fscache_n_object_created;
36847 -extern atomic_t fscache_n_object_avail;
36848 -extern atomic_t fscache_n_object_dead;
36850 -extern atomic_t fscache_n_checkaux_none;
36851 -extern atomic_t fscache_n_checkaux_okay;
36852 -extern atomic_t fscache_n_checkaux_update;
36853 -extern atomic_t fscache_n_checkaux_obsolete;
36854 +extern atomic_unchecked_t fscache_n_op_pend;
36855 +extern atomic_unchecked_t fscache_n_op_run;
36856 +extern atomic_unchecked_t fscache_n_op_enqueue;
36857 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36858 +extern atomic_unchecked_t fscache_n_op_release;
36859 +extern atomic_unchecked_t fscache_n_op_gc;
36860 +extern atomic_unchecked_t fscache_n_op_cancelled;
36861 +extern atomic_unchecked_t fscache_n_op_rejected;
36863 +extern atomic_unchecked_t fscache_n_attr_changed;
36864 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36865 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36866 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36867 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36869 +extern atomic_unchecked_t fscache_n_allocs;
36870 +extern atomic_unchecked_t fscache_n_allocs_ok;
36871 +extern atomic_unchecked_t fscache_n_allocs_wait;
36872 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36873 +extern atomic_unchecked_t fscache_n_allocs_intr;
36874 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36875 +extern atomic_unchecked_t fscache_n_alloc_ops;
36876 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36878 +extern atomic_unchecked_t fscache_n_retrievals;
36879 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36880 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36881 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36882 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36883 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36884 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36885 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36886 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36887 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36889 +extern atomic_unchecked_t fscache_n_stores;
36890 +extern atomic_unchecked_t fscache_n_stores_ok;
36891 +extern atomic_unchecked_t fscache_n_stores_again;
36892 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36893 +extern atomic_unchecked_t fscache_n_stores_oom;
36894 +extern atomic_unchecked_t fscache_n_store_ops;
36895 +extern atomic_unchecked_t fscache_n_store_calls;
36896 +extern atomic_unchecked_t fscache_n_store_pages;
36897 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36898 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36900 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36901 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36902 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36903 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36905 +extern atomic_unchecked_t fscache_n_marks;
36906 +extern atomic_unchecked_t fscache_n_uncaches;
36908 +extern atomic_unchecked_t fscache_n_acquires;
36909 +extern atomic_unchecked_t fscache_n_acquires_null;
36910 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36911 +extern atomic_unchecked_t fscache_n_acquires_ok;
36912 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36913 +extern atomic_unchecked_t fscache_n_acquires_oom;
36915 +extern atomic_unchecked_t fscache_n_updates;
36916 +extern atomic_unchecked_t fscache_n_updates_null;
36917 +extern atomic_unchecked_t fscache_n_updates_run;
36919 +extern atomic_unchecked_t fscache_n_relinquishes;
36920 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36921 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36922 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36924 +extern atomic_unchecked_t fscache_n_cookie_index;
36925 +extern atomic_unchecked_t fscache_n_cookie_data;
36926 +extern atomic_unchecked_t fscache_n_cookie_special;
36928 +extern atomic_unchecked_t fscache_n_object_alloc;
36929 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36930 +extern atomic_unchecked_t fscache_n_object_lookups;
36931 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36932 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36933 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36934 +extern atomic_unchecked_t fscache_n_object_created;
36935 +extern atomic_unchecked_t fscache_n_object_avail;
36936 +extern atomic_unchecked_t fscache_n_object_dead;
36938 +extern atomic_unchecked_t fscache_n_checkaux_none;
36939 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36940 +extern atomic_unchecked_t fscache_n_checkaux_update;
36941 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36943 extern atomic_t fscache_n_cop_alloc_object;
36944 extern atomic_t fscache_n_cop_lookup_object;
36945 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36949 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36951 + atomic_inc_unchecked(stat);
36954 static inline void fscache_stat_d(atomic_t *stat)
36957 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36959 #define __fscache_stat(stat) (NULL)
36960 #define fscache_stat(stat) do {} while (0)
36961 +#define fscache_stat_unchecked(stat) do {} while (0)
36962 #define fscache_stat_d(stat) do {} while (0)
36965 diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36966 --- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36967 +++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36968 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36969 /* update the object metadata on disk */
36970 case FSCACHE_OBJECT_UPDATING:
36971 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36972 - fscache_stat(&fscache_n_updates_run);
36973 + fscache_stat_unchecked(&fscache_n_updates_run);
36974 fscache_stat(&fscache_n_cop_update_object);
36975 object->cache->ops->update_object(object);
36976 fscache_stat_d(&fscache_n_cop_update_object);
36977 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36978 spin_lock(&object->lock);
36979 object->state = FSCACHE_OBJECT_DEAD;
36980 spin_unlock(&object->lock);
36981 - fscache_stat(&fscache_n_object_dead);
36982 + fscache_stat_unchecked(&fscache_n_object_dead);
36983 goto terminal_transit;
36985 /* handle the parent cache of this object being withdrawn from
36986 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36987 spin_lock(&object->lock);
36988 object->state = FSCACHE_OBJECT_DEAD;
36989 spin_unlock(&object->lock);
36990 - fscache_stat(&fscache_n_object_dead);
36991 + fscache_stat_unchecked(&fscache_n_object_dead);
36992 goto terminal_transit;
36994 /* complain about the object being woken up once it is
36995 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36996 parent->cookie->def->name, cookie->def->name,
36997 object->cache->tag->name);
36999 - fscache_stat(&fscache_n_object_lookups);
37000 + fscache_stat_unchecked(&fscache_n_object_lookups);
37001 fscache_stat(&fscache_n_cop_lookup_object);
37002 ret = object->cache->ops->lookup_object(object);
37003 fscache_stat_d(&fscache_n_cop_lookup_object);
37004 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
37005 if (ret == -ETIMEDOUT) {
37006 /* probably stuck behind another object, so move this one to
37007 * the back of the queue */
37008 - fscache_stat(&fscache_n_object_lookups_timed_out);
37009 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
37010 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37013 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
37015 spin_lock(&object->lock);
37016 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37017 - fscache_stat(&fscache_n_object_lookups_negative);
37018 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
37020 /* transit here to allow write requests to begin stacking up
37021 * and read requests to begin returning ENODATA */
37022 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
37023 * result, in which case there may be data available */
37024 spin_lock(&object->lock);
37025 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37026 - fscache_stat(&fscache_n_object_lookups_positive);
37027 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
37029 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
37031 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
37032 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37034 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
37035 - fscache_stat(&fscache_n_object_created);
37036 + fscache_stat_unchecked(&fscache_n_object_created);
37038 object->state = FSCACHE_OBJECT_AVAILABLE;
37039 spin_unlock(&object->lock);
37040 @@ -602,7 +602,7 @@ static void fscache_object_available(str
37041 fscache_enqueue_dependents(object);
37043 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
37044 - fscache_stat(&fscache_n_object_avail);
37045 + fscache_stat_unchecked(&fscache_n_object_avail);
37049 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
37050 enum fscache_checkaux result;
37052 if (!object->cookie->def->check_aux) {
37053 - fscache_stat(&fscache_n_checkaux_none);
37054 + fscache_stat_unchecked(&fscache_n_checkaux_none);
37055 return FSCACHE_CHECKAUX_OKAY;
37058 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37060 /* entry okay as is */
37061 case FSCACHE_CHECKAUX_OKAY:
37062 - fscache_stat(&fscache_n_checkaux_okay);
37063 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
37066 /* entry requires update */
37067 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37068 - fscache_stat(&fscache_n_checkaux_update);
37069 + fscache_stat_unchecked(&fscache_n_checkaux_update);
37072 /* entry requires deletion */
37073 case FSCACHE_CHECKAUX_OBSOLETE:
37074 - fscache_stat(&fscache_n_checkaux_obsolete);
37075 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37079 diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37080 --- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37081 +++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37083 #include <linux/slab.h>
37084 #include "internal.h"
37086 -atomic_t fscache_op_debug_id;
37087 +atomic_unchecked_t fscache_op_debug_id;
37088 EXPORT_SYMBOL(fscache_op_debug_id);
37091 @@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37092 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37093 ASSERTCMP(atomic_read(&op->usage), >, 0);
37095 - fscache_stat(&fscache_n_op_enqueue);
37096 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37097 switch (op->flags & FSCACHE_OP_TYPE) {
37098 case FSCACHE_OP_ASYNC:
37099 _debug("queue async");
37100 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37101 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37103 fscache_enqueue_operation(op);
37104 - fscache_stat(&fscache_n_op_run);
37105 + fscache_stat_unchecked(&fscache_n_op_run);
37109 @@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37110 if (object->n_ops > 1) {
37111 atomic_inc(&op->usage);
37112 list_add_tail(&op->pend_link, &object->pending_ops);
37113 - fscache_stat(&fscache_n_op_pend);
37114 + fscache_stat_unchecked(&fscache_n_op_pend);
37115 } else if (!list_empty(&object->pending_ops)) {
37116 atomic_inc(&op->usage);
37117 list_add_tail(&op->pend_link, &object->pending_ops);
37118 - fscache_stat(&fscache_n_op_pend);
37119 + fscache_stat_unchecked(&fscache_n_op_pend);
37120 fscache_start_operations(object);
37122 ASSERTCMP(object->n_in_progress, ==, 0);
37123 @@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37124 object->n_exclusive++; /* reads and writes must wait */
37125 atomic_inc(&op->usage);
37126 list_add_tail(&op->pend_link, &object->pending_ops);
37127 - fscache_stat(&fscache_n_op_pend);
37128 + fscache_stat_unchecked(&fscache_n_op_pend);
37131 /* not allowed to submit ops in any other state */
37132 @@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37133 if (object->n_exclusive > 0) {
37134 atomic_inc(&op->usage);
37135 list_add_tail(&op->pend_link, &object->pending_ops);
37136 - fscache_stat(&fscache_n_op_pend);
37137 + fscache_stat_unchecked(&fscache_n_op_pend);
37138 } else if (!list_empty(&object->pending_ops)) {
37139 atomic_inc(&op->usage);
37140 list_add_tail(&op->pend_link, &object->pending_ops);
37141 - fscache_stat(&fscache_n_op_pend);
37142 + fscache_stat_unchecked(&fscache_n_op_pend);
37143 fscache_start_operations(object);
37145 ASSERTCMP(object->n_exclusive, ==, 0);
37146 @@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37148 atomic_inc(&op->usage);
37149 list_add_tail(&op->pend_link, &object->pending_ops);
37150 - fscache_stat(&fscache_n_op_pend);
37151 + fscache_stat_unchecked(&fscache_n_op_pend);
37153 } else if (object->state == FSCACHE_OBJECT_DYING ||
37154 object->state == FSCACHE_OBJECT_LC_DYING ||
37155 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37156 - fscache_stat(&fscache_n_op_rejected);
37157 + fscache_stat_unchecked(&fscache_n_op_rejected);
37159 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37160 fscache_report_unexpected_submission(object, op, ostate);
37161 @@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37164 if (!list_empty(&op->pend_link)) {
37165 - fscache_stat(&fscache_n_op_cancelled);
37166 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37167 list_del_init(&op->pend_link);
37169 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37170 @@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37171 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37174 - fscache_stat(&fscache_n_op_release);
37175 + fscache_stat_unchecked(&fscache_n_op_release);
37179 @@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37180 * lock, and defer it otherwise */
37181 if (!spin_trylock(&object->lock)) {
37182 _debug("defer put");
37183 - fscache_stat(&fscache_n_op_deferred_release);
37184 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37186 cache = object->cache;
37187 spin_lock(&cache->op_gc_list_lock);
37188 @@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37190 _debug("GC DEFERRED REL OBJ%x OP%x",
37191 object->debug_id, op->debug_id);
37192 - fscache_stat(&fscache_n_op_gc);
37193 + fscache_stat_unchecked(&fscache_n_op_gc);
37195 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37197 diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37198 --- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37199 +++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37200 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37201 val = radix_tree_lookup(&cookie->stores, page->index);
37204 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37205 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37206 __fscache_uncache_page(cookie, page);
37209 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37210 spin_unlock(&cookie->stores_lock);
37213 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37214 - fscache_stat(&fscache_n_store_radix_deletes);
37215 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37216 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37217 ASSERTCMP(xpage, ==, page);
37219 - fscache_stat(&fscache_n_store_vmscan_gone);
37220 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37223 wake_up_bit(&cookie->flags, 0);
37224 @@ -107,7 +107,7 @@ page_busy:
37225 /* we might want to wait here, but that could deadlock the allocator as
37226 * the work threads writing to the cache may all end up sleeping
37227 * on memory allocation */
37228 - fscache_stat(&fscache_n_store_vmscan_busy);
37229 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37232 EXPORT_SYMBOL(__fscache_maybe_release_page);
37233 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37234 FSCACHE_COOKIE_STORING_TAG);
37235 if (!radix_tree_tag_get(&cookie->stores, page->index,
37236 FSCACHE_COOKIE_PENDING_TAG)) {
37237 - fscache_stat(&fscache_n_store_radix_deletes);
37238 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37239 xpage = radix_tree_delete(&cookie->stores, page->index);
37241 spin_unlock(&cookie->stores_lock);
37242 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37244 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37246 - fscache_stat(&fscache_n_attr_changed_calls);
37247 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37249 if (fscache_object_is_active(object)) {
37250 fscache_set_op_state(op, "CallFS");
37251 @@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37253 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37255 - fscache_stat(&fscache_n_attr_changed);
37256 + fscache_stat_unchecked(&fscache_n_attr_changed);
37258 op = kzalloc(sizeof(*op), GFP_KERNEL);
37260 - fscache_stat(&fscache_n_attr_changed_nomem);
37261 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37262 _leave(" = -ENOMEM");
37265 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37266 if (fscache_submit_exclusive_op(object, op) < 0)
37268 spin_unlock(&cookie->lock);
37269 - fscache_stat(&fscache_n_attr_changed_ok);
37270 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37271 fscache_put_operation(op);
37274 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37276 spin_unlock(&cookie->lock);
37278 - fscache_stat(&fscache_n_attr_changed_nobufs);
37279 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37280 _leave(" = %d", -ENOBUFS);
37283 @@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37284 /* allocate a retrieval operation and attempt to submit it */
37285 op = kzalloc(sizeof(*op), GFP_NOIO);
37287 - fscache_stat(&fscache_n_retrievals_nomem);
37288 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37292 @@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37296 - fscache_stat(&fscache_n_retrievals_wait);
37297 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37300 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37301 fscache_wait_bit_interruptible,
37302 TASK_INTERRUPTIBLE) != 0) {
37303 - fscache_stat(&fscache_n_retrievals_intr);
37304 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37305 _leave(" = -ERESTARTSYS");
37306 return -ERESTARTSYS;
37308 @@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37310 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37311 struct fscache_retrieval *op,
37312 - atomic_t *stat_op_waits,
37313 - atomic_t *stat_object_dead)
37314 + atomic_unchecked_t *stat_op_waits,
37315 + atomic_unchecked_t *stat_object_dead)
37319 @@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37320 goto check_if_dead;
37323 - fscache_stat(stat_op_waits);
37324 + fscache_stat_unchecked(stat_op_waits);
37325 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37326 fscache_wait_bit_interruptible,
37327 TASK_INTERRUPTIBLE) < 0) {
37328 @@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37331 if (unlikely(fscache_object_is_dead(object))) {
37332 - fscache_stat(stat_object_dead);
37333 + fscache_stat_unchecked(stat_object_dead);
37337 @@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37339 _enter("%p,%p,,,", cookie, page);
37341 - fscache_stat(&fscache_n_retrievals);
37342 + fscache_stat_unchecked(&fscache_n_retrievals);
37344 if (hlist_empty(&cookie->backing_objects))
37346 @@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37347 goto nobufs_unlock;
37348 spin_unlock(&cookie->lock);
37350 - fscache_stat(&fscache_n_retrieval_ops);
37351 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37353 /* pin the netfs read context in case we need to do the actual netfs
37354 * read because we've encountered a cache read failure */
37355 @@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37358 if (ret == -ENOMEM)
37359 - fscache_stat(&fscache_n_retrievals_nomem);
37360 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37361 else if (ret == -ERESTARTSYS)
37362 - fscache_stat(&fscache_n_retrievals_intr);
37363 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37364 else if (ret == -ENODATA)
37365 - fscache_stat(&fscache_n_retrievals_nodata);
37366 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37368 - fscache_stat(&fscache_n_retrievals_nobufs);
37369 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37371 - fscache_stat(&fscache_n_retrievals_ok);
37372 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37374 fscache_put_retrieval(op);
37375 _leave(" = %d", ret);
37376 @@ -434,7 +434,7 @@ nobufs_unlock:
37377 spin_unlock(&cookie->lock);
37380 - fscache_stat(&fscache_n_retrievals_nobufs);
37381 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37382 _leave(" = -ENOBUFS");
37385 @@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37387 _enter("%p,,%d,,,", cookie, *nr_pages);
37389 - fscache_stat(&fscache_n_retrievals);
37390 + fscache_stat_unchecked(&fscache_n_retrievals);
37392 if (hlist_empty(&cookie->backing_objects))
37394 @@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37395 goto nobufs_unlock;
37396 spin_unlock(&cookie->lock);
37398 - fscache_stat(&fscache_n_retrieval_ops);
37399 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37401 /* pin the netfs read context in case we need to do the actual netfs
37402 * read because we've encountered a cache read failure */
37403 @@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37406 if (ret == -ENOMEM)
37407 - fscache_stat(&fscache_n_retrievals_nomem);
37408 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37409 else if (ret == -ERESTARTSYS)
37410 - fscache_stat(&fscache_n_retrievals_intr);
37411 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37412 else if (ret == -ENODATA)
37413 - fscache_stat(&fscache_n_retrievals_nodata);
37414 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37416 - fscache_stat(&fscache_n_retrievals_nobufs);
37417 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37419 - fscache_stat(&fscache_n_retrievals_ok);
37420 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37422 fscache_put_retrieval(op);
37423 _leave(" = %d", ret);
37424 @@ -551,7 +551,7 @@ nobufs_unlock:
37425 spin_unlock(&cookie->lock);
37428 - fscache_stat(&fscache_n_retrievals_nobufs);
37429 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37430 _leave(" = -ENOBUFS");
37433 @@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37435 _enter("%p,%p,,,", cookie, page);
37437 - fscache_stat(&fscache_n_allocs);
37438 + fscache_stat_unchecked(&fscache_n_allocs);
37440 if (hlist_empty(&cookie->backing_objects))
37442 @@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37443 goto nobufs_unlock;
37444 spin_unlock(&cookie->lock);
37446 - fscache_stat(&fscache_n_alloc_ops);
37447 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37449 ret = fscache_wait_for_retrieval_activation(
37451 @@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37454 if (ret == -ERESTARTSYS)
37455 - fscache_stat(&fscache_n_allocs_intr);
37456 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37458 - fscache_stat(&fscache_n_allocs_nobufs);
37459 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37461 - fscache_stat(&fscache_n_allocs_ok);
37462 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37464 fscache_put_retrieval(op);
37465 _leave(" = %d", ret);
37466 @@ -632,7 +632,7 @@ nobufs_unlock:
37467 spin_unlock(&cookie->lock);
37470 - fscache_stat(&fscache_n_allocs_nobufs);
37471 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37472 _leave(" = -ENOBUFS");
37475 @@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37477 spin_lock(&cookie->stores_lock);
37479 - fscache_stat(&fscache_n_store_calls);
37480 + fscache_stat_unchecked(&fscache_n_store_calls);
37482 /* find a page to store */
37484 @@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37486 _debug("gang %d [%lx]", n, page->index);
37487 if (page->index > op->store_limit) {
37488 - fscache_stat(&fscache_n_store_pages_over_limit);
37489 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37493 @@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37494 spin_unlock(&object->lock);
37496 fscache_set_op_state(&op->op, "Store");
37497 - fscache_stat(&fscache_n_store_pages);
37498 + fscache_stat_unchecked(&fscache_n_store_pages);
37499 fscache_stat(&fscache_n_cop_write_page);
37500 ret = object->cache->ops->write_page(op, page);
37501 fscache_stat_d(&fscache_n_cop_write_page);
37502 @@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37503 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37504 ASSERT(PageFsCache(page));
37506 - fscache_stat(&fscache_n_stores);
37507 + fscache_stat_unchecked(&fscache_n_stores);
37509 op = kzalloc(sizeof(*op), GFP_NOIO);
37511 @@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37512 spin_unlock(&cookie->stores_lock);
37513 spin_unlock(&object->lock);
37515 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37516 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37517 op->store_limit = object->store_limit;
37519 if (fscache_submit_op(object, &op->op) < 0)
37520 @@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37522 spin_unlock(&cookie->lock);
37523 radix_tree_preload_end();
37524 - fscache_stat(&fscache_n_store_ops);
37525 - fscache_stat(&fscache_n_stores_ok);
37526 + fscache_stat_unchecked(&fscache_n_store_ops);
37527 + fscache_stat_unchecked(&fscache_n_stores_ok);
37529 /* the work queue now carries its own ref on the object */
37530 fscache_put_operation(&op->op);
37531 @@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37535 - fscache_stat(&fscache_n_stores_again);
37536 + fscache_stat_unchecked(&fscache_n_stores_again);
37538 spin_unlock(&cookie->stores_lock);
37539 spin_unlock(&object->lock);
37540 spin_unlock(&cookie->lock);
37541 radix_tree_preload_end();
37543 - fscache_stat(&fscache_n_stores_ok);
37544 + fscache_stat_unchecked(&fscache_n_stores_ok);
37548 @@ -864,14 +864,14 @@ nobufs:
37549 spin_unlock(&cookie->lock);
37550 radix_tree_preload_end();
37552 - fscache_stat(&fscache_n_stores_nobufs);
37553 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37554 _leave(" = -ENOBUFS");
37560 - fscache_stat(&fscache_n_stores_oom);
37561 + fscache_stat_unchecked(&fscache_n_stores_oom);
37562 _leave(" = -ENOMEM");
37565 @@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37566 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37567 ASSERTCMP(page, !=, NULL);
37569 - fscache_stat(&fscache_n_uncaches);
37570 + fscache_stat_unchecked(&fscache_n_uncaches);
37572 /* cache withdrawal may beat us to it */
37573 if (!PageFsCache(page))
37574 @@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37575 unsigned long loop;
37577 #ifdef CONFIG_FSCACHE_STATS
37578 - atomic_add(pagevec->nr, &fscache_n_marks);
37579 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37582 for (loop = 0; loop < pagevec->nr; loop++) {
37583 diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37584 --- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37585 +++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37586 @@ -18,95 +18,95 @@
37588 * operation counters
37590 -atomic_t fscache_n_op_pend;
37591 -atomic_t fscache_n_op_run;
37592 -atomic_t fscache_n_op_enqueue;
37593 -atomic_t fscache_n_op_requeue;
37594 -atomic_t fscache_n_op_deferred_release;
37595 -atomic_t fscache_n_op_release;
37596 -atomic_t fscache_n_op_gc;
37597 -atomic_t fscache_n_op_cancelled;
37598 -atomic_t fscache_n_op_rejected;
37600 -atomic_t fscache_n_attr_changed;
37601 -atomic_t fscache_n_attr_changed_ok;
37602 -atomic_t fscache_n_attr_changed_nobufs;
37603 -atomic_t fscache_n_attr_changed_nomem;
37604 -atomic_t fscache_n_attr_changed_calls;
37606 -atomic_t fscache_n_allocs;
37607 -atomic_t fscache_n_allocs_ok;
37608 -atomic_t fscache_n_allocs_wait;
37609 -atomic_t fscache_n_allocs_nobufs;
37610 -atomic_t fscache_n_allocs_intr;
37611 -atomic_t fscache_n_allocs_object_dead;
37612 -atomic_t fscache_n_alloc_ops;
37613 -atomic_t fscache_n_alloc_op_waits;
37615 -atomic_t fscache_n_retrievals;
37616 -atomic_t fscache_n_retrievals_ok;
37617 -atomic_t fscache_n_retrievals_wait;
37618 -atomic_t fscache_n_retrievals_nodata;
37619 -atomic_t fscache_n_retrievals_nobufs;
37620 -atomic_t fscache_n_retrievals_intr;
37621 -atomic_t fscache_n_retrievals_nomem;
37622 -atomic_t fscache_n_retrievals_object_dead;
37623 -atomic_t fscache_n_retrieval_ops;
37624 -atomic_t fscache_n_retrieval_op_waits;
37626 -atomic_t fscache_n_stores;
37627 -atomic_t fscache_n_stores_ok;
37628 -atomic_t fscache_n_stores_again;
37629 -atomic_t fscache_n_stores_nobufs;
37630 -atomic_t fscache_n_stores_oom;
37631 -atomic_t fscache_n_store_ops;
37632 -atomic_t fscache_n_store_calls;
37633 -atomic_t fscache_n_store_pages;
37634 -atomic_t fscache_n_store_radix_deletes;
37635 -atomic_t fscache_n_store_pages_over_limit;
37637 -atomic_t fscache_n_store_vmscan_not_storing;
37638 -atomic_t fscache_n_store_vmscan_gone;
37639 -atomic_t fscache_n_store_vmscan_busy;
37640 -atomic_t fscache_n_store_vmscan_cancelled;
37642 -atomic_t fscache_n_marks;
37643 -atomic_t fscache_n_uncaches;
37645 -atomic_t fscache_n_acquires;
37646 -atomic_t fscache_n_acquires_null;
37647 -atomic_t fscache_n_acquires_no_cache;
37648 -atomic_t fscache_n_acquires_ok;
37649 -atomic_t fscache_n_acquires_nobufs;
37650 -atomic_t fscache_n_acquires_oom;
37652 -atomic_t fscache_n_updates;
37653 -atomic_t fscache_n_updates_null;
37654 -atomic_t fscache_n_updates_run;
37656 -atomic_t fscache_n_relinquishes;
37657 -atomic_t fscache_n_relinquishes_null;
37658 -atomic_t fscache_n_relinquishes_waitcrt;
37659 -atomic_t fscache_n_relinquishes_retire;
37661 -atomic_t fscache_n_cookie_index;
37662 -atomic_t fscache_n_cookie_data;
37663 -atomic_t fscache_n_cookie_special;
37665 -atomic_t fscache_n_object_alloc;
37666 -atomic_t fscache_n_object_no_alloc;
37667 -atomic_t fscache_n_object_lookups;
37668 -atomic_t fscache_n_object_lookups_negative;
37669 -atomic_t fscache_n_object_lookups_positive;
37670 -atomic_t fscache_n_object_lookups_timed_out;
37671 -atomic_t fscache_n_object_created;
37672 -atomic_t fscache_n_object_avail;
37673 -atomic_t fscache_n_object_dead;
37675 -atomic_t fscache_n_checkaux_none;
37676 -atomic_t fscache_n_checkaux_okay;
37677 -atomic_t fscache_n_checkaux_update;
37678 -atomic_t fscache_n_checkaux_obsolete;
37679 +atomic_unchecked_t fscache_n_op_pend;
37680 +atomic_unchecked_t fscache_n_op_run;
37681 +atomic_unchecked_t fscache_n_op_enqueue;
37682 +atomic_unchecked_t fscache_n_op_requeue;
37683 +atomic_unchecked_t fscache_n_op_deferred_release;
37684 +atomic_unchecked_t fscache_n_op_release;
37685 +atomic_unchecked_t fscache_n_op_gc;
37686 +atomic_unchecked_t fscache_n_op_cancelled;
37687 +atomic_unchecked_t fscache_n_op_rejected;
37689 +atomic_unchecked_t fscache_n_attr_changed;
37690 +atomic_unchecked_t fscache_n_attr_changed_ok;
37691 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37692 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37693 +atomic_unchecked_t fscache_n_attr_changed_calls;
37695 +atomic_unchecked_t fscache_n_allocs;
37696 +atomic_unchecked_t fscache_n_allocs_ok;
37697 +atomic_unchecked_t fscache_n_allocs_wait;
37698 +atomic_unchecked_t fscache_n_allocs_nobufs;
37699 +atomic_unchecked_t fscache_n_allocs_intr;
37700 +atomic_unchecked_t fscache_n_allocs_object_dead;
37701 +atomic_unchecked_t fscache_n_alloc_ops;
37702 +atomic_unchecked_t fscache_n_alloc_op_waits;
37704 +atomic_unchecked_t fscache_n_retrievals;
37705 +atomic_unchecked_t fscache_n_retrievals_ok;
37706 +atomic_unchecked_t fscache_n_retrievals_wait;
37707 +atomic_unchecked_t fscache_n_retrievals_nodata;
37708 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37709 +atomic_unchecked_t fscache_n_retrievals_intr;
37710 +atomic_unchecked_t fscache_n_retrievals_nomem;
37711 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37712 +atomic_unchecked_t fscache_n_retrieval_ops;
37713 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37715 +atomic_unchecked_t fscache_n_stores;
37716 +atomic_unchecked_t fscache_n_stores_ok;
37717 +atomic_unchecked_t fscache_n_stores_again;
37718 +atomic_unchecked_t fscache_n_stores_nobufs;
37719 +atomic_unchecked_t fscache_n_stores_oom;
37720 +atomic_unchecked_t fscache_n_store_ops;
37721 +atomic_unchecked_t fscache_n_store_calls;
37722 +atomic_unchecked_t fscache_n_store_pages;
37723 +atomic_unchecked_t fscache_n_store_radix_deletes;
37724 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37726 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37727 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37728 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37729 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37731 +atomic_unchecked_t fscache_n_marks;
37732 +atomic_unchecked_t fscache_n_uncaches;
37734 +atomic_unchecked_t fscache_n_acquires;
37735 +atomic_unchecked_t fscache_n_acquires_null;
37736 +atomic_unchecked_t fscache_n_acquires_no_cache;
37737 +atomic_unchecked_t fscache_n_acquires_ok;
37738 +atomic_unchecked_t fscache_n_acquires_nobufs;
37739 +atomic_unchecked_t fscache_n_acquires_oom;
37741 +atomic_unchecked_t fscache_n_updates;
37742 +atomic_unchecked_t fscache_n_updates_null;
37743 +atomic_unchecked_t fscache_n_updates_run;
37745 +atomic_unchecked_t fscache_n_relinquishes;
37746 +atomic_unchecked_t fscache_n_relinquishes_null;
37747 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37748 +atomic_unchecked_t fscache_n_relinquishes_retire;
37750 +atomic_unchecked_t fscache_n_cookie_index;
37751 +atomic_unchecked_t fscache_n_cookie_data;
37752 +atomic_unchecked_t fscache_n_cookie_special;
37754 +atomic_unchecked_t fscache_n_object_alloc;
37755 +atomic_unchecked_t fscache_n_object_no_alloc;
37756 +atomic_unchecked_t fscache_n_object_lookups;
37757 +atomic_unchecked_t fscache_n_object_lookups_negative;
37758 +atomic_unchecked_t fscache_n_object_lookups_positive;
37759 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37760 +atomic_unchecked_t fscache_n_object_created;
37761 +atomic_unchecked_t fscache_n_object_avail;
37762 +atomic_unchecked_t fscache_n_object_dead;
37764 +atomic_unchecked_t fscache_n_checkaux_none;
37765 +atomic_unchecked_t fscache_n_checkaux_okay;
37766 +atomic_unchecked_t fscache_n_checkaux_update;
37767 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37769 atomic_t fscache_n_cop_alloc_object;
37770 atomic_t fscache_n_cop_lookup_object;
37771 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37772 seq_puts(m, "FS-Cache statistics\n");
37774 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37775 - atomic_read(&fscache_n_cookie_index),
37776 - atomic_read(&fscache_n_cookie_data),
37777 - atomic_read(&fscache_n_cookie_special));
37778 + atomic_read_unchecked(&fscache_n_cookie_index),
37779 + atomic_read_unchecked(&fscache_n_cookie_data),
37780 + atomic_read_unchecked(&fscache_n_cookie_special));
37782 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37783 - atomic_read(&fscache_n_object_alloc),
37784 - atomic_read(&fscache_n_object_no_alloc),
37785 - atomic_read(&fscache_n_object_avail),
37786 - atomic_read(&fscache_n_object_dead));
37787 + atomic_read_unchecked(&fscache_n_object_alloc),
37788 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37789 + atomic_read_unchecked(&fscache_n_object_avail),
37790 + atomic_read_unchecked(&fscache_n_object_dead));
37791 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37792 - atomic_read(&fscache_n_checkaux_none),
37793 - atomic_read(&fscache_n_checkaux_okay),
37794 - atomic_read(&fscache_n_checkaux_update),
37795 - atomic_read(&fscache_n_checkaux_obsolete));
37796 + atomic_read_unchecked(&fscache_n_checkaux_none),
37797 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37798 + atomic_read_unchecked(&fscache_n_checkaux_update),
37799 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37801 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37802 - atomic_read(&fscache_n_marks),
37803 - atomic_read(&fscache_n_uncaches));
37804 + atomic_read_unchecked(&fscache_n_marks),
37805 + atomic_read_unchecked(&fscache_n_uncaches));
37807 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37809 - atomic_read(&fscache_n_acquires),
37810 - atomic_read(&fscache_n_acquires_null),
37811 - atomic_read(&fscache_n_acquires_no_cache),
37812 - atomic_read(&fscache_n_acquires_ok),
37813 - atomic_read(&fscache_n_acquires_nobufs),
37814 - atomic_read(&fscache_n_acquires_oom));
37815 + atomic_read_unchecked(&fscache_n_acquires),
37816 + atomic_read_unchecked(&fscache_n_acquires_null),
37817 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37818 + atomic_read_unchecked(&fscache_n_acquires_ok),
37819 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37820 + atomic_read_unchecked(&fscache_n_acquires_oom));
37822 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37823 - atomic_read(&fscache_n_object_lookups),
37824 - atomic_read(&fscache_n_object_lookups_negative),
37825 - atomic_read(&fscache_n_object_lookups_positive),
37826 - atomic_read(&fscache_n_object_created),
37827 - atomic_read(&fscache_n_object_lookups_timed_out));
37828 + atomic_read_unchecked(&fscache_n_object_lookups),
37829 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37830 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37831 + atomic_read_unchecked(&fscache_n_object_created),
37832 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37834 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37835 - atomic_read(&fscache_n_updates),
37836 - atomic_read(&fscache_n_updates_null),
37837 - atomic_read(&fscache_n_updates_run));
37838 + atomic_read_unchecked(&fscache_n_updates),
37839 + atomic_read_unchecked(&fscache_n_updates_null),
37840 + atomic_read_unchecked(&fscache_n_updates_run));
37842 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37843 - atomic_read(&fscache_n_relinquishes),
37844 - atomic_read(&fscache_n_relinquishes_null),
37845 - atomic_read(&fscache_n_relinquishes_waitcrt),
37846 - atomic_read(&fscache_n_relinquishes_retire));
37847 + atomic_read_unchecked(&fscache_n_relinquishes),
37848 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37849 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37850 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37852 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37853 - atomic_read(&fscache_n_attr_changed),
37854 - atomic_read(&fscache_n_attr_changed_ok),
37855 - atomic_read(&fscache_n_attr_changed_nobufs),
37856 - atomic_read(&fscache_n_attr_changed_nomem),
37857 - atomic_read(&fscache_n_attr_changed_calls));
37858 + atomic_read_unchecked(&fscache_n_attr_changed),
37859 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37860 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37861 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37862 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37864 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37865 - atomic_read(&fscache_n_allocs),
37866 - atomic_read(&fscache_n_allocs_ok),
37867 - atomic_read(&fscache_n_allocs_wait),
37868 - atomic_read(&fscache_n_allocs_nobufs),
37869 - atomic_read(&fscache_n_allocs_intr));
37870 + atomic_read_unchecked(&fscache_n_allocs),
37871 + atomic_read_unchecked(&fscache_n_allocs_ok),
37872 + atomic_read_unchecked(&fscache_n_allocs_wait),
37873 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37874 + atomic_read_unchecked(&fscache_n_allocs_intr));
37875 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37876 - atomic_read(&fscache_n_alloc_ops),
37877 - atomic_read(&fscache_n_alloc_op_waits),
37878 - atomic_read(&fscache_n_allocs_object_dead));
37879 + atomic_read_unchecked(&fscache_n_alloc_ops),
37880 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37881 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37883 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37884 " int=%u oom=%u\n",
37885 - atomic_read(&fscache_n_retrievals),
37886 - atomic_read(&fscache_n_retrievals_ok),
37887 - atomic_read(&fscache_n_retrievals_wait),
37888 - atomic_read(&fscache_n_retrievals_nodata),
37889 - atomic_read(&fscache_n_retrievals_nobufs),
37890 - atomic_read(&fscache_n_retrievals_intr),
37891 - atomic_read(&fscache_n_retrievals_nomem));
37892 + atomic_read_unchecked(&fscache_n_retrievals),
37893 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37894 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37895 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37896 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37897 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37898 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37899 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37900 - atomic_read(&fscache_n_retrieval_ops),
37901 - atomic_read(&fscache_n_retrieval_op_waits),
37902 - atomic_read(&fscache_n_retrievals_object_dead));
37903 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37904 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37905 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37907 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37908 - atomic_read(&fscache_n_stores),
37909 - atomic_read(&fscache_n_stores_ok),
37910 - atomic_read(&fscache_n_stores_again),
37911 - atomic_read(&fscache_n_stores_nobufs),
37912 - atomic_read(&fscache_n_stores_oom));
37913 + atomic_read_unchecked(&fscache_n_stores),
37914 + atomic_read_unchecked(&fscache_n_stores_ok),
37915 + atomic_read_unchecked(&fscache_n_stores_again),
37916 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37917 + atomic_read_unchecked(&fscache_n_stores_oom));
37918 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37919 - atomic_read(&fscache_n_store_ops),
37920 - atomic_read(&fscache_n_store_calls),
37921 - atomic_read(&fscache_n_store_pages),
37922 - atomic_read(&fscache_n_store_radix_deletes),
37923 - atomic_read(&fscache_n_store_pages_over_limit));
37924 + atomic_read_unchecked(&fscache_n_store_ops),
37925 + atomic_read_unchecked(&fscache_n_store_calls),
37926 + atomic_read_unchecked(&fscache_n_store_pages),
37927 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37928 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37930 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37931 - atomic_read(&fscache_n_store_vmscan_not_storing),
37932 - atomic_read(&fscache_n_store_vmscan_gone),
37933 - atomic_read(&fscache_n_store_vmscan_busy),
37934 - atomic_read(&fscache_n_store_vmscan_cancelled));
37935 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37936 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37937 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37938 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37940 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37941 - atomic_read(&fscache_n_op_pend),
37942 - atomic_read(&fscache_n_op_run),
37943 - atomic_read(&fscache_n_op_enqueue),
37944 - atomic_read(&fscache_n_op_cancelled),
37945 - atomic_read(&fscache_n_op_rejected));
37946 + atomic_read_unchecked(&fscache_n_op_pend),
37947 + atomic_read_unchecked(&fscache_n_op_run),
37948 + atomic_read_unchecked(&fscache_n_op_enqueue),
37949 + atomic_read_unchecked(&fscache_n_op_cancelled),
37950 + atomic_read_unchecked(&fscache_n_op_rejected));
37951 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37952 - atomic_read(&fscache_n_op_deferred_release),
37953 - atomic_read(&fscache_n_op_release),
37954 - atomic_read(&fscache_n_op_gc));
37955 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37956 + atomic_read_unchecked(&fscache_n_op_release),
37957 + atomic_read_unchecked(&fscache_n_op_gc));
37959 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37960 atomic_read(&fscache_n_cop_alloc_object),
37961 diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37962 --- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37963 +++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37965 #include <linux/path.h>
37966 #include <linux/slab.h>
37967 #include <linux/fs_struct.h>
37968 +#include <linux/grsecurity.h>
37969 #include "internal.h"
37971 static inline void path_get_longterm(struct path *path)
37972 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37973 old_root = fs->root;
37975 path_get_longterm(path);
37976 + gr_set_chroot_entries(current, path);
37977 write_seqcount_end(&fs->seq);
37978 spin_unlock(&fs->lock);
37979 if (old_root.dentry)
37980 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37981 && fs->root.mnt == old_root->mnt) {
37982 path_get_longterm(new_root);
37983 fs->root = *new_root;
37984 + gr_set_chroot_entries(p, new_root);
37987 if (fs->pwd.dentry == old_root->dentry
37988 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37989 spin_lock(&fs->lock);
37990 write_seqcount_begin(&fs->seq);
37992 - kill = !--fs->users;
37993 + gr_clear_chroot_entries(tsk);
37994 + kill = !atomic_dec_return(&fs->users);
37995 write_seqcount_end(&fs->seq);
37996 spin_unlock(&fs->lock);
37998 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37999 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
38000 /* We don't need to lock fs - think why ;-) */
38003 + atomic_set(&fs->users, 1);
38005 spin_lock_init(&fs->lock);
38006 seqcount_init(&fs->seq);
38007 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
38008 spin_lock(&old->lock);
38009 fs->root = old->root;
38010 path_get_longterm(&fs->root);
38011 + /* instead of calling gr_set_chroot_entries here,
38012 + we call it from every caller of this function
38014 fs->pwd = old->pwd;
38015 path_get_longterm(&fs->pwd);
38016 spin_unlock(&old->lock);
38017 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
38019 task_lock(current);
38020 spin_lock(&fs->lock);
38021 - kill = !--fs->users;
38022 + kill = !atomic_dec_return(&fs->users);
38023 current->fs = new_fs;
38024 + gr_set_chroot_entries(current, &new_fs->root);
38025 spin_unlock(&fs->lock);
38026 task_unlock(current);
38028 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
38030 /* to be mentioned only in INIT_TASK */
38031 struct fs_struct init_fs = {
38033 + .users = ATOMIC_INIT(1),
38034 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
38035 .seq = SEQCNT_ZERO,
38037 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
38038 task_lock(current);
38040 spin_lock(&init_fs.lock);
38042 + atomic_inc(&init_fs.users);
38043 spin_unlock(&init_fs.lock);
38045 spin_lock(&fs->lock);
38046 current->fs = &init_fs;
38047 - kill = !--fs->users;
38048 + gr_set_chroot_entries(current, &current->fs->root);
38049 + kill = !atomic_dec_return(&fs->users);
38050 spin_unlock(&fs->lock);
38052 task_unlock(current);
38053 diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38054 --- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38055 +++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38056 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
38057 INIT_LIST_HEAD(&cuse_conntbl[i]);
38059 /* inherit and extend fuse_dev_operations */
38060 - cuse_channel_fops = fuse_dev_operations;
38061 - cuse_channel_fops.owner = THIS_MODULE;
38062 - cuse_channel_fops.open = cuse_channel_open;
38063 - cuse_channel_fops.release = cuse_channel_release;
38064 + pax_open_kernel();
38065 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38066 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38067 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
38068 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
38069 + pax_close_kernel();
38071 cuse_class = class_create(THIS_MODULE, "cuse");
38072 if (IS_ERR(cuse_class))
38073 diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38074 --- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38075 +++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38076 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38080 - if (!pipe->readers) {
38081 + if (!atomic_read(&pipe->readers)) {
38082 send_sig(SIGPIPE, current, 0);
38085 diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38086 --- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38087 +++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38088 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38092 -static void free_link(char *link)
38093 +static void free_link(const char *link)
38096 free_page((unsigned long) link);
38097 diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38098 --- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38099 +++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38100 @@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38104 + pax_track_stack();
38106 if (ndentry->d_inode) {
38107 nip = GFS2_I(ndentry->d_inode);
38109 @@ -1019,7 +1021,7 @@ out:
38111 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38113 - char *s = nd_get_link(nd);
38114 + const char *s = nd_get_link(nd);
38118 diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38119 --- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38120 +++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38121 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38125 + pax_track_stack();
38127 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38128 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38130 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38134 + pax_track_stack();
38136 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38137 str->name, cnid, inode->i_nlink);
38138 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38139 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38140 int entry_size, type;
38143 + pax_track_stack();
38145 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38146 cnid, src_dir->i_ino, src_name->name,
38147 dst_dir->i_ino, dst_name->name);
38148 diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38149 --- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38150 +++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38151 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38152 struct hfsplus_readdir_data *rd;
38155 + pax_track_stack();
38157 if (filp->f_pos >= inode->i_size)
38160 diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38161 --- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38162 +++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38163 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38167 + pax_track_stack();
38169 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38171 HFSPLUS_I(inode)->linkid = 0;
38172 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38173 struct hfs_find_data fd;
38174 hfsplus_cat_entry entry;
38176 + pax_track_stack();
38178 if (HFSPLUS_IS_RSRC(inode))
38179 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38181 diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38182 --- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38183 +++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38184 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38185 struct hfsplus_cat_file *file;
38188 + pax_track_stack();
38190 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38191 return -EOPNOTSUPP;
38193 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38194 struct hfsplus_cat_file *file;
38197 + pax_track_stack();
38199 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38200 return -EOPNOTSUPP;
38202 diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38203 --- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38204 +++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38205 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38206 struct nls_table *nls = NULL;
38209 + pax_track_stack();
38212 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38214 diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38215 --- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38216 +++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38217 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38218 .kill_sb = kill_litter_super,
38221 -static struct vfsmount *hugetlbfs_vfsmount;
38222 +struct vfsmount *hugetlbfs_vfsmount;
38224 static int can_do_hugetlb_shm(void)
38226 diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38227 --- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38228 +++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38229 @@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38232 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38233 - static atomic_t shared_last_ino;
38234 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38235 + static atomic_unchecked_t shared_last_ino;
38236 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38238 res = next - LAST_INO_BATCH;
38240 diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38241 --- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38242 +++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38243 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38247 + pax_track_stack();
38249 jbd_debug(1, "Start checkpoint\n");
38252 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38253 --- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38254 +++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38255 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38259 + pax_track_stack();
38261 memset(positions,0,sizeof(positions));
38263 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38264 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38268 + pax_track_stack();
38270 memset(positions,0,sizeof(positions));
38272 while (outpos<destlen) {
38273 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38274 --- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38275 +++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38276 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38278 uint32_t mysrclen, mydstlen;
38280 + pax_track_stack();
38282 mysrclen = *sourcelen;
38283 mydstlen = *dstlen - 8;
38285 diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38286 --- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38287 +++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38288 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38289 struct jffs2_unknown_node marker = {
38290 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38291 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38292 - .totlen = cpu_to_je32(c->cleanmarker_size)
38293 + .totlen = cpu_to_je32(c->cleanmarker_size),
38294 + .hdr_crc = cpu_to_je32(0)
38297 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38298 diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38299 --- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38300 +++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38301 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38303 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38304 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38305 - .totlen = constant_cpu_to_je32(8)
38306 + .totlen = constant_cpu_to_je32(8),
38307 + .hdr_crc = constant_cpu_to_je32(0)
38311 diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38312 --- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38313 +++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38314 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38316 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38318 + pax_track_stack();
38320 /* Phase.1 : Merge same xref */
38321 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38322 xref_tmphash[i] = NULL;
38323 diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38324 --- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38325 +++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38326 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38329 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38330 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38331 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38333 if (jfs_inode_cachep == NULL)
38335 diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38336 --- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38337 +++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38338 @@ -86,7 +86,7 @@ config HAVE_AOUT
38341 tristate "Kernel support for a.out and ECOFF binaries"
38342 - depends on HAVE_AOUT
38343 + depends on HAVE_AOUT && BROKEN
38345 A.out (Assembler.OUTput) is a set of formats for libraries and
38346 executables used in the earliest versions of UNIX. Linux used
38347 diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38348 --- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38349 +++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38350 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38352 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38353 struct dentry *next;
38354 + char d_name[sizeof(next->d_iname)];
38355 + const unsigned char *name;
38357 next = list_entry(p, struct dentry, d_u.d_child);
38358 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38359 if (!simple_positive(next)) {
38360 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38362 spin_unlock(&next->d_lock);
38363 spin_unlock(&dentry->d_lock);
38364 - if (filldir(dirent, next->d_name.name,
38365 + name = next->d_name.name;
38366 + if (name == next->d_iname) {
38367 + memcpy(d_name, name, next->d_name.len);
38370 + if (filldir(dirent, name,
38371 next->d_name.len, filp->f_pos,
38372 next->d_inode->i_ino,
38373 dt_type(next->d_inode)) < 0)
38374 diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38375 --- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38376 +++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38377 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38379 * Cookie counter for NLM requests
38381 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38382 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38384 void nlmclnt_next_cookie(struct nlm_cookie *c)
38386 - u32 cookie = atomic_inc_return(&nlm_cookie);
38387 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38389 memcpy(c->data, &cookie, 4);
38391 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38392 struct nlm_rqst reqst, *req;
38395 + pax_track_stack();
38398 memset(req, 0, sizeof(*req));
38399 locks_init_lock(&req->a_args.lock.fl);
38400 diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38401 --- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38402 +++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38403 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38406 if (filp->f_op && filp->f_op->flock) {
38407 - struct file_lock fl = {
38408 + struct file_lock flock = {
38409 .fl_pid = current->tgid,
38411 .fl_flags = FL_FLOCK,
38412 .fl_type = F_UNLCK,
38413 .fl_end = OFFSET_MAX,
38415 - filp->f_op->flock(filp, F_SETLKW, &fl);
38416 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38417 - fl.fl_ops->fl_release_private(&fl);
38418 + filp->f_op->flock(filp, F_SETLKW, &flock);
38419 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38420 + flock.fl_ops->fl_release_private(&flock);
38424 diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38425 --- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38426 +++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38427 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38428 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38429 int err, valid0, valid1;
38431 + pax_track_stack();
38433 /* read first superblock */
38434 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38436 diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38437 --- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38438 +++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38439 @@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38443 - * Read/write DACs are always overridable.
38444 - * Executable DACs are overridable if at least one exec bit is set.
38445 + * Searching includes executable on directories, else just read.
38447 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38448 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38449 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38450 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38451 +#ifdef CONFIG_GRKERNSEC
38452 + if (flags & IPERM_FLAG_RCU)
38455 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38460 - * Searching includes executable on directories, else just read.
38461 + * Read/write DACs are always overridable.
38462 + * Executable DACs are overridable if at least one exec bit is set.
38464 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38465 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38466 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38467 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38468 +#ifdef CONFIG_GRKERNSEC
38469 + if (flags & IPERM_FLAG_RCU)
38472 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38478 @@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38479 struct dentry *dentry = nd->path.dentry;
38482 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38485 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38488 @@ -671,9 +684,16 @@ static inline int exec_permission(struct
38489 if (ret == -ECHILD)
38492 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38493 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38494 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38497 +#ifdef CONFIG_GRKERNSEC
38498 + if (flags & IPERM_FLAG_RCU)
38501 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38507 @@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38511 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38512 + dentry->d_inode, dentry, nd->path.mnt)) {
38514 + *p = ERR_PTR(error); /* no ->put_link(), please */
38515 + path_put(&nd->path);
38519 nd->last_type = LAST_BIND;
38520 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38521 error = PTR_ERR(*p);
38523 - char *s = nd_get_link(nd);
38524 + const char *s = nd_get_link(nd);
38527 error = __vfs_follow_link(nd, s);
38528 @@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38529 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38531 if (likely(!retval)) {
38532 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38535 if (unlikely(!audit_dummy_context())) {
38536 if (nd->path.dentry && nd->inode)
38537 audit_inode(name, nd->path.dentry);
38538 @@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38543 + * Note that while the flag value (low two bits) for sys_open means:
38545 + * 01 - write-only
38546 + * 10 - read-write
38548 + * it is changed into
38549 + * 00 - no permissions needed
38550 + * 01 - read-permission
38551 + * 10 - write-permission
38552 + * 11 - read-write
38553 + * for the internal routines (ie open_namei()/follow_link() etc)
38554 + * This is more logical, and also allows the 00 "no perm needed"
38555 + * to be used for symlinks (where the permissions are checked
38559 +static inline int open_to_namei_flags(int flag)
38561 + if ((flag+1) & O_ACCMODE)
38566 static int may_open(struct path *path, int acc_mode, int flag)
38568 struct dentry *dentry = path->dentry;
38569 @@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38571 * Ensure there are no outstanding leases on the file.
38573 - return break_lease(inode, flag);
38574 + error = break_lease(inode, flag);
38579 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38584 + if (gr_handle_rawio(inode)) {
38589 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38597 static int handle_truncate(struct file *filp)
38598 @@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38602 - * Note that while the flag value (low two bits) for sys_open means:
38604 - * 01 - write-only
38605 - * 10 - read-write
38607 - * it is changed into
38608 - * 00 - no permissions needed
38609 - * 01 - read-permission
38610 - * 10 - write-permission
38611 - * 11 - read-write
38612 - * for the internal routines (ie open_namei()/follow_link() etc)
38613 - * This is more logical, and also allows the 00 "no perm needed"
38614 - * to be used for symlinks (where the permissions are checked
38618 -static inline int open_to_namei_flags(int flag)
38620 - if ((flag+1) & O_ACCMODE)
38626 * Handle the last step of open()
38628 static struct file *do_last(struct nameidata *nd, struct path *path,
38629 @@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38630 struct dentry *dir = nd->path.dentry;
38631 struct dentry *dentry;
38632 int open_flag = op->open_flag;
38633 + int flag = open_to_namei_flags(open_flag);
38634 int will_truncate = open_flag & O_TRUNC;
38635 int want_write = 0;
38636 int acc_mode = op->acc_mode;
38637 @@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38638 /* Negative dentry, just create the file */
38639 if (!dentry->d_inode) {
38640 int mode = op->mode;
38642 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38644 + goto exit_mutex_unlock;
38647 if (!IS_POSIXACL(dir->d_inode))
38648 mode &= ~current_umask();
38650 @@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38651 error = vfs_create(dir->d_inode, dentry, mode, nd);
38653 goto exit_mutex_unlock;
38655 + gr_handle_create(path->dentry, path->mnt);
38656 mutex_unlock(&dir->d_inode->i_mutex);
38657 dput(nd->path.dentry);
38658 nd->path.dentry = dentry;
38659 @@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38661 * It already exists.
38664 + /* only check if O_CREAT is specified, all other checks need to go
38666 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38668 + goto exit_mutex_unlock;
38671 mutex_unlock(&dir->d_inode->i_mutex);
38672 audit_inode(pathname, path->dentry);
38674 @@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38675 error = may_mknod(mode);
38679 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38684 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38689 error = mnt_want_write(nd.path.mnt);
38692 @@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38695 mnt_drop_write(nd.path.mnt);
38698 + gr_handle_create(dentry, nd.path.mnt);
38702 @@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38703 if (IS_ERR(dentry))
38706 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38711 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38712 mode &= ~current_umask();
38713 error = mnt_want_write(nd.path.mnt);
38714 @@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38715 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38717 mnt_drop_write(nd.path.mnt);
38720 + gr_handle_create(dentry, nd.path.mnt);
38725 @@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38727 struct dentry *dentry;
38728 struct nameidata nd;
38729 + ino_t saved_ino = 0;
38730 + dev_t saved_dev = 0;
38732 error = user_path_parent(dfd, pathname, &nd, &name);
38734 @@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38735 error = PTR_ERR(dentry);
38736 if (IS_ERR(dentry))
38739 + if (dentry->d_inode != NULL) {
38740 + if (dentry->d_inode->i_nlink <= 1) {
38741 + saved_ino = dentry->d_inode->i_ino;
38742 + saved_dev = gr_get_dev_from_dentry(dentry);
38745 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38751 error = mnt_want_write(nd.path.mnt);
38754 @@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38757 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38758 + if (!error && (saved_dev || saved_ino))
38759 + gr_handle_delete(saved_ino, saved_dev);
38761 mnt_drop_write(nd.path.mnt);
38763 @@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38764 struct dentry *dentry;
38765 struct nameidata nd;
38766 struct inode *inode = NULL;
38767 + ino_t saved_ino = 0;
38768 + dev_t saved_dev = 0;
38770 error = user_path_parent(dfd, pathname, &nd, &name);
38772 @@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38773 if (nd.last.name[nd.last.len])
38775 inode = dentry->d_inode;
38779 + if (inode->i_nlink <= 1) {
38780 + saved_ino = inode->i_ino;
38781 + saved_dev = gr_get_dev_from_dentry(dentry);
38783 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38788 error = mnt_want_write(nd.path.mnt);
38791 @@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38794 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38795 + if (!error && (saved_ino || saved_dev))
38796 + gr_handle_delete(saved_ino, saved_dev);
38798 mnt_drop_write(nd.path.mnt);
38800 @@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38801 if (IS_ERR(dentry))
38804 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38809 error = mnt_want_write(nd.path.mnt);
38812 @@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38814 goto out_drop_write;
38815 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38817 + gr_handle_create(dentry, nd.path.mnt);
38819 mnt_drop_write(nd.path.mnt);
38821 @@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38822 error = PTR_ERR(new_dentry);
38823 if (IS_ERR(new_dentry))
38826 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38827 + old_path.dentry->d_inode,
38828 + old_path.dentry->d_inode->i_mode, to)) {
38833 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38834 + old_path.dentry, old_path.mnt, to)) {
38839 error = mnt_want_write(nd.path.mnt);
38842 @@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38844 goto out_drop_write;
38845 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38847 + gr_handle_create(new_dentry, nd.path.mnt);
38849 mnt_drop_write(nd.path.mnt);
38851 @@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38855 + pax_track_stack();
38857 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38860 @@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38861 if (new_dentry == trap)
38864 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38865 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38870 error = mnt_want_write(oldnd.path.mnt);
38873 @@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38875 error = vfs_rename(old_dir->d_inode, old_dentry,
38876 new_dir->d_inode, new_dentry);
38878 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38879 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38881 mnt_drop_write(oldnd.path.mnt);
38883 @@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38885 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38888 + const char *newlink;
38891 len = PTR_ERR(link);
38892 @@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38893 len = strlen(link);
38894 if (len > (unsigned) buflen)
38896 - if (copy_to_user(buffer, link, len))
38898 + if (len < sizeof(tmpbuf)) {
38899 + memcpy(tmpbuf, link, len);
38900 + newlink = tmpbuf;
38904 + if (copy_to_user(buffer, newlink, len))
38908 diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38909 --- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38910 +++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38911 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38912 if (!(sb->s_flags & MS_RDONLY))
38913 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38914 up_write(&sb->s_umount);
38916 + gr_log_remount(mnt->mnt_devname, retval);
38921 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38922 br_write_unlock(vfsmount_lock);
38923 up_write(&namespace_sem);
38924 release_mounts(&umount_list);
38926 + gr_log_unmount(mnt->mnt_devname, retval);
38931 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38932 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38935 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38940 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38945 if (flags & MS_REMOUNT)
38946 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38948 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38949 dev_name, data_page);
38953 + gr_log_mount(dev_name, dir_name, retval);
38958 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38962 + if (gr_handle_chroot_pivot()) {
38967 get_fs_root(current->fs, &root);
38968 error = lock_mount(&old);
38970 diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38971 --- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38972 +++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38973 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38974 int res, val = 0, len;
38975 __u8 __name[NCP_MAXPATHLEN + 1];
38977 + pax_track_stack();
38979 if (dentry == dentry->d_sb->s_root)
38982 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38983 int error, res, len;
38984 __u8 __name[NCP_MAXPATHLEN + 1];
38986 + pax_track_stack();
38989 if (!ncp_conn_valid(server))
38991 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38992 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38993 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38995 + pax_track_stack();
38997 ncp_age_dentry(server, dentry);
38998 len = sizeof(__name);
38999 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
39000 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
39002 __u8 __name[NCP_MAXPATHLEN + 1];
39004 + pax_track_stack();
39006 DPRINTK("ncp_mkdir: making %s/%s\n",
39007 dentry->d_parent->d_name.name, dentry->d_name.name);
39009 @@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
39010 int old_len, new_len;
39011 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
39013 + pax_track_stack();
39015 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
39016 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
39017 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
39018 diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
39019 --- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
39020 +++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39021 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
39023 struct ncp_entry_info finfo;
39025 + pax_track_stack();
39027 data.wdog_pid = NULL;
39028 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
39030 diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
39031 --- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
39032 +++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39033 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
39034 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
39035 nfsi->attrtimeo_timestamp = jiffies;
39037 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
39038 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
39039 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
39040 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
39042 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
39043 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
39046 -static atomic_long_t nfs_attr_generation_counter;
39047 +static atomic_long_unchecked_t nfs_attr_generation_counter;
39049 static unsigned long nfs_read_attr_generation_counter(void)
39051 - return atomic_long_read(&nfs_attr_generation_counter);
39052 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39055 unsigned long nfs_inc_attr_generation_counter(void)
39057 - return atomic_long_inc_return(&nfs_attr_generation_counter);
39058 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39061 void nfs_fattr_init(struct nfs_fattr *fattr)
39062 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39063 --- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39064 +++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39065 @@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39066 unsigned int strhashval;
39069 + pax_track_stack();
39071 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39072 (long long) lock->lk_offset,
39073 (long long) lock->lk_length);
39074 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39075 --- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39076 +++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39077 @@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39081 + pax_track_stack();
39083 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39084 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39085 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39086 diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39087 --- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39088 +++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39089 @@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39093 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39094 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39098 @@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39100 /* Write the data. */
39101 oldfs = get_fs(); set_fs(KERNEL_DS);
39102 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39103 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39107 @@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39110 oldfs = get_fs(); set_fs(KERNEL_DS);
39111 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39112 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39116 diff -urNp linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c
39117 --- linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-05-19 00:06:34.000000000 -0400
39118 +++ linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-08-14 11:28:46.000000000 -0400
39119 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39123 - if (copy_to_user(buf, &fanotify_event_metadata,
39124 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39125 + copy_to_user(buf, &fanotify_event_metadata,
39126 fanotify_event_metadata.event_len))
39127 goto out_kill_access_response;
39129 diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39130 --- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39131 +++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39132 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39133 * get set to 0 so it will never get 'freed'
39135 static struct fsnotify_event *q_overflow_event;
39136 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39137 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39140 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39141 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39143 u32 fsnotify_get_cookie(void)
39145 - return atomic_inc_return(&fsnotify_sync_cookie);
39146 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39148 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39150 diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39151 --- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39152 +++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39153 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39154 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39155 ~(s64)(ndir->itype.index.block_size - 1)));
39156 /* Bounds checks. */
39157 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39158 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39159 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39160 "inode 0x%lx or driver bug.", vdir->i_ino);
39162 diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39163 --- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39164 +++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39165 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39166 #endif /* NTFS_RW */
39169 -const struct file_operations ntfs_empty_file_ops = {};
39170 +const struct file_operations ntfs_empty_file_ops __read_only;
39172 -const struct inode_operations ntfs_empty_inode_ops = {};
39173 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39174 diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39175 --- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39176 +++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39177 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39181 - atomic_inc(&osb->alloc_stats.moves);
39182 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39186 diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39187 --- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39188 +++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39189 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39190 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39191 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39193 + pax_track_stack();
39195 /* At some point it might be nice to break this function up a
39198 diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39199 --- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39200 +++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39201 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39203 struct ocfs2_alloc_stats
39206 - atomic_t local_data;
39207 - atomic_t bitmap_data;
39208 - atomic_t bg_allocs;
39209 - atomic_t bg_extends;
39210 + atomic_unchecked_t moves;
39211 + atomic_unchecked_t local_data;
39212 + atomic_unchecked_t bitmap_data;
39213 + atomic_unchecked_t bg_allocs;
39214 + atomic_unchecked_t bg_extends;
39217 enum ocfs2_local_alloc_state
39218 diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39219 --- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39220 +++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39221 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39222 mlog_errno(status);
39225 - atomic_inc(&osb->alloc_stats.bg_extends);
39226 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39228 /* You should never ask for this much metadata */
39229 BUG_ON(bits_wanted >
39230 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39231 mlog_errno(status);
39234 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39235 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39237 *suballoc_loc = res.sr_bg_blkno;
39238 *suballoc_bit_start = res.sr_bit_offset;
39239 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39240 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39243 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39244 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39246 BUG_ON(res->sr_bits != 1);
39248 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39249 mlog_errno(status);
39252 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39253 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39255 BUG_ON(res.sr_bits != 1);
39257 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39261 - atomic_inc(&osb->alloc_stats.local_data);
39262 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39264 if (min_clusters > (osb->bitmap_cpg - 1)) {
39265 /* The only paths asking for contiguousness
39266 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39267 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39269 res.sr_bit_offset);
39270 - atomic_inc(&osb->alloc_stats.bitmap_data);
39271 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39272 *num_clusters = res.sr_bits;
39275 diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39276 --- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39277 +++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39278 @@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39279 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39280 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39282 - atomic_read(&osb->alloc_stats.bitmap_data),
39283 - atomic_read(&osb->alloc_stats.local_data),
39284 - atomic_read(&osb->alloc_stats.bg_allocs),
39285 - atomic_read(&osb->alloc_stats.moves),
39286 - atomic_read(&osb->alloc_stats.bg_extends));
39287 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39288 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39289 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39290 + atomic_read_unchecked(&osb->alloc_stats.moves),
39291 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39293 out += snprintf(buf + out, len - out,
39294 "%10s => State: %u Descriptor: %llu Size: %u bits "
39295 @@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39296 spin_lock_init(&osb->osb_xattr_lock);
39297 ocfs2_init_steal_slots(osb);
39299 - atomic_set(&osb->alloc_stats.moves, 0);
39300 - atomic_set(&osb->alloc_stats.local_data, 0);
39301 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39302 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39303 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39304 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39305 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39306 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39307 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39308 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39310 /* Copy the blockcheck stats from the superblock probe */
39311 osb->osb_ecc_stats = *stats;
39312 diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39313 --- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39314 +++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39315 @@ -142,7 +142,7 @@ bail:
39317 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39319 - char *link = nd_get_link(nd);
39320 + const char *link = nd_get_link(nd);
39324 diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39325 --- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39326 +++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39327 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39328 error = locks_verify_truncate(inode, NULL, length);
39330 error = security_path_truncate(&path);
39332 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39336 error = do_truncate(path.dentry, length, 0, NULL);
39338 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39339 if (__mnt_is_readonly(path.mnt))
39342 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39348 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39352 + gr_log_chdir(path.dentry, path.mnt);
39354 set_fs_pwd(current->fs, &path);
39357 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39360 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39362 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39366 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39369 set_fs_pwd(current->fs, &file->f_path);
39371 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39375 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39376 + goto dput_and_out;
39378 + if (gr_handle_chroot_caps(&path)) {
39380 + goto dput_and_out;
39383 set_fs_root(current->fs, &path);
39385 + gr_handle_chroot_chdir(&path);
39390 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39391 err = mnt_want_write_file(file);
39395 mutex_lock(&inode->i_mutex);
39397 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39402 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39405 if (mode == (mode_t) -1)
39406 mode = inode->i_mode;
39408 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39413 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39414 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39415 err = notify_change(dentry, &newattrs);
39416 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39417 error = mnt_want_write(path.mnt);
39421 mutex_lock(&inode->i_mutex);
39423 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39428 error = security_path_chmod(path.dentry, path.mnt, mode);
39431 if (mode == (mode_t) -1)
39432 mode = inode->i_mode;
39434 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39439 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39440 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39441 error = notify_change(path.dentry, &newattrs);
39442 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39444 struct iattr newattrs;
39446 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39449 newattrs.ia_valid = ATTR_CTIME;
39450 if (user != (uid_t) -1) {
39451 newattrs.ia_valid |= ATTR_UID;
39452 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39453 if (!IS_ERR(tmp)) {
39454 fd = get_unused_fd_flags(flags);
39456 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39458 + /* don't allow to be set by userland */
39459 + flags &= ~FMODE_GREXEC;
39460 + f = do_filp_open(dfd, tmp, &op, lookup);
39464 diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39465 --- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39466 +++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39467 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39468 ldm_error ("A VBLK claims to have %d parts.", num);
39473 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39475 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39479 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39480 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39482 ldm_crit ("Out of memory.");
39484 diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39485 --- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39486 +++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39487 @@ -420,9 +420,9 @@ redo:
39489 if (bufs) /* More to do? */
39491 - if (!pipe->writers)
39492 + if (!atomic_read(&pipe->writers))
39494 - if (!pipe->waiting_writers) {
39495 + if (!atomic_read(&pipe->waiting_writers)) {
39496 /* syscall merging: Usually we must not sleep
39497 * if O_NONBLOCK is set, or if we got some data.
39498 * But if a writer sleeps in kernel space, then
39499 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39500 mutex_lock(&inode->i_mutex);
39501 pipe = inode->i_pipe;
39503 - if (!pipe->readers) {
39504 + if (!atomic_read(&pipe->readers)) {
39505 send_sig(SIGPIPE, current, 0);
39508 @@ -530,7 +530,7 @@ redo1:
39512 - if (!pipe->readers) {
39513 + if (!atomic_read(&pipe->readers)) {
39514 send_sig(SIGPIPE, current, 0);
39517 @@ -616,9 +616,9 @@ redo2:
39518 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39521 - pipe->waiting_writers++;
39522 + atomic_inc(&pipe->waiting_writers);
39524 - pipe->waiting_writers--;
39525 + atomic_dec(&pipe->waiting_writers);
39528 mutex_unlock(&inode->i_mutex);
39529 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39531 if (filp->f_mode & FMODE_READ) {
39532 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39533 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39534 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39538 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39539 * Most Unices do not set POLLERR for FIFOs but on Linux they
39540 * behave exactly like pipes for poll().
39542 - if (!pipe->readers)
39543 + if (!atomic_read(&pipe->readers))
39547 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39549 mutex_lock(&inode->i_mutex);
39550 pipe = inode->i_pipe;
39551 - pipe->readers -= decr;
39552 - pipe->writers -= decw;
39553 + atomic_sub(decr, &pipe->readers);
39554 + atomic_sub(decw, &pipe->writers);
39556 - if (!pipe->readers && !pipe->writers) {
39557 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39558 free_pipe_info(inode);
39560 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39561 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39563 if (inode->i_pipe) {
39565 - inode->i_pipe->readers++;
39566 + atomic_inc(&inode->i_pipe->readers);
39569 mutex_unlock(&inode->i_mutex);
39570 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39572 if (inode->i_pipe) {
39574 - inode->i_pipe->writers++;
39575 + atomic_inc(&inode->i_pipe->writers);
39578 mutex_unlock(&inode->i_mutex);
39579 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39580 if (inode->i_pipe) {
39582 if (filp->f_mode & FMODE_READ)
39583 - inode->i_pipe->readers++;
39584 + atomic_inc(&inode->i_pipe->readers);
39585 if (filp->f_mode & FMODE_WRITE)
39586 - inode->i_pipe->writers++;
39587 + atomic_inc(&inode->i_pipe->writers);
39590 mutex_unlock(&inode->i_mutex);
39591 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39592 inode->i_pipe = NULL;
39595 -static struct vfsmount *pipe_mnt __read_mostly;
39596 +struct vfsmount *pipe_mnt __read_mostly;
39599 * pipefs_dname() is called from d_path().
39600 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39602 inode->i_pipe = pipe;
39604 - pipe->readers = pipe->writers = 1;
39605 + atomic_set(&pipe->readers, 1);
39606 + atomic_set(&pipe->writers, 1);
39607 inode->i_fop = &rdwr_pipefifo_fops;
39610 diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39611 --- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39612 +++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39614 #include <linux/tty.h>
39615 #include <linux/string.h>
39616 #include <linux/mman.h>
39617 +#include <linux/grsecurity.h>
39618 #include <linux/proc_fs.h>
39619 #include <linux/ioport.h>
39620 #include <linux/uaccess.h>
39621 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39625 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39626 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39629 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39630 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39631 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39632 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39633 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39634 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39636 + seq_printf(m, "PaX:\t-----\n");
39640 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39641 struct pid *pid, struct task_struct *task)
39643 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39644 task_cpus_allowed(m, task);
39645 cpuset_task_status_allowed(m, task);
39646 task_context_switch_counts(m, task);
39648 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39649 + task_pax(m, task);
39652 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39653 + task_grsec_rbac(m, task);
39659 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39660 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39661 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39662 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39665 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39666 struct pid *pid, struct task_struct *task, int whole)
39668 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39669 cputime_t cutime, cstime, utime, stime;
39670 cputime_t cgtime, gtime;
39671 unsigned long rsslim = 0;
39672 - char tcomm[sizeof(task->comm)];
39673 + char tcomm[sizeof(task->comm)] = { 0 };
39674 unsigned long flags;
39676 + pax_track_stack();
39678 state = *get_task_state(task);
39679 vsize = eip = esp = 0;
39680 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39681 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39682 gtime = task->gtime;
39685 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39686 + if (PAX_RAND_FLAGS(mm)) {
39692 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39698 /* scale priority and nice values from timeslices to -20..20 */
39699 /* to make it look like a "normal" Unix priority/nice value */
39700 priority = task_prio(task);
39701 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39703 mm ? get_mm_rss(mm) : 0,
39705 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39706 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39707 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39708 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39710 mm ? (permitted ? mm->start_code : 1) : 0,
39711 mm ? (permitted ? mm->end_code : 1) : 0,
39712 (permitted && mm) ? mm->start_stack : 0,
39716 /* The signal information here is obsolete.
39717 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39722 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39723 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39726 + unsigned long flags;
39728 + if (lock_task_sighand(task, &flags)) {
39729 + curr_ip = task->signal->curr_ip;
39730 + unlock_task_sighand(task, &flags);
39733 + return sprintf(buffer, "%pI4\n", &curr_ip);
39736 diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39737 --- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39738 +++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39739 @@ -104,6 +104,22 @@ struct pid_entry {
39743 +struct getdents_callback {
39744 + struct linux_dirent __user * current_dir;
39745 + struct linux_dirent __user * previous;
39746 + struct file * file;
39751 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39752 + loff_t offset, u64 ino, unsigned int d_type)
39754 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39755 + buf->error = -EINVAL;
39759 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39761 .len = sizeof(NAME) - 1, \
39762 @@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39763 if (task == current)
39766 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39767 + return ERR_PTR(-EPERM);
39770 * If current is actively ptrace'ing, and would also be
39771 * permitted to freshly attach with ptrace now, permit it.
39772 @@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39774 goto out_mm; /* Shh! No looking before we're done */
39776 + if (gr_acl_handle_procpidmem(task))
39779 len = mm->arg_end - mm->arg_start;
39781 if (len > PAGE_SIZE)
39782 @@ -306,12 +328,28 @@ out:
39786 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39787 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39788 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39789 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39792 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39794 struct mm_struct *mm = mm_for_maps(task);
39795 int res = PTR_ERR(mm);
39796 if (mm && !IS_ERR(mm)) {
39797 unsigned int nwords = 0;
39799 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39800 + /* allow if we're currently ptracing this task */
39801 + if (PAX_RAND_FLAGS(mm) &&
39802 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39810 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39811 @@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39815 -#ifdef CONFIG_KALLSYMS
39816 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39818 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39819 * Returns the resolved symbol. If that fails, simply return the address.
39820 @@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39821 mutex_unlock(&task->signal->cred_guard_mutex);
39824 -#ifdef CONFIG_STACKTRACE
39825 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39827 #define MAX_STACK_TRACE_DEPTH 64
39829 @@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39833 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39834 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39835 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39838 @@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39839 /************************************************************************/
39841 /* permission checks */
39842 -static int proc_fd_access_allowed(struct inode *inode)
39843 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39845 struct task_struct *task;
39847 @@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39849 task = get_proc_task(inode);
39851 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39853 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39855 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39856 put_task_struct(task);
39859 @@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39863 + if (gr_acl_handle_procpidmem(task))
39867 page = (char *)__get_free_page(GFP_TEMPORARY);
39869 @@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39870 path_put(&nd->path);
39872 /* Are we allowed to snoop on the tasks file descriptors? */
39873 - if (!proc_fd_access_allowed(inode))
39874 + if (!proc_fd_access_allowed(inode,0))
39877 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39878 @@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39881 /* Are we allowed to snoop on the tasks file descriptors? */
39882 - if (!proc_fd_access_allowed(inode))
39884 + /* logging this is needed for learning on chromium to work properly,
39885 + but we don't want to flood the logs from 'ps' which does a readlink
39886 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39887 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39889 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39890 + if (!proc_fd_access_allowed(inode,0))
39893 + if (!proc_fd_access_allowed(inode,1))
39897 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39899 @@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39901 cred = __task_cred(task);
39902 inode->i_uid = cred->euid;
39903 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39904 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39906 inode->i_gid = cred->egid;
39910 security_task_to_inode(task, inode);
39911 @@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39912 struct inode *inode = dentry->d_inode;
39913 struct task_struct *task;
39914 const struct cred *cred;
39915 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39916 + const struct cred *tmpcred = current_cred();
39919 generic_fillattr(inode, stat);
39921 @@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39924 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39926 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39927 + rcu_read_unlock();
39932 + cred = __task_cred(task);
39933 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39934 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39935 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39936 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39940 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39941 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39942 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39943 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39944 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39946 task_dumpable(task)) {
39947 - cred = __task_cred(task);
39948 stat->uid = cred->euid;
39949 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39950 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39952 stat->gid = cred->egid;
39955 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39957 + rcu_read_unlock();
39964 @@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39967 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39968 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39969 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39970 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39971 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39973 task_dumpable(task)) {
39975 cred = __task_cred(task);
39976 inode->i_uid = cred->euid;
39977 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39978 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39980 inode->i_gid = cred->egid;
39985 @@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
39986 int fd = proc_fd(inode);
39989 - files = get_files_struct(task);
39990 + if (!gr_acl_handle_procpidmem(task))
39991 + files = get_files_struct(task);
39992 put_task_struct(task);
39995 @@ -2219,15 +2318,25 @@ static const struct file_operations proc
39997 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39999 + struct task_struct *task;
40002 if (flags & IPERM_FLAG_RCU)
40004 rv = generic_permission(inode, mask, flags, NULL);
40008 if (task_pid(current) == proc_pid(inode))
40011 + task = get_proc_task(inode);
40012 + if (task == NULL)
40015 + if (gr_acl_handle_procpidmem(task))
40018 + put_task_struct(task);
40023 @@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
40027 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40031 * Yes, it does not scale. And it should not. Don't add
40032 * new entries into /proc/<tgid>/ without very good reasons.
40033 @@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
40037 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40043 @@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
40044 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
40047 - char *s = nd_get_link(nd);
40048 + const char *s = nd_get_link(nd);
40052 @@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
40053 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
40055 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40056 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40057 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40058 INF("syscall", S_IRUGO, proc_pid_syscall),
40060 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40061 @@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
40062 #ifdef CONFIG_SECURITY
40063 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40065 -#ifdef CONFIG_KALLSYMS
40066 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40067 INF("wchan", S_IRUGO, proc_pid_wchan),
40069 -#ifdef CONFIG_STACKTRACE
40070 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40071 ONE("stack", S_IRUGO, proc_pid_stack),
40073 #ifdef CONFIG_SCHEDSTATS
40074 @@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40075 #ifdef CONFIG_TASK_IO_ACCOUNTING
40076 INF("io", S_IRUSR, proc_tgid_io_accounting),
40078 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40079 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40083 static int proc_tgid_base_readdir(struct file * filp,
40084 @@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40088 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40089 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40090 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40091 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40092 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40094 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40096 inode->i_op = &proc_tgid_base_inode_operations;
40097 inode->i_fop = &proc_tgid_base_operations;
40098 inode->i_flags|=S_IMMUTABLE;
40099 @@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40103 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40104 + goto out_put_task;
40106 result = proc_pid_instantiate(dir, dentry, task, NULL);
40108 put_task_struct(task);
40111 @@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40114 struct task_struct *reaper;
40115 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40116 + const struct cred *tmpcred = current_cred();
40117 + const struct cred *itercred;
40119 + filldir_t __filldir = filldir;
40120 struct tgid_iter iter;
40121 struct pid_namespace *ns;
40123 @@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40124 for (iter = next_tgid(ns, iter);
40126 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40127 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40129 + itercred = __task_cred(iter.task);
40131 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40132 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40133 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40134 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40135 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40140 + __filldir = &gr_fake_filldir;
40142 + __filldir = filldir;
40143 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40144 + rcu_read_unlock();
40146 filp->f_pos = iter.tgid + TGID_OFFSET;
40147 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40148 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40149 put_task_struct(iter.task);
40152 @@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40153 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40155 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40156 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40157 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40158 INF("syscall", S_IRUGO, proc_pid_syscall),
40160 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40161 @@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40162 #ifdef CONFIG_SECURITY
40163 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40165 -#ifdef CONFIG_KALLSYMS
40166 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40167 INF("wchan", S_IRUGO, proc_pid_wchan),
40169 -#ifdef CONFIG_STACKTRACE
40170 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40171 ONE("stack", S_IRUGO, proc_pid_stack),
40173 #ifdef CONFIG_SCHEDSTATS
40174 diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40175 --- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40176 +++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40177 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40179 static int __init proc_cmdline_init(void)
40181 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40182 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40184 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40188 module_init(proc_cmdline_init);
40189 diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40190 --- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40191 +++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40192 @@ -64,7 +64,11 @@ static const struct file_operations proc
40194 static int __init proc_devices_init(void)
40196 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40197 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40199 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40203 module_init(proc_devices_init);
40204 diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40205 --- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40206 +++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40207 @@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40209 inode->i_mode = de->mode;
40210 inode->i_uid = de->uid;
40211 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40212 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40214 inode->i_gid = de->gid;
40218 inode->i_size = de->size;
40219 diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40220 --- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40221 +++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40222 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40223 struct pid *pid, struct task_struct *task);
40224 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40225 struct pid *pid, struct task_struct *task);
40226 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40227 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40229 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40231 extern const struct file_operations proc_maps_operations;
40232 diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40233 --- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40234 +++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40235 @@ -30,12 +30,12 @@ config PROC_FS
40238 bool "/proc/kcore support" if !ARM
40239 - depends on PROC_FS && MMU
40240 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40243 bool "/proc/vmcore support"
40244 - depends on PROC_FS && CRASH_DUMP
40246 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40249 Exports the dump image of crashed kernel in ELF format.
40251 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40254 config PROC_PAGE_MONITOR
40256 - depends on PROC_FS && MMU
40258 + depends on PROC_FS && MMU && !GRKERNSEC
40259 bool "Enable /proc page monitoring" if EXPERT
40261 Various /proc files exist to monitor process memory utilization:
40262 diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40263 --- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40264 +++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40265 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40267 struct kcore_list *m;
40269 + pax_track_stack();
40271 /* setup ELF header */
40272 elf = (struct elfhdr *) bufp;
40273 bufp += sizeof(struct elfhdr);
40274 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40275 * the addresses in the elf_phdr on our list.
40277 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40278 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40279 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40280 + if (tsz > buflen)
40285 struct kcore_list *m;
40287 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40290 if (kern_addr_valid(start)) {
40293 + mm_segment_t oldfs;
40295 - n = copy_to_user(buffer, (char *)start, tsz);
40297 - * We cannot distingush between fault on source
40298 - * and fault on destination. When this happens
40299 - * we clear too and hope it will trigger the
40303 - if (clear_user(buffer + tsz - n,
40305 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40308 + oldfs = get_fs();
40309 + set_fs(KERNEL_DS);
40310 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40312 + if (copy_to_user(buffer, elf_buf, tsz)) {
40320 if (clear_user(buffer, tsz))
40322 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40324 static int open_kcore(struct inode *inode, struct file *filp)
40326 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40329 if (!capable(CAP_SYS_RAWIO))
40331 if (kcore_need_update)
40332 diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40333 --- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40334 +++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40335 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40336 unsigned long pages[NR_LRU_LISTS];
40339 + pax_track_stack();
40342 * display in kilobytes.
40344 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40346 vmi.largest_chunk >> 10
40347 #ifdef CONFIG_MEMORY_FAILURE
40348 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40349 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40351 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40352 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40353 diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40354 --- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40355 +++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40356 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40359 seq_printf(m, "%*c", len, ' ');
40360 - seq_path(m, &file->f_path, "");
40361 + seq_path(m, &file->f_path, "\n\\");
40365 diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40366 --- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40367 +++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40368 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40369 struct task_struct *task;
40370 struct nsproxy *ns;
40371 struct net *net = NULL;
40372 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40373 + const struct cred *cred = current_cred();
40376 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40379 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40380 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40385 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40386 diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40387 --- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40388 +++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40390 #include <linux/namei.h>
40391 #include "internal.h"
40393 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40395 static const struct dentry_operations proc_sys_dentry_operations;
40396 static const struct file_operations proc_sys_file_operations;
40397 static const struct inode_operations proc_sys_inode_operations;
40398 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40402 + if (gr_handle_sysctl(p, MAY_EXEC))
40405 err = ERR_PTR(-ENOMEM);
40406 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40408 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40409 if (*pos < file->f_pos)
40412 + if (gr_handle_sysctl(table, 0))
40415 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40418 @@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40420 return PTR_ERR(head);
40422 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40425 generic_fillattr(inode, stat);
40427 stat->mode = (stat->mode & S_IFMT) | table->mode;
40428 diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40429 --- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40430 +++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40431 @@ -122,7 +122,15 @@ void __init proc_root_init(void)
40432 #ifdef CONFIG_PROC_DEVICETREE
40433 proc_device_tree_init();
40435 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40436 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40437 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40438 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40439 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40442 proc_mkdir("bus", NULL);
40447 diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40448 --- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40449 +++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40450 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40451 "VmExe:\t%8lu kB\n"
40452 "VmLib:\t%8lu kB\n"
40453 "VmPTE:\t%8lu kB\n"
40454 - "VmSwap:\t%8lu kB\n",
40455 - hiwater_vm << (PAGE_SHIFT-10),
40456 + "VmSwap:\t%8lu kB\n"
40458 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40459 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40462 + ,hiwater_vm << (PAGE_SHIFT-10),
40463 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40464 mm->locked_vm << (PAGE_SHIFT-10),
40465 hiwater_rss << (PAGE_SHIFT-10),
40466 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40467 data << (PAGE_SHIFT-10),
40468 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40469 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40470 - swap << (PAGE_SHIFT-10));
40471 + swap << (PAGE_SHIFT-10)
40473 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40474 + , mm->context.user_cs_base, mm->context.user_cs_limit
40480 unsigned long task_vsize(struct mm_struct *mm)
40481 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40485 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40486 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40487 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40488 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40491 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40493 struct mm_struct *mm = vma->vm_mm;
40494 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40495 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40498 - /* We don't show the stack guard page in /proc/maps */
40499 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40500 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40501 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40503 start = vma->vm_start;
40504 - if (stack_guard_page_start(vma, start))
40505 - start += PAGE_SIZE;
40507 - if (stack_guard_page_end(vma, end))
40508 - end -= PAGE_SIZE;
40511 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40513 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40514 flags & VM_WRITE ? 'w' : '-',
40515 flags & VM_EXEC ? 'x' : '-',
40516 flags & VM_MAYSHARE ? 's' : 'p',
40517 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40518 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40522 MAJOR(dev), MINOR(dev), ino, &len);
40525 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40528 pad_len_spaces(m, len);
40529 - seq_path(m, &file->f_path, "\n");
40530 + seq_path(m, &file->f_path, "\n\\");
40532 const char *name = arch_vma_name(vma);
40534 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40535 if (vma->vm_start <= mm->brk &&
40536 vma->vm_end >= mm->start_brk) {
40538 - } else if (vma->vm_start <= mm->start_stack &&
40539 - vma->vm_end >= mm->start_stack) {
40540 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40541 + (vma->vm_start <= mm->start_stack &&
40542 + vma->vm_end >= mm->start_stack)) {
40546 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40549 memset(&mss, 0, sizeof mss);
40551 - /* mmap_sem is held in m_start */
40552 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40553 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40555 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40556 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40559 + /* mmap_sem is held in m_start */
40560 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40561 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40562 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40565 show_map_vma(m, vma);
40568 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40569 "KernelPageSize: %8lu kB\n"
40570 "MMUPageSize: %8lu kB\n"
40571 "Locked: %8lu kB\n",
40572 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40573 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40575 (vma->vm_end - vma->vm_start) >> 10,
40577 mss.resident >> 10,
40578 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40579 mss.shared_clean >> 10,
40580 diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40581 --- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40582 +++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40583 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40585 bytes += kobjsize(mm);
40587 - if (current->fs && current->fs->users > 1)
40588 + if (current->fs && atomic_read(¤t->fs->users) > 1)
40589 sbytes += kobjsize(current->fs);
40591 bytes += kobjsize(current->fs);
40592 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40595 pad_len_spaces(m, len);
40596 - seq_path(m, &file->f_path, "");
40597 + seq_path(m, &file->f_path, "\n\\");
40599 if (vma->vm_start <= mm->start_stack &&
40600 vma->vm_end >= mm->start_stack) {
40601 diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40602 --- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40603 +++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40604 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40605 void quota_send_warning(short type, unsigned int id, dev_t dev,
40606 const char warntype)
40608 - static atomic_t seq;
40609 + static atomic_unchecked_t seq;
40610 struct sk_buff *skb;
40613 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40614 "VFS: Not enough memory to send quota warning.\n");
40617 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40618 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40619 "a_genl_family, 0, QUOTA_NL_C_WARNING);
40622 diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40623 --- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40624 +++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40626 #include <linux/security.h>
40627 #include <linux/syscalls.h>
40628 #include <linux/unistd.h>
40629 +#include <linux/namei.h>
40631 #include <asm/uaccess.h>
40633 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40635 struct readdir_callback {
40636 struct old_linux_dirent __user * dirent;
40637 + struct file * file;
40641 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40642 buf->result = -EOVERFLOW;
40646 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40650 dirent = buf->dirent;
40651 if (!access_ok(VERIFY_WRITE, dirent,
40652 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40655 buf.dirent = dirent;
40658 error = vfs_readdir(file, fillonedir, &buf);
40660 @@ -142,6 +149,7 @@ struct linux_dirent {
40661 struct getdents_callback {
40662 struct linux_dirent __user * current_dir;
40663 struct linux_dirent __user * previous;
40664 + struct file * file;
40668 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40669 buf->error = -EOVERFLOW;
40673 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40676 dirent = buf->previous;
40678 if (__put_user(offset, &dirent->d_off))
40679 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40680 buf.previous = NULL;
40685 error = vfs_readdir(file, filldir, &buf);
40687 @@ -229,6 +242,7 @@ out:
40688 struct getdents_callback64 {
40689 struct linux_dirent64 __user * current_dir;
40690 struct linux_dirent64 __user * previous;
40691 + struct file *file;
40695 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40696 buf->error = -EINVAL; /* only used if we fail.. */
40697 if (reclen > buf->count)
40700 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40703 dirent = buf->previous;
40705 if (__put_user(offset, &dirent->d_off))
40706 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40708 buf.current_dir = dirent;
40709 buf.previous = NULL;
40714 diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40715 --- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40716 +++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40717 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40718 struct reiserfs_dir_entry de;
40721 + pax_track_stack();
40723 reiserfs_write_lock(inode->i_sb);
40725 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40726 diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40727 --- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40728 +++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40729 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40733 - atomic_inc(&(fs_generation(tb->tb_sb)));
40734 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40735 do_balance_starts(tb);
40737 /* balance leaf returns 0 except if combining L R and S into
40738 diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40739 --- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40740 +++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40741 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40742 struct buffer_head *bh;
40745 + pax_track_stack();
40747 bh = __getblk(dev, block, bufsize);
40748 if (buffer_uptodate(bh))
40750 diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40751 --- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40752 +++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40753 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40754 unsigned long savelink = 1;
40755 struct timespec ctime;
40757 + pax_track_stack();
40759 /* three balancings: (1) old name removal, (2) new name insertion
40760 and (3) maybe "save" link insertion
40761 stat data updates: (1) old directory,
40762 diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40763 --- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40764 +++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40765 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40766 "SMALL_TAILS " : "NO_TAILS ",
40767 replay_only(sb) ? "REPLAY_ONLY " : "",
40768 convert_reiserfs(sb) ? "CONV " : "",
40769 - atomic_read(&r->s_generation_counter),
40770 + atomic_read_unchecked(&r->s_generation_counter),
40771 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40772 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40773 SF(s_good_search_by_key_reada), SF(s_bmaps),
40774 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40775 struct journal_params *jp = &rs->s_v1.s_journal;
40776 char b[BDEVNAME_SIZE];
40778 + pax_track_stack();
40780 seq_printf(m, /* on-disk fields */
40781 "jp_journal_1st_block: \t%i\n"
40782 "jp_journal_dev: \t%s[%x]\n"
40783 diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40784 --- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40785 +++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40786 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40790 + pax_track_stack();
40792 BUG_ON(!th->t_trans_id);
40794 init_tb_struct(th, &s_del_balance, sb, path,
40795 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40797 int quota_cut_bytes = 0;
40799 + pax_track_stack();
40801 BUG_ON(!th->t_trans_id);
40803 le_key2cpu_key(&cpu_key, key);
40804 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40805 int quota_cut_bytes;
40806 loff_t tail_pos = 0;
40808 + pax_track_stack();
40810 BUG_ON(!th->t_trans_id);
40812 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40813 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40817 + pax_track_stack();
40819 BUG_ON(!th->t_trans_id);
40821 fs_gen = get_generation(inode->i_sb);
40822 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40824 int quota_bytes = 0;
40826 + pax_track_stack();
40828 BUG_ON(!th->t_trans_id);
40830 if (inode) { /* Do we count quotas for item? */
40831 diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40832 --- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40833 +++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40834 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40835 {.option_name = NULL}
40838 + pax_track_stack();
40841 if (!options || !*options)
40842 /* use default configuration: create tails, journaling on, no
40843 diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40844 --- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40845 +++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40847 #include <linux/module.h>
40848 #include <linux/slab.h>
40849 #include <linux/poll.h>
40850 +#include <linux/security.h>
40851 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40852 #include <linux/file.h>
40853 #include <linux/fdtable.h>
40854 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40855 int retval, i, timed_out = 0;
40856 unsigned long slack = 0;
40858 + pax_track_stack();
40861 retval = max_select_fd(n, fds);
40863 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40864 /* Allocate small arguments on the stack to save memory and be faster */
40865 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40867 + pax_track_stack();
40872 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40873 struct poll_list *walk = head;
40874 unsigned long todo = nfds;
40876 + pax_track_stack();
40878 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40879 if (nfds > rlimit(RLIMIT_NOFILE))
40882 diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40883 --- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40884 +++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40885 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40889 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40890 + m->size = PAGE_SIZE;
40891 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40895 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40899 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40901 + m->buf = kmalloc(m->size, GFP_KERNEL);
40902 return !m->buf ? -ENOMEM : -EAGAIN;
40905 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40906 m->version = file->f_version;
40907 /* grab buffer if we didn't have one */
40909 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40910 + m->size = PAGE_SIZE;
40911 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40915 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40919 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40921 + m->buf = kmalloc(m->size, GFP_KERNEL);
40925 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40926 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40929 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40930 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40934 diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40935 --- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40936 +++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40937 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40941 - if (!pipe->readers) {
40942 + if (!atomic_read(&pipe->readers)) {
40943 send_sig(SIGPIPE, current, 0);
40946 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40950 - pipe->waiting_writers++;
40951 + atomic_inc(&pipe->waiting_writers);
40953 - pipe->waiting_writers--;
40954 + atomic_dec(&pipe->waiting_writers);
40958 @@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40959 .spd_release = spd_release_page,
40962 + pax_track_stack();
40964 if (splice_grow_spd(pipe, &spd))
40967 @@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40970 /* The cast to a user pointer is valid due to the set_fs() */
40971 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40972 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40976 @@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40979 /* The cast to a user pointer is valid due to the set_fs() */
40980 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40981 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40985 @@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
40986 .spd_release = spd_release_page,
40989 + pax_track_stack();
40991 if (splice_grow_spd(pipe, &spd))
40994 @@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
40997 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40998 - vec[i].iov_base = (void __user *) page_address(page);
40999 + vec[i].iov_base = (__force void __user *) page_address(page);
41000 vec[i].iov_len = this_len;
41001 spd.pages[i] = page;
41003 @@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
41004 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
41006 while (!pipe->nrbufs) {
41007 - if (!pipe->writers)
41008 + if (!atomic_read(&pipe->writers))
41011 - if (!pipe->waiting_writers && sd->num_spliced)
41012 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
41015 if (sd->flags & SPLICE_F_NONBLOCK)
41016 @@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
41017 * out of the pipe right after the splice_to_pipe(). So set
41018 * PIPE_READERS appropriately.
41020 - pipe->readers = 1;
41021 + atomic_set(&pipe->readers, 1);
41023 current->splice_pipe = pipe;
41025 @@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
41029 + pax_track_stack();
41031 pipe = get_pipe_info(file);
41034 @@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
41035 ret = -ERESTARTSYS;
41038 - if (!pipe->writers)
41039 + if (!atomic_read(&pipe->writers))
41041 - if (!pipe->waiting_writers) {
41042 + if (!atomic_read(&pipe->waiting_writers)) {
41043 if (flags & SPLICE_F_NONBLOCK) {
41046 @@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
41049 while (pipe->nrbufs >= pipe->buffers) {
41050 - if (!pipe->readers) {
41051 + if (!atomic_read(&pipe->readers)) {
41052 send_sig(SIGPIPE, current, 0);
41055 @@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
41056 ret = -ERESTARTSYS;
41059 - pipe->waiting_writers++;
41060 + atomic_inc(&pipe->waiting_writers);
41062 - pipe->waiting_writers--;
41063 + atomic_dec(&pipe->waiting_writers);
41067 @@ -1815,14 +1821,14 @@ retry:
41068 pipe_double_lock(ipipe, opipe);
41071 - if (!opipe->readers) {
41072 + if (!atomic_read(&opipe->readers)) {
41073 send_sig(SIGPIPE, current, 0);
41079 - if (!ipipe->nrbufs && !ipipe->writers)
41080 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41084 @@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41085 pipe_double_lock(ipipe, opipe);
41088 - if (!opipe->readers) {
41089 + if (!atomic_read(&opipe->readers)) {
41090 send_sig(SIGPIPE, current, 0);
41093 @@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41094 * return EAGAIN if we have the potential of some data in the
41095 * future, otherwise just return 0
41097 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41098 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41101 pipe_unlock(ipipe);
41102 diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41103 --- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41104 +++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41105 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41107 struct sysfs_open_dirent {
41110 + atomic_unchecked_t event;
41111 wait_queue_head_t poll;
41112 struct list_head buffers; /* goes through sysfs_buffer.list */
41114 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41115 if (!sysfs_get_active(attr_sd))
41118 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41119 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41120 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41122 sysfs_put_active(attr_sd);
41123 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41126 atomic_set(&new_od->refcnt, 0);
41127 - atomic_set(&new_od->event, 1);
41128 + atomic_set_unchecked(&new_od->event, 1);
41129 init_waitqueue_head(&new_od->poll);
41130 INIT_LIST_HEAD(&new_od->buffers);
41132 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41134 sysfs_put_active(attr_sd);
41136 - if (buffer->event != atomic_read(&od->event))
41137 + if (buffer->event != atomic_read_unchecked(&od->event))
41140 return DEFAULT_POLLMASK;
41141 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41143 od = sd->s_attr.open;
41145 - atomic_inc(&od->event);
41146 + atomic_inc_unchecked(&od->event);
41147 wake_up_interruptible(&od->poll);
41150 diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41151 --- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41152 +++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41153 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41155 .s_count = ATOMIC_INIT(1),
41156 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41157 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41158 + .s_mode = S_IFDIR | S_IRWXU,
41160 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41165 diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41166 --- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41167 +++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41168 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41170 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41172 - char *page = nd_get_link(nd);
41173 + const char *page = nd_get_link(nd);
41175 free_page((unsigned long)page);
41177 diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41178 --- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41179 +++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41180 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41181 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41184 + pax_track_stack();
41186 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41187 prev_epos.block = iinfo->i_location;
41188 prev_epos.bh = NULL;
41189 diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41190 --- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41191 +++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41192 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41194 u8 udf_tag_checksum(const struct tag *t)
41196 - u8 *data = (u8 *)t;
41197 + const u8 *data = (const u8 *)t;
41200 for (i = 0; i < sizeof(struct tag); ++i)
41201 diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41202 --- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41203 +++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41205 #include <linux/compiler.h>
41206 #include <linux/file.h>
41207 #include <linux/fs.h>
41208 +#include <linux/security.h>
41209 #include <linux/linkage.h>
41210 #include <linux/mount.h>
41211 #include <linux/namei.h>
41212 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41213 goto mnt_drop_write_and_out;
41217 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41219 + goto mnt_drop_write_and_out;
41222 mutex_lock(&inode->i_mutex);
41223 error = notify_change(path->dentry, &newattrs);
41224 mutex_unlock(&inode->i_mutex);
41225 diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41226 --- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41227 +++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41230 posix_acl_from_xattr(const void *value, size_t size)
41232 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41233 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41234 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41235 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41237 struct posix_acl *acl;
41238 struct posix_acl_entry *acl_e;
41239 diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41240 --- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41241 +++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41242 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41243 * Extended attribute SET operations
41246 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41247 +setxattr(struct path *path, const char __user *name, const void __user *value,
41248 size_t size, int flags)
41251 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41252 return PTR_ERR(kvalue);
41255 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41256 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41261 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41266 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41268 error = mnt_want_write(path.mnt);
41270 - error = setxattr(path.dentry, name, value, size, flags);
41271 + error = setxattr(&path, name, value, size, flags);
41272 mnt_drop_write(path.mnt);
41275 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41277 error = mnt_want_write(path.mnt);
41279 - error = setxattr(path.dentry, name, value, size, flags);
41280 + error = setxattr(&path, name, value, size, flags);
41281 mnt_drop_write(path.mnt);
41284 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41285 const void __user *,value, size_t, size, int, flags)
41288 - struct dentry *dentry;
41289 int error = -EBADF;
41294 - dentry = f->f_path.dentry;
41295 - audit_inode(NULL, dentry);
41296 + audit_inode(NULL, f->f_path.dentry);
41297 error = mnt_want_write_file(f);
41299 - error = setxattr(dentry, name, value, size, flags);
41300 + error = setxattr(&f->f_path, name, value, size, flags);
41301 mnt_drop_write(f->f_path.mnt);
41304 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41305 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41306 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41307 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41308 xfs_fsop_geom_t fsgeo;
41311 + memset(&fsgeo, 0, sizeof(fsgeo));
41312 error = xfs_fs_geometry(mp, &fsgeo, 3);
41315 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41316 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41317 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41318 @@ -128,7 +128,7 @@ xfs_find_handle(
41322 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41323 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41324 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41327 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41328 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41329 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41330 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41331 struct nameidata *nd,
41334 - char *s = nd_get_link(nd);
41335 + const char *s = nd_get_link(nd);
41339 diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41340 --- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41341 +++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41342 @@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41346 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41347 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41351 diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41352 --- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41353 +++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41354 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41357 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41358 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41359 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41360 + char name[sfep->namelen];
41361 + memcpy(name, sfep->name, sfep->namelen);
41362 + if (filldir(dirent, name, sfep->namelen,
41363 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41364 + *offset = off & 0x7fffffff;
41367 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41368 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41369 *offset = off & 0x7fffffff;
41371 diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41372 --- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41373 +++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41375 +#include <linux/kernel.h>
41376 +#include <linux/mm.h>
41377 +#include <linux/slab.h>
41378 +#include <linux/vmalloc.h>
41379 +#include <linux/gracl.h>
41380 +#include <linux/grsecurity.h>
41382 +static unsigned long alloc_stack_next = 1;
41383 +static unsigned long alloc_stack_size = 1;
41384 +static void **alloc_stack;
41386 +static __inline__ int
41389 + if (alloc_stack_next == 1)
41392 + kfree(alloc_stack[alloc_stack_next - 2]);
41394 + alloc_stack_next--;
41399 +static __inline__ int
41400 +alloc_push(void *buf)
41402 + if (alloc_stack_next >= alloc_stack_size)
41405 + alloc_stack[alloc_stack_next - 1] = buf;
41407 + alloc_stack_next++;
41413 +acl_alloc(unsigned long len)
41415 + void *ret = NULL;
41417 + if (!len || len > PAGE_SIZE)
41420 + ret = kmalloc(len, GFP_KERNEL);
41423 + if (alloc_push(ret)) {
41434 +acl_alloc_num(unsigned long num, unsigned long len)
41436 + if (!len || (num > (PAGE_SIZE / len)))
41439 + return acl_alloc(num * len);
41443 +acl_free_all(void)
41445 + if (gr_acl_is_enabled() || !alloc_stack)
41448 + while (alloc_pop()) ;
41450 + if (alloc_stack) {
41451 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41452 + kfree(alloc_stack);
41454 + vfree(alloc_stack);
41457 + alloc_stack = NULL;
41458 + alloc_stack_size = 1;
41459 + alloc_stack_next = 1;
41465 +acl_alloc_stack_init(unsigned long size)
41467 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41469 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41471 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41473 + alloc_stack_size = size;
41475 + if (!alloc_stack)
41480 diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41481 --- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41482 +++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41484 +#include <linux/kernel.h>
41485 +#include <linux/module.h>
41486 +#include <linux/sched.h>
41487 +#include <linux/mm.h>
41488 +#include <linux/file.h>
41489 +#include <linux/fs.h>
41490 +#include <linux/namei.h>
41491 +#include <linux/mount.h>
41492 +#include <linux/tty.h>
41493 +#include <linux/proc_fs.h>
41494 +#include <linux/lglock.h>
41495 +#include <linux/slab.h>
41496 +#include <linux/vmalloc.h>
41497 +#include <linux/types.h>
41498 +#include <linux/sysctl.h>
41499 +#include <linux/netdevice.h>
41500 +#include <linux/ptrace.h>
41501 +#include <linux/gracl.h>
41502 +#include <linux/gralloc.h>
41503 +#include <linux/grsecurity.h>
41504 +#include <linux/grinternal.h>
41505 +#include <linux/pid_namespace.h>
41506 +#include <linux/fdtable.h>
41507 +#include <linux/percpu.h>
41509 +#include <asm/uaccess.h>
41510 +#include <asm/errno.h>
41511 +#include <asm/mman.h>
41513 +static struct acl_role_db acl_role_set;
41514 +static struct name_db name_set;
41515 +static struct inodev_db inodev_set;
41517 +/* for keeping track of userspace pointers used for subjects, so we
41518 + can share references in the kernel as well
41521 +static struct path real_root;
41523 +static struct acl_subj_map_db subj_map_set;
41525 +static struct acl_role_label *default_role;
41527 +static struct acl_role_label *role_list;
41529 +static u16 acl_sp_role_value;
41531 +extern char *gr_shared_page[4];
41532 +static DEFINE_MUTEX(gr_dev_mutex);
41533 +DEFINE_RWLOCK(gr_inode_lock);
41535 +struct gr_arg *gr_usermode;
41537 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41539 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41540 +extern void gr_clear_learn_entries(void);
41542 +#ifdef CONFIG_GRKERNSEC_RESLOG
41543 +extern void gr_log_resource(const struct task_struct *task,
41544 + const int res, const unsigned long wanted, const int gt);
41547 +unsigned char *gr_system_salt;
41548 +unsigned char *gr_system_sum;
41550 +static struct sprole_pw **acl_special_roles = NULL;
41551 +static __u16 num_sprole_pws = 0;
41553 +static struct acl_role_label *kernel_role = NULL;
41555 +static unsigned int gr_auth_attempts = 0;
41556 +static unsigned long gr_auth_expires = 0UL;
41559 +extern struct vfsmount *sock_mnt;
41562 +extern struct vfsmount *pipe_mnt;
41563 +extern struct vfsmount *shm_mnt;
41564 +#ifdef CONFIG_HUGETLBFS
41565 +extern struct vfsmount *hugetlbfs_vfsmount;
41568 +static struct acl_object_label *fakefs_obj_rw;
41569 +static struct acl_object_label *fakefs_obj_rwx;
41571 +extern int gr_init_uidset(void);
41572 +extern void gr_free_uidset(void);
41573 +extern void gr_remove_uid(uid_t uid);
41574 +extern int gr_find_uid(uid_t uid);
41576 +DECLARE_BRLOCK(vfsmount_lock);
41579 +gr_acl_is_enabled(void)
41581 + return (gr_status & GR_READY);
41584 +#ifdef CONFIG_BTRFS_FS
41585 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41586 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41589 +static inline dev_t __get_dev(const struct dentry *dentry)
41591 +#ifdef CONFIG_BTRFS_FS
41592 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41593 + return get_btrfs_dev_from_inode(dentry->d_inode);
41596 + return dentry->d_inode->i_sb->s_dev;
41599 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41601 + return __get_dev(dentry);
41604 +static char gr_task_roletype_to_char(struct task_struct *task)
41606 + switch (task->role->roletype &
41607 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41608 + GR_ROLE_SPECIAL)) {
41609 + case GR_ROLE_DEFAULT:
41611 + case GR_ROLE_USER:
41613 + case GR_ROLE_GROUP:
41615 + case GR_ROLE_SPECIAL:
41622 +char gr_roletype_to_char(void)
41624 + return gr_task_roletype_to_char(current);
41628 +gr_acl_tpe_check(void)
41630 + if (unlikely(!(gr_status & GR_READY)))
41632 + if (current->role->roletype & GR_ROLE_TPE)
41639 +gr_handle_rawio(const struct inode *inode)
41641 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41642 + if (inode && S_ISBLK(inode->i_mode) &&
41643 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41644 + !capable(CAP_SYS_RAWIO))
41651 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41653 + if (likely(lena != lenb))
41656 + return !memcmp(a, b, lena);
41659 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41661 + *buflen -= namelen;
41663 + return -ENAMETOOLONG;
41664 + *buffer -= namelen;
41665 + memcpy(*buffer, str, namelen);
41669 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41671 + return prepend(buffer, buflen, name->name, name->len);
41674 +static int prepend_path(const struct path *path, struct path *root,
41675 + char **buffer, int *buflen)
41677 + struct dentry *dentry = path->dentry;
41678 + struct vfsmount *vfsmnt = path->mnt;
41679 + bool slash = false;
41682 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41683 + struct dentry * parent;
41685 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41686 + /* Global root? */
41687 + if (vfsmnt->mnt_parent == vfsmnt) {
41690 + dentry = vfsmnt->mnt_mountpoint;
41691 + vfsmnt = vfsmnt->mnt_parent;
41694 + parent = dentry->d_parent;
41695 + prefetch(parent);
41696 + spin_lock(&dentry->d_lock);
41697 + error = prepend_name(buffer, buflen, &dentry->d_name);
41698 + spin_unlock(&dentry->d_lock);
41700 + error = prepend(buffer, buflen, "/", 1);
41709 + if (!error && !slash)
41710 + error = prepend(buffer, buflen, "/", 1);
41715 +/* this must be called with vfsmount_lock and rename_lock held */
41717 +static char *__our_d_path(const struct path *path, struct path *root,
41718 + char *buf, int buflen)
41720 + char *res = buf + buflen;
41723 + prepend(&res, &buflen, "\0", 1);
41724 + error = prepend_path(path, root, &res, &buflen);
41726 + return ERR_PTR(error);
41732 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41736 + retval = __our_d_path(path, root, buf, buflen);
41737 + if (unlikely(IS_ERR(retval)))
41738 + retval = strcpy(buf, "<path too long>");
41739 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41740 + retval[1] = '\0';
41746 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41747 + char *buf, int buflen)
41749 + struct path path;
41752 + path.dentry = (struct dentry *)dentry;
41753 + path.mnt = (struct vfsmount *)vfsmnt;
41755 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41756 + by the RBAC system */
41757 + res = gen_full_path(&path, &real_root, buf, buflen);
41763 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41764 + char *buf, int buflen)
41767 + struct path path;
41768 + struct path root;
41769 + struct task_struct *reaper = &init_task;
41771 + path.dentry = (struct dentry *)dentry;
41772 + path.mnt = (struct vfsmount *)vfsmnt;
41774 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41775 + get_fs_root(reaper->fs, &root);
41777 + write_seqlock(&rename_lock);
41778 + br_read_lock(vfsmount_lock);
41779 + res = gen_full_path(&path, &root, buf, buflen);
41780 + br_read_unlock(vfsmount_lock);
41781 + write_sequnlock(&rename_lock);
41788 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41791 + write_seqlock(&rename_lock);
41792 + br_read_lock(vfsmount_lock);
41793 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41795 + br_read_unlock(vfsmount_lock);
41796 + write_sequnlock(&rename_lock);
41801 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41803 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41808 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41810 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41815 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41817 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41822 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41824 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41829 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41831 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41836 +to_gr_audit(const __u32 reqmode)
41838 + /* masks off auditable permission flags, then shifts them to create
41839 + auditing flags, and adds the special case of append auditing if
41840 + we're requesting write */
41841 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41844 +struct acl_subject_label *
41845 +lookup_subject_map(const struct acl_subject_label *userp)
41847 + unsigned int index = shash(userp, subj_map_set.s_size);
41848 + struct subject_map *match;
41850 + match = subj_map_set.s_hash[index];
41852 + while (match && match->user != userp)
41853 + match = match->next;
41855 + if (match != NULL)
41856 + return match->kernel;
41862 +insert_subj_map_entry(struct subject_map *subjmap)
41864 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41865 + struct subject_map **curr;
41867 + subjmap->prev = NULL;
41869 + curr = &subj_map_set.s_hash[index];
41870 + if (*curr != NULL)
41871 + (*curr)->prev = subjmap;
41873 + subjmap->next = *curr;
41879 +static struct acl_role_label *
41880 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41883 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41884 + struct acl_role_label *match;
41885 + struct role_allowed_ip *ipp;
41887 + u32 curr_ip = task->signal->curr_ip;
41889 + task->signal->saved_ip = curr_ip;
41891 + match = acl_role_set.r_hash[index];
41894 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41895 + for (x = 0; x < match->domain_child_num; x++) {
41896 + if (match->domain_children[x] == uid)
41899 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41901 + match = match->next;
41904 + if (match == NULL) {
41906 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41907 + match = acl_role_set.r_hash[index];
41910 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41911 + for (x = 0; x < match->domain_child_num; x++) {
41912 + if (match->domain_children[x] == gid)
41915 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41917 + match = match->next;
41920 + if (match == NULL)
41921 + match = default_role;
41922 + if (match->allowed_ips == NULL)
41925 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41927 + ((ntohl(curr_ip) & ipp->netmask) ==
41928 + (ntohl(ipp->addr) & ipp->netmask)))
41931 + match = default_role;
41933 + } else if (match->allowed_ips == NULL) {
41936 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41938 + ((ntohl(curr_ip) & ipp->netmask) ==
41939 + (ntohl(ipp->addr) & ipp->netmask)))
41948 +struct acl_subject_label *
41949 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41950 + const struct acl_role_label *role)
41952 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41953 + struct acl_subject_label *match;
41955 + match = role->subj_hash[index];
41957 + while (match && (match->inode != ino || match->device != dev ||
41958 + (match->mode & GR_DELETED))) {
41959 + match = match->next;
41962 + if (match && !(match->mode & GR_DELETED))
41968 +struct acl_subject_label *
41969 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41970 + const struct acl_role_label *role)
41972 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41973 + struct acl_subject_label *match;
41975 + match = role->subj_hash[index];
41977 + while (match && (match->inode != ino || match->device != dev ||
41978 + !(match->mode & GR_DELETED))) {
41979 + match = match->next;
41982 + if (match && (match->mode & GR_DELETED))
41988 +static struct acl_object_label *
41989 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41990 + const struct acl_subject_label *subj)
41992 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41993 + struct acl_object_label *match;
41995 + match = subj->obj_hash[index];
41997 + while (match && (match->inode != ino || match->device != dev ||
41998 + (match->mode & GR_DELETED))) {
41999 + match = match->next;
42002 + if (match && !(match->mode & GR_DELETED))
42008 +static struct acl_object_label *
42009 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
42010 + const struct acl_subject_label *subj)
42012 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42013 + struct acl_object_label *match;
42015 + match = subj->obj_hash[index];
42017 + while (match && (match->inode != ino || match->device != dev ||
42018 + !(match->mode & GR_DELETED))) {
42019 + match = match->next;
42022 + if (match && (match->mode & GR_DELETED))
42025 + match = subj->obj_hash[index];
42027 + while (match && (match->inode != ino || match->device != dev ||
42028 + (match->mode & GR_DELETED))) {
42029 + match = match->next;
42032 + if (match && !(match->mode & GR_DELETED))
42038 +static struct name_entry *
42039 +lookup_name_entry(const char *name)
42041 + unsigned int len = strlen(name);
42042 + unsigned int key = full_name_hash(name, len);
42043 + unsigned int index = key % name_set.n_size;
42044 + struct name_entry *match;
42046 + match = name_set.n_hash[index];
42048 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
42049 + match = match->next;
42054 +static struct name_entry *
42055 +lookup_name_entry_create(const char *name)
42057 + unsigned int len = strlen(name);
42058 + unsigned int key = full_name_hash(name, len);
42059 + unsigned int index = key % name_set.n_size;
42060 + struct name_entry *match;
42062 + match = name_set.n_hash[index];
42064 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42065 + !match->deleted))
42066 + match = match->next;
42068 + if (match && match->deleted)
42071 + match = name_set.n_hash[index];
42073 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42075 + match = match->next;
42077 + if (match && !match->deleted)
42083 +static struct inodev_entry *
42084 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42086 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42087 + struct inodev_entry *match;
42089 + match = inodev_set.i_hash[index];
42091 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42092 + match = match->next;
42098 +insert_inodev_entry(struct inodev_entry *entry)
42100 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42101 + inodev_set.i_size);
42102 + struct inodev_entry **curr;
42104 + entry->prev = NULL;
42106 + curr = &inodev_set.i_hash[index];
42107 + if (*curr != NULL)
42108 + (*curr)->prev = entry;
42110 + entry->next = *curr;
42117 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42119 + unsigned int index =
42120 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42121 + struct acl_role_label **curr;
42122 + struct acl_role_label *tmp;
42124 + curr = &acl_role_set.r_hash[index];
42126 + /* if role was already inserted due to domains and already has
42127 + a role in the same bucket as it attached, then we need to
42128 + combine these two buckets
42130 + if (role->next) {
42131 + tmp = role->next;
42132 + while (tmp->next)
42134 + tmp->next = *curr;
42136 + role->next = *curr;
42143 +insert_acl_role_label(struct acl_role_label *role)
42147 + if (role_list == NULL) {
42148 + role_list = role;
42149 + role->prev = NULL;
42151 + role->prev = role_list;
42152 + role_list = role;
42155 + /* used for hash chains */
42156 + role->next = NULL;
42158 + if (role->roletype & GR_ROLE_DOMAIN) {
42159 + for (i = 0; i < role->domain_child_num; i++)
42160 + __insert_acl_role_label(role, role->domain_children[i]);
42162 + __insert_acl_role_label(role, role->uidgid);
42166 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42168 + struct name_entry **curr, *nentry;
42169 + struct inodev_entry *ientry;
42170 + unsigned int len = strlen(name);
42171 + unsigned int key = full_name_hash(name, len);
42172 + unsigned int index = key % name_set.n_size;
42174 + curr = &name_set.n_hash[index];
42176 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42177 + curr = &((*curr)->next);
42179 + if (*curr != NULL)
42182 + nentry = acl_alloc(sizeof (struct name_entry));
42183 + if (nentry == NULL)
42185 + ientry = acl_alloc(sizeof (struct inodev_entry));
42186 + if (ientry == NULL)
42188 + ientry->nentry = nentry;
42190 + nentry->key = key;
42191 + nentry->name = name;
42192 + nentry->inode = inode;
42193 + nentry->device = device;
42194 + nentry->len = len;
42195 + nentry->deleted = deleted;
42197 + nentry->prev = NULL;
42198 + curr = &name_set.n_hash[index];
42199 + if (*curr != NULL)
42200 + (*curr)->prev = nentry;
42201 + nentry->next = *curr;
42204 + /* insert us into the table searchable by inode/dev */
42205 + insert_inodev_entry(ientry);
42211 +insert_acl_obj_label(struct acl_object_label *obj,
42212 + struct acl_subject_label *subj)
42214 + unsigned int index =
42215 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42216 + struct acl_object_label **curr;
42219 + obj->prev = NULL;
42221 + curr = &subj->obj_hash[index];
42222 + if (*curr != NULL)
42223 + (*curr)->prev = obj;
42225 + obj->next = *curr;
42232 +insert_acl_subj_label(struct acl_subject_label *obj,
42233 + struct acl_role_label *role)
42235 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42236 + struct acl_subject_label **curr;
42238 + obj->prev = NULL;
42240 + curr = &role->subj_hash[index];
42241 + if (*curr != NULL)
42242 + (*curr)->prev = obj;
42244 + obj->next = *curr;
42250 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42253 +create_table(__u32 * len, int elementsize)
42255 + unsigned int table_sizes[] = {
42256 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42257 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42258 + 4194301, 8388593, 16777213, 33554393, 67108859
42260 + void *newtable = NULL;
42261 + unsigned int pwr = 0;
42263 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42264 + table_sizes[pwr] <= *len)
42267 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42270 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42272 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42274 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42276 + *len = table_sizes[pwr];
42282 +init_variables(const struct gr_arg *arg)
42284 + struct task_struct *reaper = &init_task;
42285 + unsigned int stacksize;
42287 + subj_map_set.s_size = arg->role_db.num_subjects;
42288 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42289 + name_set.n_size = arg->role_db.num_objects;
42290 + inodev_set.i_size = arg->role_db.num_objects;
42292 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42293 + !name_set.n_size || !inodev_set.i_size)
42296 + if (!gr_init_uidset())
42299 + /* set up the stack that holds allocation info */
42301 + stacksize = arg->role_db.num_pointers + 5;
42303 + if (!acl_alloc_stack_init(stacksize))
42306 + /* grab reference for the real root dentry and vfsmount */
42307 + get_fs_root(reaper->fs, &real_root);
42309 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42310 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42313 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42314 + if (fakefs_obj_rw == NULL)
42316 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42318 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42319 + if (fakefs_obj_rwx == NULL)
42321 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42323 + subj_map_set.s_hash =
42324 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42325 + acl_role_set.r_hash =
42326 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42327 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42328 + inodev_set.i_hash =
42329 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42331 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42332 + !name_set.n_hash || !inodev_set.i_hash)
42335 + memset(subj_map_set.s_hash, 0,
42336 + sizeof(struct subject_map *) * subj_map_set.s_size);
42337 + memset(acl_role_set.r_hash, 0,
42338 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42339 + memset(name_set.n_hash, 0,
42340 + sizeof (struct name_entry *) * name_set.n_size);
42341 + memset(inodev_set.i_hash, 0,
42342 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42347 +/* free information not needed after startup
42348 + currently contains user->kernel pointer mappings for subjects
42352 +free_init_variables(void)
42356 + if (subj_map_set.s_hash) {
42357 + for (i = 0; i < subj_map_set.s_size; i++) {
42358 + if (subj_map_set.s_hash[i]) {
42359 + kfree(subj_map_set.s_hash[i]);
42360 + subj_map_set.s_hash[i] = NULL;
42364 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42366 + kfree(subj_map_set.s_hash);
42368 + vfree(subj_map_set.s_hash);
42375 +free_variables(void)
42377 + struct acl_subject_label *s;
42378 + struct acl_role_label *r;
42379 + struct task_struct *task, *task2;
42382 + gr_clear_learn_entries();
42384 + read_lock(&tasklist_lock);
42385 + do_each_thread(task2, task) {
42386 + task->acl_sp_role = 0;
42387 + task->acl_role_id = 0;
42388 + task->acl = NULL;
42389 + task->role = NULL;
42390 + } while_each_thread(task2, task);
42391 + read_unlock(&tasklist_lock);
42393 + /* release the reference to the real root dentry and vfsmount */
42394 + path_put(&real_root);
42396 + /* free all object hash tables */
42398 + FOR_EACH_ROLE_START(r)
42399 + if (r->subj_hash == NULL)
42401 + FOR_EACH_SUBJECT_START(r, s, x)
42402 + if (s->obj_hash == NULL)
42404 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42405 + kfree(s->obj_hash);
42407 + vfree(s->obj_hash);
42408 + FOR_EACH_SUBJECT_END(s, x)
42409 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42410 + if (s->obj_hash == NULL)
42412 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42413 + kfree(s->obj_hash);
42415 + vfree(s->obj_hash);
42416 + FOR_EACH_NESTED_SUBJECT_END(s)
42417 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42418 + kfree(r->subj_hash);
42420 + vfree(r->subj_hash);
42421 + r->subj_hash = NULL;
42423 + FOR_EACH_ROLE_END(r)
42427 + if (acl_role_set.r_hash) {
42428 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42430 + kfree(acl_role_set.r_hash);
42432 + vfree(acl_role_set.r_hash);
42434 + if (name_set.n_hash) {
42435 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42437 + kfree(name_set.n_hash);
42439 + vfree(name_set.n_hash);
42442 + if (inodev_set.i_hash) {
42443 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42445 + kfree(inodev_set.i_hash);
42447 + vfree(inodev_set.i_hash);
42450 + gr_free_uidset();
42452 + memset(&name_set, 0, sizeof (struct name_db));
42453 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42454 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42455 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42457 + default_role = NULL;
42458 + role_list = NULL;
42464 +count_user_objs(struct acl_object_label *userp)
42466 + struct acl_object_label o_tmp;
42470 + if (copy_from_user(&o_tmp, userp,
42471 + sizeof (struct acl_object_label)))
42474 + userp = o_tmp.prev;
42481 +static struct acl_subject_label *
42482 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42485 +copy_user_glob(struct acl_object_label *obj)
42487 + struct acl_object_label *g_tmp, **guser;
42488 + unsigned int len;
42491 + if (obj->globbed == NULL)
42494 + guser = &obj->globbed;
42496 + g_tmp = (struct acl_object_label *)
42497 + acl_alloc(sizeof (struct acl_object_label));
42498 + if (g_tmp == NULL)
42501 + if (copy_from_user(g_tmp, *guser,
42502 + sizeof (struct acl_object_label)))
42505 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42507 + if (!len || len >= PATH_MAX)
42510 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42513 + if (copy_from_user(tmp, g_tmp->filename, len))
42515 + tmp[len-1] = '\0';
42516 + g_tmp->filename = tmp;
42519 + guser = &(g_tmp->next);
42526 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42527 + struct acl_role_label *role)
42529 + struct acl_object_label *o_tmp;
42530 + unsigned int len;
42535 + if ((o_tmp = (struct acl_object_label *)
42536 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42539 + if (copy_from_user(o_tmp, userp,
42540 + sizeof (struct acl_object_label)))
42543 + userp = o_tmp->prev;
42545 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42547 + if (!len || len >= PATH_MAX)
42550 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42553 + if (copy_from_user(tmp, o_tmp->filename, len))
42555 + tmp[len-1] = '\0';
42556 + o_tmp->filename = tmp;
42558 + insert_acl_obj_label(o_tmp, subj);
42559 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42560 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42563 + ret = copy_user_glob(o_tmp);
42567 + if (o_tmp->nested) {
42568 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42569 + if (IS_ERR(o_tmp->nested))
42570 + return PTR_ERR(o_tmp->nested);
42572 + /* insert into nested subject list */
42573 + o_tmp->nested->next = role->hash->first;
42574 + role->hash->first = o_tmp->nested;
42582 +count_user_subjs(struct acl_subject_label *userp)
42584 + struct acl_subject_label s_tmp;
42588 + if (copy_from_user(&s_tmp, userp,
42589 + sizeof (struct acl_subject_label)))
42592 + userp = s_tmp.prev;
42593 + /* do not count nested subjects against this count, since
42594 + they are not included in the hash table, but are
42595 + attached to objects. We have already counted
42596 + the subjects in userspace for the allocation
42599 + if (!(s_tmp.mode & GR_NESTED))
42607 +copy_user_allowedips(struct acl_role_label *rolep)
42609 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42611 + ruserip = rolep->allowed_ips;
42613 + while (ruserip) {
42616 + if ((rtmp = (struct role_allowed_ip *)
42617 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42620 + if (copy_from_user(rtmp, ruserip,
42621 + sizeof (struct role_allowed_ip)))
42624 + ruserip = rtmp->prev;
42627 + rtmp->prev = NULL;
42628 + rolep->allowed_ips = rtmp;
42630 + rlast->next = rtmp;
42631 + rtmp->prev = rlast;
42635 + rtmp->next = NULL;
42642 +copy_user_transitions(struct acl_role_label *rolep)
42644 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42646 + unsigned int len;
42649 + rusertp = rolep->transitions;
42651 + while (rusertp) {
42654 + if ((rtmp = (struct role_transition *)
42655 + acl_alloc(sizeof (struct role_transition))) == NULL)
42658 + if (copy_from_user(rtmp, rusertp,
42659 + sizeof (struct role_transition)))
42662 + rusertp = rtmp->prev;
42664 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42666 + if (!len || len >= GR_SPROLE_LEN)
42669 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42672 + if (copy_from_user(tmp, rtmp->rolename, len))
42674 + tmp[len-1] = '\0';
42675 + rtmp->rolename = tmp;
42678 + rtmp->prev = NULL;
42679 + rolep->transitions = rtmp;
42681 + rlast->next = rtmp;
42682 + rtmp->prev = rlast;
42686 + rtmp->next = NULL;
42692 +static struct acl_subject_label *
42693 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42695 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42696 + unsigned int len;
42699 + struct acl_ip_label **i_tmp, *i_utmp2;
42700 + struct gr_hash_struct ghash;
42701 + struct subject_map *subjmap;
42702 + unsigned int i_num;
42705 + s_tmp = lookup_subject_map(userp);
42707 + /* we've already copied this subject into the kernel, just return
42708 + the reference to it, and don't copy it over again
42713 + if ((s_tmp = (struct acl_subject_label *)
42714 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42715 + return ERR_PTR(-ENOMEM);
42717 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42718 + if (subjmap == NULL)
42719 + return ERR_PTR(-ENOMEM);
42721 + subjmap->user = userp;
42722 + subjmap->kernel = s_tmp;
42723 + insert_subj_map_entry(subjmap);
42725 + if (copy_from_user(s_tmp, userp,
42726 + sizeof (struct acl_subject_label)))
42727 + return ERR_PTR(-EFAULT);
42729 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42731 + if (!len || len >= PATH_MAX)
42732 + return ERR_PTR(-EINVAL);
42734 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42735 + return ERR_PTR(-ENOMEM);
42737 + if (copy_from_user(tmp, s_tmp->filename, len))
42738 + return ERR_PTR(-EFAULT);
42739 + tmp[len-1] = '\0';
42740 + s_tmp->filename = tmp;
42742 + if (!strcmp(s_tmp->filename, "/"))
42743 + role->root_label = s_tmp;
42745 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42746 + return ERR_PTR(-EFAULT);
42748 + /* copy user and group transition tables */
42750 + if (s_tmp->user_trans_num) {
42753 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42754 + if (uidlist == NULL)
42755 + return ERR_PTR(-ENOMEM);
42756 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42757 + return ERR_PTR(-EFAULT);
42759 + s_tmp->user_transitions = uidlist;
42762 + if (s_tmp->group_trans_num) {
42765 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42766 + if (gidlist == NULL)
42767 + return ERR_PTR(-ENOMEM);
42768 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42769 + return ERR_PTR(-EFAULT);
42771 + s_tmp->group_transitions = gidlist;
42774 + /* set up object hash table */
42775 + num_objs = count_user_objs(ghash.first);
42777 + s_tmp->obj_hash_size = num_objs;
42778 + s_tmp->obj_hash =
42779 + (struct acl_object_label **)
42780 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42782 + if (!s_tmp->obj_hash)
42783 + return ERR_PTR(-ENOMEM);
42785 + memset(s_tmp->obj_hash, 0,
42786 + s_tmp->obj_hash_size *
42787 + sizeof (struct acl_object_label *));
42789 + /* add in objects */
42790 + err = copy_user_objs(ghash.first, s_tmp, role);
42793 + return ERR_PTR(err);
42795 + /* set pointer for parent subject */
42796 + if (s_tmp->parent_subject) {
42797 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42799 + if (IS_ERR(s_tmp2))
42802 + s_tmp->parent_subject = s_tmp2;
42805 + /* add in ip acls */
42807 + if (!s_tmp->ip_num) {
42808 + s_tmp->ips = NULL;
42813 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42814 + sizeof (struct acl_ip_label *));
42817 + return ERR_PTR(-ENOMEM);
42819 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42820 + *(i_tmp + i_num) =
42821 + (struct acl_ip_label *)
42822 + acl_alloc(sizeof (struct acl_ip_label));
42823 + if (!*(i_tmp + i_num))
42824 + return ERR_PTR(-ENOMEM);
42826 + if (copy_from_user
42827 + (&i_utmp2, s_tmp->ips + i_num,
42828 + sizeof (struct acl_ip_label *)))
42829 + return ERR_PTR(-EFAULT);
42831 + if (copy_from_user
42832 + (*(i_tmp + i_num), i_utmp2,
42833 + sizeof (struct acl_ip_label)))
42834 + return ERR_PTR(-EFAULT);
42836 + if ((*(i_tmp + i_num))->iface == NULL)
42839 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42840 + if (!len || len >= IFNAMSIZ)
42841 + return ERR_PTR(-EINVAL);
42842 + tmp = acl_alloc(len);
42844 + return ERR_PTR(-ENOMEM);
42845 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42846 + return ERR_PTR(-EFAULT);
42847 + (*(i_tmp + i_num))->iface = tmp;
42850 + s_tmp->ips = i_tmp;
42853 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42854 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42855 + return ERR_PTR(-ENOMEM);
42861 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42863 + struct acl_subject_label s_pre;
42864 + struct acl_subject_label * ret;
42868 + if (copy_from_user(&s_pre, userp,
42869 + sizeof (struct acl_subject_label)))
42872 + /* do not add nested subjects here, add
42873 + while parsing objects
42876 + if (s_pre.mode & GR_NESTED) {
42877 + userp = s_pre.prev;
42881 + ret = do_copy_user_subj(userp, role);
42883 + err = PTR_ERR(ret);
42887 + insert_acl_subj_label(ret, role);
42889 + userp = s_pre.prev;
42896 +copy_user_acl(struct gr_arg *arg)
42898 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42899 + struct sprole_pw *sptmp;
42900 + struct gr_hash_struct *ghash;
42901 + uid_t *domainlist;
42902 + unsigned int r_num;
42903 + unsigned int len;
42909 + /* we need a default and kernel role */
42910 + if (arg->role_db.num_roles < 2)
42913 + /* copy special role authentication info from userspace */
42915 + num_sprole_pws = arg->num_sprole_pws;
42916 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42918 + if (!acl_special_roles) {
42923 + for (i = 0; i < num_sprole_pws; i++) {
42924 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42929 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42930 + sizeof (struct sprole_pw))) {
42936 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42938 + if (!len || len >= GR_SPROLE_LEN) {
42943 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42948 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42952 + tmp[len-1] = '\0';
42953 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42954 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42956 + sptmp->rolename = tmp;
42957 + acl_special_roles[i] = sptmp;
42960 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42962 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42963 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42970 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42971 + sizeof (struct acl_role_label *))) {
42976 + if (copy_from_user(r_tmp, r_utmp2,
42977 + sizeof (struct acl_role_label))) {
42982 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42984 + if (!len || len >= PATH_MAX) {
42989 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42993 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42997 + tmp[len-1] = '\0';
42998 + r_tmp->rolename = tmp;
43000 + if (!strcmp(r_tmp->rolename, "default")
43001 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
43002 + default_role = r_tmp;
43003 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
43004 + kernel_role = r_tmp;
43007 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
43011 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
43016 + r_tmp->hash = ghash;
43018 + num_subjs = count_user_subjs(r_tmp->hash->first);
43020 + r_tmp->subj_hash_size = num_subjs;
43021 + r_tmp->subj_hash =
43022 + (struct acl_subject_label **)
43023 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
43025 + if (!r_tmp->subj_hash) {
43030 + err = copy_user_allowedips(r_tmp);
43034 + /* copy domain info */
43035 + if (r_tmp->domain_children != NULL) {
43036 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
43037 + if (domainlist == NULL) {
43041 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
43045 + r_tmp->domain_children = domainlist;
43048 + err = copy_user_transitions(r_tmp);
43052 + memset(r_tmp->subj_hash, 0,
43053 + r_tmp->subj_hash_size *
43054 + sizeof (struct acl_subject_label *));
43056 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
43061 + /* set nested subject list to null */
43062 + r_tmp->hash->first = NULL;
43064 + insert_acl_role_label(r_tmp);
43069 + free_variables();
43076 +gracl_init(struct gr_arg *args)
43080 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43081 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43083 + if (init_variables(args)) {
43084 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43086 + free_variables();
43090 + error = copy_user_acl(args);
43091 + free_init_variables();
43093 + free_variables();
43097 + if ((error = gr_set_acls(0))) {
43098 + free_variables();
43102 + pax_open_kernel();
43103 + gr_status |= GR_READY;
43104 + pax_close_kernel();
43110 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43113 +glob_match(const char *p, const char *n)
43117 + while ((c = *p++) != '\0') {
43122 + else if (*n == '/')
43130 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43133 + else if (c == '?') {
43143 + const char *endp;
43145 + if ((endp = strchr(n, '/')) == NULL)
43146 + endp = n + strlen(n);
43149 + for (--p; n < endp; ++n)
43150 + if (!glob_match(p, n))
43152 + } else if (c == '/') {
43153 + while (*n != '\0' && *n != '/')
43155 + if (*n == '/' && !glob_match(p, n + 1))
43158 + for (--p; n < endp; ++n)
43159 + if (*n == c && !glob_match(p, n))
43170 + if (*n == '\0' || *n == '/')
43173 + not = (*p == '!' || *p == '^');
43179 + unsigned char fn = (unsigned char)*n;
43189 + if (c == '-' && *p != ']') {
43190 + unsigned char cend = *p++;
43192 + if (cend == '\0')
43195 + if (cold <= fn && fn <= cend)
43209 + while (c != ']') {
43236 +static struct acl_object_label *
43237 +chk_glob_label(struct acl_object_label *globbed,
43238 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43240 + struct acl_object_label *tmp;
43242 + if (*path == NULL)
43243 + *path = gr_to_filename_nolock(dentry, mnt);
43248 + if (!glob_match(tmp->filename, *path))
43256 +static struct acl_object_label *
43257 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43258 + const ino_t curr_ino, const dev_t curr_dev,
43259 + const struct acl_subject_label *subj, char **path, const int checkglob)
43261 + struct acl_subject_label *tmpsubj;
43262 + struct acl_object_label *retval;
43263 + struct acl_object_label *retval2;
43265 + tmpsubj = (struct acl_subject_label *) subj;
43266 + read_lock(&gr_inode_lock);
43268 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43270 + if (checkglob && retval->globbed) {
43271 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43272 + (struct vfsmount *)orig_mnt, path);
43274 + retval = retval2;
43278 + } while ((tmpsubj = tmpsubj->parent_subject));
43279 + read_unlock(&gr_inode_lock);
43284 +static __inline__ struct acl_object_label *
43285 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43286 + struct dentry *curr_dentry,
43287 + const struct acl_subject_label *subj, char **path, const int checkglob)
43289 + int newglob = checkglob;
43293 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43294 + as we don't want a / * rule to match instead of the / object
43295 + don't do this for create lookups that call this function though, since they're looking up
43296 + on the parent and thus need globbing checks on all paths
43298 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43299 + newglob = GR_NO_GLOB;
43301 + spin_lock(&curr_dentry->d_lock);
43302 + inode = curr_dentry->d_inode->i_ino;
43303 + device = __get_dev(curr_dentry);
43304 + spin_unlock(&curr_dentry->d_lock);
43306 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43309 +static struct acl_object_label *
43310 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43311 + const struct acl_subject_label *subj, char *path, const int checkglob)
43313 + struct dentry *dentry = (struct dentry *) l_dentry;
43314 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43315 + struct acl_object_label *retval;
43316 + struct dentry *parent;
43318 + write_seqlock(&rename_lock);
43319 + br_read_lock(vfsmount_lock);
43321 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43323 + mnt == sock_mnt ||
43325 +#ifdef CONFIG_HUGETLBFS
43326 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43328 + /* ignore Eric Biederman */
43329 + IS_PRIVATE(l_dentry->d_inode))) {
43330 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43335 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43338 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43339 + if (mnt->mnt_parent == mnt)
43342 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43343 + if (retval != NULL)
43346 + dentry = mnt->mnt_mountpoint;
43347 + mnt = mnt->mnt_parent;
43351 + parent = dentry->d_parent;
43352 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43353 + if (retval != NULL)
43359 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43361 + /* real_root is pinned so we don't have to hold a reference */
43362 + if (retval == NULL)
43363 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43365 + br_read_unlock(vfsmount_lock);
43366 + write_sequnlock(&rename_lock);
43368 + BUG_ON(retval == NULL);
43373 +static __inline__ struct acl_object_label *
43374 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43375 + const struct acl_subject_label *subj)
43377 + char *path = NULL;
43378 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43381 +static __inline__ struct acl_object_label *
43382 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43383 + const struct acl_subject_label *subj)
43385 + char *path = NULL;
43386 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43389 +static __inline__ struct acl_object_label *
43390 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43391 + const struct acl_subject_label *subj, char *path)
43393 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43396 +static struct acl_subject_label *
43397 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43398 + const struct acl_role_label *role)
43400 + struct dentry *dentry = (struct dentry *) l_dentry;
43401 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43402 + struct acl_subject_label *retval;
43403 + struct dentry *parent;
43405 + write_seqlock(&rename_lock);
43406 + br_read_lock(vfsmount_lock);
43409 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43411 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43412 + if (mnt->mnt_parent == mnt)
43415 + spin_lock(&dentry->d_lock);
43416 + read_lock(&gr_inode_lock);
43418 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43419 + __get_dev(dentry), role);
43420 + read_unlock(&gr_inode_lock);
43421 + spin_unlock(&dentry->d_lock);
43422 + if (retval != NULL)
43425 + dentry = mnt->mnt_mountpoint;
43426 + mnt = mnt->mnt_parent;
43430 + spin_lock(&dentry->d_lock);
43431 + read_lock(&gr_inode_lock);
43432 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43433 + __get_dev(dentry), role);
43434 + read_unlock(&gr_inode_lock);
43435 + parent = dentry->d_parent;
43436 + spin_unlock(&dentry->d_lock);
43438 + if (retval != NULL)
43444 + spin_lock(&dentry->d_lock);
43445 + read_lock(&gr_inode_lock);
43446 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43447 + __get_dev(dentry), role);
43448 + read_unlock(&gr_inode_lock);
43449 + spin_unlock(&dentry->d_lock);
43451 + if (unlikely(retval == NULL)) {
43452 + /* real_root is pinned, we don't need to hold a reference */
43453 + read_lock(&gr_inode_lock);
43454 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43455 + __get_dev(real_root.dentry), role);
43456 + read_unlock(&gr_inode_lock);
43459 + br_read_unlock(vfsmount_lock);
43460 + write_sequnlock(&rename_lock);
43462 + BUG_ON(retval == NULL);
43468 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43470 + struct task_struct *task = current;
43471 + const struct cred *cred = current_cred();
43473 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43474 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43475 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43476 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43482 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43484 + struct task_struct *task = current;
43485 + const struct cred *cred = current_cred();
43487 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43488 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43489 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43490 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43496 +gr_log_learn_id_change(const char type, const unsigned int real,
43497 + const unsigned int effective, const unsigned int fs)
43499 + struct task_struct *task = current;
43500 + const struct cred *cred = current_cred();
43502 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43503 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43504 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43505 + type, real, effective, fs, &task->signal->saved_ip);
43511 +gr_check_link(const struct dentry * new_dentry,
43512 + const struct dentry * parent_dentry,
43513 + const struct vfsmount * parent_mnt,
43514 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43516 + struct acl_object_label *obj;
43517 + __u32 oldmode, newmode;
43520 + if (unlikely(!(gr_status & GR_READY)))
43521 + return (GR_CREATE | GR_LINK);
43523 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43524 + oldmode = obj->mode;
43526 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43527 + oldmode |= (GR_CREATE | GR_LINK);
43529 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43530 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43531 + needmode |= GR_SETID | GR_AUDIT_SETID;
43534 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43535 + oldmode | needmode);
43537 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43538 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43539 + GR_INHERIT | GR_AUDIT_INHERIT);
43541 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43544 + if ((oldmode & needmode) != needmode)
43547 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43548 + if ((newmode & needmode) != needmode)
43551 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43554 + needmode = oldmode;
43555 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43556 + needmode |= GR_SETID;
43558 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43559 + gr_log_learn(old_dentry, old_mnt, needmode);
43560 + return (GR_CREATE | GR_LINK);
43561 + } else if (newmode & GR_SUPPRESS)
43562 + return GR_SUPPRESS;
43568 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43569 + const struct vfsmount * mnt)
43571 + __u32 retval = mode;
43572 + struct acl_subject_label *curracl;
43573 + struct acl_object_label *currobj;
43575 + if (unlikely(!(gr_status & GR_READY)))
43576 + return (mode & ~GR_AUDITS);
43578 + curracl = current->acl;
43580 + currobj = chk_obj_label(dentry, mnt, curracl);
43581 + retval = currobj->mode & mode;
43583 + /* if we're opening a specified transfer file for writing
43584 + (e.g. /dev/initctl), then transfer our role to init
43586 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43587 + current->role->roletype & GR_ROLE_PERSIST)) {
43588 + struct task_struct *task = init_pid_ns.child_reaper;
43590 + if (task->role != current->role) {
43591 + task->acl_sp_role = 0;
43592 + task->acl_role_id = current->acl_role_id;
43593 + task->role = current->role;
43595 + read_lock(&grsec_exec_file_lock);
43596 + gr_apply_subject_to_task(task);
43597 + read_unlock(&grsec_exec_file_lock);
43598 + rcu_read_unlock();
43599 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43604 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43605 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43606 + __u32 new_mode = mode;
43608 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43610 + retval = new_mode;
43612 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43613 + new_mode |= GR_INHERIT;
43615 + if (!(mode & GR_NOLEARN))
43616 + gr_log_learn(dentry, mnt, new_mode);
43623 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43624 + const struct vfsmount * mnt, const __u32 mode)
43626 + struct name_entry *match;
43627 + struct acl_object_label *matchpo;
43628 + struct acl_subject_label *curracl;
43632 + if (unlikely(!(gr_status & GR_READY)))
43633 + return (mode & ~GR_AUDITS);
43635 + preempt_disable();
43636 + path = gr_to_filename_rbac(new_dentry, mnt);
43637 + match = lookup_name_entry_create(path);
43640 + goto check_parent;
43642 + curracl = current->acl;
43644 + read_lock(&gr_inode_lock);
43645 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43646 + read_unlock(&gr_inode_lock);
43649 + if ((matchpo->mode & mode) !=
43650 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43651 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43652 + __u32 new_mode = mode;
43654 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43656 + gr_log_learn(new_dentry, mnt, new_mode);
43658 + preempt_enable();
43661 + preempt_enable();
43662 + return (matchpo->mode & mode);
43666 + curracl = current->acl;
43668 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43669 + retval = matchpo->mode & mode;
43671 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43672 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43673 + __u32 new_mode = mode;
43675 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43677 + gr_log_learn(new_dentry, mnt, new_mode);
43678 + preempt_enable();
43682 + preempt_enable();
43687 +gr_check_hidden_task(const struct task_struct *task)
43689 + if (unlikely(!(gr_status & GR_READY)))
43692 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43699 +gr_check_protected_task(const struct task_struct *task)
43701 + if (unlikely(!(gr_status & GR_READY) || !task))
43704 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43705 + task->acl != current->acl)
43712 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43714 + struct task_struct *p;
43717 + if (unlikely(!(gr_status & GR_READY) || !pid))
43720 + read_lock(&tasklist_lock);
43721 + do_each_pid_task(pid, type, p) {
43722 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43723 + p->acl != current->acl) {
43727 + } while_each_pid_task(pid, type, p);
43729 + read_unlock(&tasklist_lock);
43735 +gr_copy_label(struct task_struct *tsk)
43737 + tsk->signal->used_accept = 0;
43738 + tsk->acl_sp_role = 0;
43739 + tsk->acl_role_id = current->acl_role_id;
43740 + tsk->acl = current->acl;
43741 + tsk->role = current->role;
43742 + tsk->signal->curr_ip = current->signal->curr_ip;
43743 + tsk->signal->saved_ip = current->signal->saved_ip;
43744 + if (current->exec_file)
43745 + get_file(current->exec_file);
43746 + tsk->exec_file = current->exec_file;
43747 + tsk->is_writable = current->is_writable;
43748 + if (unlikely(current->signal->used_accept)) {
43749 + current->signal->curr_ip = 0;
43750 + current->signal->saved_ip = 0;
43757 +gr_set_proc_res(struct task_struct *task)
43759 + struct acl_subject_label *proc;
43760 + unsigned short i;
43762 + proc = task->acl;
43764 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43767 + for (i = 0; i < RLIM_NLIMITS; i++) {
43768 + if (!(proc->resmask & (1 << i)))
43771 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43772 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43778 +extern int __gr_process_user_ban(struct user_struct *user);
43781 +gr_check_user_change(int real, int effective, int fs)
43788 + int effectiveok = 0;
43791 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43792 + struct user_struct *user;
43797 + user = find_user(real);
43798 + if (user == NULL)
43801 + if (__gr_process_user_ban(user)) {
43802 + /* for find_user */
43807 + /* for find_user */
43813 + if (unlikely(!(gr_status & GR_READY)))
43816 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43817 + gr_log_learn_id_change('u', real, effective, fs);
43819 + num = current->acl->user_trans_num;
43820 + uidlist = current->acl->user_transitions;
43822 + if (uidlist == NULL)
43827 + if (effective == -1)
43832 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43833 + for (i = 0; i < num; i++) {
43834 + curuid = (int)uidlist[i];
43835 + if (real == curuid)
43837 + if (effective == curuid)
43839 + if (fs == curuid)
43842 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43843 + for (i = 0; i < num; i++) {
43844 + curuid = (int)uidlist[i];
43845 + if (real == curuid)
43847 + if (effective == curuid)
43849 + if (fs == curuid)
43852 + /* not in deny list */
43860 + if (realok && effectiveok && fsok)
43863 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43869 +gr_check_group_change(int real, int effective, int fs)
43876 + int effectiveok = 0;
43879 + if (unlikely(!(gr_status & GR_READY)))
43882 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43883 + gr_log_learn_id_change('g', real, effective, fs);
43885 + num = current->acl->group_trans_num;
43886 + gidlist = current->acl->group_transitions;
43888 + if (gidlist == NULL)
43893 + if (effective == -1)
43898 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43899 + for (i = 0; i < num; i++) {
43900 + curgid = (int)gidlist[i];
43901 + if (real == curgid)
43903 + if (effective == curgid)
43905 + if (fs == curgid)
43908 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43909 + for (i = 0; i < num; i++) {
43910 + curgid = (int)gidlist[i];
43911 + if (real == curgid)
43913 + if (effective == curgid)
43915 + if (fs == curgid)
43918 + /* not in deny list */
43926 + if (realok && effectiveok && fsok)
43929 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43935 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43937 + struct acl_role_label *role = task->role;
43938 + struct acl_subject_label *subj = NULL;
43939 + struct acl_object_label *obj;
43940 + struct file *filp;
43942 + if (unlikely(!(gr_status & GR_READY)))
43945 + filp = task->exec_file;
43947 + /* kernel process, we'll give them the kernel role */
43948 + if (unlikely(!filp)) {
43949 + task->role = kernel_role;
43950 + task->acl = kernel_role->root_label;
43952 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43953 + role = lookup_acl_role_label(task, uid, gid);
43955 + /* perform subject lookup in possibly new role
43956 + we can use this result below in the case where role == task->role
43958 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43960 + /* if we changed uid/gid, but result in the same role
43961 + and are using inheritance, don't lose the inherited subject
43962 + if current subject is other than what normal lookup
43963 + would result in, we arrived via inheritance, don't
43966 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43967 + (subj == task->acl)))
43968 + task->acl = subj;
43970 + task->role = role;
43972 + task->is_writable = 0;
43974 + /* ignore additional mmap checks for processes that are writable
43975 + by the default ACL */
43976 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43977 + if (unlikely(obj->mode & GR_WRITE))
43978 + task->is_writable = 1;
43979 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43980 + if (unlikely(obj->mode & GR_WRITE))
43981 + task->is_writable = 1;
43983 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43984 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43987 + gr_set_proc_res(task);
43993 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43994 + const int unsafe_share)
43996 + struct task_struct *task = current;
43997 + struct acl_subject_label *newacl;
43998 + struct acl_object_label *obj;
44001 + if (unlikely(!(gr_status & GR_READY)))
44004 + newacl = chk_subj_label(dentry, mnt, task->role);
44007 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
44008 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
44009 + !(task->role->roletype & GR_ROLE_GOD) &&
44010 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
44011 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
44012 + task_unlock(task);
44013 + if (unsafe_share)
44014 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
44016 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
44019 + task_unlock(task);
44021 + obj = chk_obj_label(dentry, mnt, task->acl);
44022 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
44024 + if (!(task->acl->mode & GR_INHERITLEARN) &&
44025 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
44027 + task->acl = obj->nested;
44029 + task->acl = newacl;
44030 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
44031 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
44033 + task->is_writable = 0;
44035 + /* ignore additional mmap checks for processes that are writable
44036 + by the default ACL */
44037 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
44038 + if (unlikely(obj->mode & GR_WRITE))
44039 + task->is_writable = 1;
44040 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
44041 + if (unlikely(obj->mode & GR_WRITE))
44042 + task->is_writable = 1;
44044 + gr_set_proc_res(task);
44046 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44047 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44052 +/* always called with valid inodev ptr */
44054 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
44056 + struct acl_object_label *matchpo;
44057 + struct acl_subject_label *matchps;
44058 + struct acl_subject_label *subj;
44059 + struct acl_role_label *role;
44062 + FOR_EACH_ROLE_START(role)
44063 + FOR_EACH_SUBJECT_START(role, subj, x)
44064 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
44065 + matchpo->mode |= GR_DELETED;
44066 + FOR_EACH_SUBJECT_END(subj,x)
44067 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44068 + if (subj->inode == ino && subj->device == dev)
44069 + subj->mode |= GR_DELETED;
44070 + FOR_EACH_NESTED_SUBJECT_END(subj)
44071 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44072 + matchps->mode |= GR_DELETED;
44073 + FOR_EACH_ROLE_END(role)
44075 + inodev->nentry->deleted = 1;
44081 +gr_handle_delete(const ino_t ino, const dev_t dev)
44083 + struct inodev_entry *inodev;
44085 + if (unlikely(!(gr_status & GR_READY)))
44088 + write_lock(&gr_inode_lock);
44089 + inodev = lookup_inodev_entry(ino, dev);
44090 + if (inodev != NULL)
44091 + do_handle_delete(inodev, ino, dev);
44092 + write_unlock(&gr_inode_lock);
44098 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44099 + const ino_t newinode, const dev_t newdevice,
44100 + struct acl_subject_label *subj)
44102 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44103 + struct acl_object_label *match;
44105 + match = subj->obj_hash[index];
44107 + while (match && (match->inode != oldinode ||
44108 + match->device != olddevice ||
44109 + !(match->mode & GR_DELETED)))
44110 + match = match->next;
44112 + if (match && (match->inode == oldinode)
44113 + && (match->device == olddevice)
44114 + && (match->mode & GR_DELETED)) {
44115 + if (match->prev == NULL) {
44116 + subj->obj_hash[index] = match->next;
44117 + if (match->next != NULL)
44118 + match->next->prev = NULL;
44120 + match->prev->next = match->next;
44121 + if (match->next != NULL)
44122 + match->next->prev = match->prev;
44124 + match->prev = NULL;
44125 + match->next = NULL;
44126 + match->inode = newinode;
44127 + match->device = newdevice;
44128 + match->mode &= ~GR_DELETED;
44130 + insert_acl_obj_label(match, subj);
44137 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44138 + const ino_t newinode, const dev_t newdevice,
44139 + struct acl_role_label *role)
44141 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44142 + struct acl_subject_label *match;
44144 + match = role->subj_hash[index];
44146 + while (match && (match->inode != oldinode ||
44147 + match->device != olddevice ||
44148 + !(match->mode & GR_DELETED)))
44149 + match = match->next;
44151 + if (match && (match->inode == oldinode)
44152 + && (match->device == olddevice)
44153 + && (match->mode & GR_DELETED)) {
44154 + if (match->prev == NULL) {
44155 + role->subj_hash[index] = match->next;
44156 + if (match->next != NULL)
44157 + match->next->prev = NULL;
44159 + match->prev->next = match->next;
44160 + if (match->next != NULL)
44161 + match->next->prev = match->prev;
44163 + match->prev = NULL;
44164 + match->next = NULL;
44165 + match->inode = newinode;
44166 + match->device = newdevice;
44167 + match->mode &= ~GR_DELETED;
44169 + insert_acl_subj_label(match, role);
44176 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44177 + const ino_t newinode, const dev_t newdevice)
44179 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44180 + struct inodev_entry *match;
44182 + match = inodev_set.i_hash[index];
44184 + while (match && (match->nentry->inode != oldinode ||
44185 + match->nentry->device != olddevice || !match->nentry->deleted))
44186 + match = match->next;
44188 + if (match && (match->nentry->inode == oldinode)
44189 + && (match->nentry->device == olddevice) &&
44190 + match->nentry->deleted) {
44191 + if (match->prev == NULL) {
44192 + inodev_set.i_hash[index] = match->next;
44193 + if (match->next != NULL)
44194 + match->next->prev = NULL;
44196 + match->prev->next = match->next;
44197 + if (match->next != NULL)
44198 + match->next->prev = match->prev;
44200 + match->prev = NULL;
44201 + match->next = NULL;
44202 + match->nentry->inode = newinode;
44203 + match->nentry->device = newdevice;
44204 + match->nentry->deleted = 0;
44206 + insert_inodev_entry(match);
44213 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44214 + const struct vfsmount *mnt)
44216 + struct acl_subject_label *subj;
44217 + struct acl_role_label *role;
44219 + ino_t ino = dentry->d_inode->i_ino;
44220 + dev_t dev = __get_dev(dentry);
44222 + FOR_EACH_ROLE_START(role)
44223 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44225 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44226 + if ((subj->inode == ino) && (subj->device == dev)) {
44227 + subj->inode = ino;
44228 + subj->device = dev;
44230 + FOR_EACH_NESTED_SUBJECT_END(subj)
44231 + FOR_EACH_SUBJECT_START(role, subj, x)
44232 + update_acl_obj_label(matchn->inode, matchn->device,
44234 + FOR_EACH_SUBJECT_END(subj,x)
44235 + FOR_EACH_ROLE_END(role)
44237 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44243 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44245 + struct name_entry *matchn;
44247 + if (unlikely(!(gr_status & GR_READY)))
44250 + preempt_disable();
44251 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44253 + if (unlikely((unsigned long)matchn)) {
44254 + write_lock(&gr_inode_lock);
44255 + do_handle_create(matchn, dentry, mnt);
44256 + write_unlock(&gr_inode_lock);
44258 + preempt_enable();
44264 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44265 + struct dentry *old_dentry,
44266 + struct dentry *new_dentry,
44267 + struct vfsmount *mnt, const __u8 replace)
44269 + struct name_entry *matchn;
44270 + struct inodev_entry *inodev;
44271 + ino_t old_ino = old_dentry->d_inode->i_ino;
44272 + dev_t old_dev = __get_dev(old_dentry);
44274 + /* vfs_rename swaps the name and parent link for old_dentry and
44276 + at this point, old_dentry has the new name, parent link, and inode
44277 + for the renamed file
44278 + if a file is being replaced by a rename, new_dentry has the inode
44279 + and name for the replaced file
44282 + if (unlikely(!(gr_status & GR_READY)))
44285 + preempt_disable();
44286 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44288 + /* we wouldn't have to check d_inode if it weren't for
44289 + NFS silly-renaming
44292 + write_lock(&gr_inode_lock);
44293 + if (unlikely(replace && new_dentry->d_inode)) {
44294 + ino_t new_ino = new_dentry->d_inode->i_ino;
44295 + dev_t new_dev = __get_dev(new_dentry);
44297 + inodev = lookup_inodev_entry(new_ino, new_dev);
44298 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44299 + do_handle_delete(inodev, new_ino, new_dev);
44302 + inodev = lookup_inodev_entry(old_ino, old_dev);
44303 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44304 + do_handle_delete(inodev, old_ino, old_dev);
44306 + if (unlikely((unsigned long)matchn))
44307 + do_handle_create(matchn, old_dentry, mnt);
44309 + write_unlock(&gr_inode_lock);
44310 + preempt_enable();
44316 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44317 + unsigned char **sum)
44319 + struct acl_role_label *r;
44320 + struct role_allowed_ip *ipp;
44321 + struct role_transition *trans;
44324 + u32 curr_ip = current->signal->curr_ip;
44326 + current->signal->saved_ip = curr_ip;
44328 + /* check transition table */
44330 + for (trans = current->role->transitions; trans; trans = trans->next) {
44331 + if (!strcmp(rolename, trans->rolename)) {
44340 + /* handle special roles that do not require authentication
44343 + FOR_EACH_ROLE_START(r)
44344 + if (!strcmp(rolename, r->rolename) &&
44345 + (r->roletype & GR_ROLE_SPECIAL)) {
44347 + if (r->allowed_ips != NULL) {
44348 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44349 + if ((ntohl(curr_ip) & ipp->netmask) ==
44350 + (ntohl(ipp->addr) & ipp->netmask))
44358 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44359 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44365 + FOR_EACH_ROLE_END(r)
44367 + for (i = 0; i < num_sprole_pws; i++) {
44368 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44369 + *salt = acl_special_roles[i]->salt;
44370 + *sum = acl_special_roles[i]->sum;
44379 +assign_special_role(char *rolename)
44381 + struct acl_object_label *obj;
44382 + struct acl_role_label *r;
44383 + struct acl_role_label *assigned = NULL;
44384 + struct task_struct *tsk;
44385 + struct file *filp;
44387 + FOR_EACH_ROLE_START(r)
44388 + if (!strcmp(rolename, r->rolename) &&
44389 + (r->roletype & GR_ROLE_SPECIAL)) {
44393 + FOR_EACH_ROLE_END(r)
44398 + read_lock(&tasklist_lock);
44399 + read_lock(&grsec_exec_file_lock);
44401 + tsk = current->real_parent;
44405 + filp = tsk->exec_file;
44406 + if (filp == NULL)
44409 + tsk->is_writable = 0;
44411 + tsk->acl_sp_role = 1;
44412 + tsk->acl_role_id = ++acl_sp_role_value;
44413 + tsk->role = assigned;
44414 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44416 + /* ignore additional mmap checks for processes that are writable
44417 + by the default ACL */
44418 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44419 + if (unlikely(obj->mode & GR_WRITE))
44420 + tsk->is_writable = 1;
44421 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44422 + if (unlikely(obj->mode & GR_WRITE))
44423 + tsk->is_writable = 1;
44425 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44426 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44430 + read_unlock(&grsec_exec_file_lock);
44431 + read_unlock(&tasklist_lock);
44435 +int gr_check_secure_terminal(struct task_struct *task)
44437 + struct task_struct *p, *p2, *p3;
44438 + struct files_struct *files;
44439 + struct fdtable *fdt;
44440 + struct file *our_file = NULL, *file;
44443 + if (task->signal->tty == NULL)
44446 + files = get_files_struct(task);
44447 + if (files != NULL) {
44449 + fdt = files_fdtable(files);
44450 + for (i=0; i < fdt->max_fds; i++) {
44451 + file = fcheck_files(files, i);
44452 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44457 + rcu_read_unlock();
44458 + put_files_struct(files);
44461 + if (our_file == NULL)
44464 + read_lock(&tasklist_lock);
44465 + do_each_thread(p2, p) {
44466 + files = get_files_struct(p);
44467 + if (files == NULL ||
44468 + (p->signal && p->signal->tty == task->signal->tty)) {
44469 + if (files != NULL)
44470 + put_files_struct(files);
44474 + fdt = files_fdtable(files);
44475 + for (i=0; i < fdt->max_fds; i++) {
44476 + file = fcheck_files(files, i);
44477 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44478 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44480 + while (p3->pid > 0) {
44483 + p3 = p3->real_parent;
44487 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44488 + gr_handle_alertkill(p);
44489 + rcu_read_unlock();
44490 + put_files_struct(files);
44491 + read_unlock(&tasklist_lock);
44496 + rcu_read_unlock();
44497 + put_files_struct(files);
44498 + } while_each_thread(p2, p);
44499 + read_unlock(&tasklist_lock);
44506 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44508 + struct gr_arg_wrapper uwrap;
44509 + unsigned char *sprole_salt = NULL;
44510 + unsigned char *sprole_sum = NULL;
44511 + int error = sizeof (struct gr_arg_wrapper);
44514 + mutex_lock(&gr_dev_mutex);
44516 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44521 + if (count != sizeof (struct gr_arg_wrapper)) {
44522 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44528 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44529 + gr_auth_expires = 0;
44530 + gr_auth_attempts = 0;
44533 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44538 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44543 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44548 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44549 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44550 + time_after(gr_auth_expires, get_seconds())) {
44555 + /* if non-root trying to do anything other than use a special role,
44556 + do not attempt authentication, do not count towards authentication
44560 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44561 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44567 + /* ensure pw and special role name are null terminated */
44569 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44570 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44573 + * We have our enough of the argument structure..(we have yet
44574 + * to copy_from_user the tables themselves) . Copy the tables
44575 + * only if we need them, i.e. for loading operations. */
44577 + switch (gr_usermode->mode) {
44579 + if (gr_status & GR_READY) {
44581 + if (!gr_check_secure_terminal(current))
44586 + case GR_SHUTDOWN:
44587 + if ((gr_status & GR_READY)
44588 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44589 + pax_open_kernel();
44590 + gr_status &= ~GR_READY;
44591 + pax_close_kernel();
44593 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44594 + free_variables();
44595 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44596 + memset(gr_system_salt, 0, GR_SALT_LEN);
44597 + memset(gr_system_sum, 0, GR_SHA_LEN);
44598 + } else if (gr_status & GR_READY) {
44599 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44602 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44607 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44608 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44610 + if (gr_status & GR_READY)
44614 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44618 + if (!(gr_status & GR_READY)) {
44619 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44621 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44622 + preempt_disable();
44624 + pax_open_kernel();
44625 + gr_status &= ~GR_READY;
44626 + pax_close_kernel();
44628 + free_variables();
44629 + if (!(error2 = gracl_init(gr_usermode))) {
44630 + preempt_enable();
44631 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44633 + preempt_enable();
44635 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44638 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44643 + if (unlikely(!(gr_status & GR_READY))) {
44644 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44649 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44650 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44651 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44652 + struct acl_subject_label *segvacl;
44654 + lookup_acl_subj_label(gr_usermode->segv_inode,
44655 + gr_usermode->segv_device,
44658 + segvacl->crashes = 0;
44659 + segvacl->expires = 0;
44661 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44662 + gr_remove_uid(gr_usermode->segv_uid);
44665 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44670 + case GR_SPROLEPAM:
44671 + if (unlikely(!(gr_status & GR_READY))) {
44672 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44677 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44678 + current->role->expires = 0;
44679 + current->role->auth_attempts = 0;
44682 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44683 + time_after(current->role->expires, get_seconds())) {
44688 + if (lookup_special_role_auth
44689 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44690 + && ((!sprole_salt && !sprole_sum)
44691 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44693 + assign_special_role(gr_usermode->sp_role);
44694 + read_lock(&tasklist_lock);
44695 + if (current->real_parent)
44696 + p = current->real_parent->role->rolename;
44697 + read_unlock(&tasklist_lock);
44698 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44699 + p, acl_sp_role_value);
44701 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44703 + if(!(current->role->auth_attempts++))
44704 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44709 + case GR_UNSPROLE:
44710 + if (unlikely(!(gr_status & GR_READY))) {
44711 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44716 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44720 + read_lock(&tasklist_lock);
44721 + if (current->real_parent) {
44722 + p = current->real_parent->role->rolename;
44723 + i = current->real_parent->acl_role_id;
44725 + read_unlock(&tasklist_lock);
44727 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44735 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44740 + if (error != -EPERM)
44743 + if(!(gr_auth_attempts++))
44744 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44747 + mutex_unlock(&gr_dev_mutex);
44751 +/* must be called with
44753 + read_lock(&tasklist_lock);
44754 + read_lock(&grsec_exec_file_lock);
44756 +int gr_apply_subject_to_task(struct task_struct *task)
44758 + struct acl_object_label *obj;
44760 + struct acl_subject_label *tmpsubj;
44761 + struct file *filp;
44762 + struct name_entry *nmatch;
44764 + filp = task->exec_file;
44765 + if (filp == NULL)
44768 + /* the following is to apply the correct subject
44769 + on binaries running when the RBAC system
44770 + is enabled, when the binaries have been
44771 + replaced or deleted since their execution
44773 + when the RBAC system starts, the inode/dev
44774 + from exec_file will be one the RBAC system
44775 + is unaware of. It only knows the inode/dev
44776 + of the present file on disk, or the absence
44779 + preempt_disable();
44780 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44782 + nmatch = lookup_name_entry(tmpname);
44783 + preempt_enable();
44786 + if (nmatch->deleted)
44787 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44789 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44790 + if (tmpsubj != NULL)
44791 + task->acl = tmpsubj;
44793 + if (tmpsubj == NULL)
44794 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44797 + task->is_writable = 0;
44798 + /* ignore additional mmap checks for processes that are writable
44799 + by the default ACL */
44800 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44801 + if (unlikely(obj->mode & GR_WRITE))
44802 + task->is_writable = 1;
44803 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44804 + if (unlikely(obj->mode & GR_WRITE))
44805 + task->is_writable = 1;
44807 + gr_set_proc_res(task);
44809 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44810 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44820 +gr_set_acls(const int type)
44822 + struct task_struct *task, *task2;
44823 + struct acl_role_label *role = current->role;
44824 + __u16 acl_role_id = current->acl_role_id;
44825 + const struct cred *cred;
44829 + read_lock(&tasklist_lock);
44830 + read_lock(&grsec_exec_file_lock);
44831 + do_each_thread(task2, task) {
44832 + /* check to see if we're called from the exit handler,
44833 + if so, only replace ACLs that have inherited the admin
44836 + if (type && (task->role != role ||
44837 + task->acl_role_id != acl_role_id))
44840 + task->acl_role_id = 0;
44841 + task->acl_sp_role = 0;
44843 + if (task->exec_file) {
44844 + cred = __task_cred(task);
44845 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44846 + ret = gr_apply_subject_to_task(task);
44848 + read_unlock(&grsec_exec_file_lock);
44849 + read_unlock(&tasklist_lock);
44850 + rcu_read_unlock();
44851 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44855 + // it's a kernel process
44856 + task->role = kernel_role;
44857 + task->acl = kernel_role->root_label;
44858 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44859 + task->acl->mode &= ~GR_PROCFIND;
44862 + } while_each_thread(task2, task);
44863 + read_unlock(&grsec_exec_file_lock);
44864 + read_unlock(&tasklist_lock);
44865 + rcu_read_unlock();
44871 +gr_learn_resource(const struct task_struct *task,
44872 + const int res, const unsigned long wanted, const int gt)
44874 + struct acl_subject_label *acl;
44875 + const struct cred *cred;
44877 + if (unlikely((gr_status & GR_READY) &&
44878 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44879 + goto skip_reslog;
44881 +#ifdef CONFIG_GRKERNSEC_RESLOG
44882 + gr_log_resource(task, res, wanted, gt);
44886 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44891 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44892 + !(acl->resmask & (1 << (unsigned short) res))))
44895 + if (wanted >= acl->res[res].rlim_cur) {
44896 + unsigned long res_add;
44898 + res_add = wanted;
44901 + res_add += GR_RLIM_CPU_BUMP;
44903 + case RLIMIT_FSIZE:
44904 + res_add += GR_RLIM_FSIZE_BUMP;
44906 + case RLIMIT_DATA:
44907 + res_add += GR_RLIM_DATA_BUMP;
44909 + case RLIMIT_STACK:
44910 + res_add += GR_RLIM_STACK_BUMP;
44912 + case RLIMIT_CORE:
44913 + res_add += GR_RLIM_CORE_BUMP;
44916 + res_add += GR_RLIM_RSS_BUMP;
44918 + case RLIMIT_NPROC:
44919 + res_add += GR_RLIM_NPROC_BUMP;
44921 + case RLIMIT_NOFILE:
44922 + res_add += GR_RLIM_NOFILE_BUMP;
44924 + case RLIMIT_MEMLOCK:
44925 + res_add += GR_RLIM_MEMLOCK_BUMP;
44928 + res_add += GR_RLIM_AS_BUMP;
44930 + case RLIMIT_LOCKS:
44931 + res_add += GR_RLIM_LOCKS_BUMP;
44933 + case RLIMIT_SIGPENDING:
44934 + res_add += GR_RLIM_SIGPENDING_BUMP;
44936 + case RLIMIT_MSGQUEUE:
44937 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44939 + case RLIMIT_NICE:
44940 + res_add += GR_RLIM_NICE_BUMP;
44942 + case RLIMIT_RTPRIO:
44943 + res_add += GR_RLIM_RTPRIO_BUMP;
44945 + case RLIMIT_RTTIME:
44946 + res_add += GR_RLIM_RTTIME_BUMP;
44950 + acl->res[res].rlim_cur = res_add;
44952 + if (wanted > acl->res[res].rlim_max)
44953 + acl->res[res].rlim_max = res_add;
44955 + /* only log the subject filename, since resource logging is supported for
44956 + single-subject learning only */
44958 + cred = __task_cred(task);
44959 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44960 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44961 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44962 + "", (unsigned long) res, &task->signal->saved_ip);
44963 + rcu_read_unlock();
44969 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44971 +pax_set_initial_flags(struct linux_binprm *bprm)
44973 + struct task_struct *task = current;
44974 + struct acl_subject_label *proc;
44975 + unsigned long flags;
44977 + if (unlikely(!(gr_status & GR_READY)))
44980 + flags = pax_get_flags(task);
44982 + proc = task->acl;
44984 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44985 + flags &= ~MF_PAX_PAGEEXEC;
44986 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44987 + flags &= ~MF_PAX_SEGMEXEC;
44988 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44989 + flags &= ~MF_PAX_RANDMMAP;
44990 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44991 + flags &= ~MF_PAX_EMUTRAMP;
44992 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44993 + flags &= ~MF_PAX_MPROTECT;
44995 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44996 + flags |= MF_PAX_PAGEEXEC;
44997 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44998 + flags |= MF_PAX_SEGMEXEC;
44999 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
45000 + flags |= MF_PAX_RANDMMAP;
45001 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
45002 + flags |= MF_PAX_EMUTRAMP;
45003 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
45004 + flags |= MF_PAX_MPROTECT;
45006 + pax_set_flags(task, flags);
45012 +#ifdef CONFIG_SYSCTL
45013 +/* Eric Biederman likes breaking userland ABI and every inode-based security
45014 + system to save 35kb of memory */
45016 +/* we modify the passed in filename, but adjust it back before returning */
45017 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
45019 + struct name_entry *nmatch;
45020 + char *p, *lastp = NULL;
45021 + struct acl_object_label *obj = NULL, *tmp;
45022 + struct acl_subject_label *tmpsubj;
45025 + read_lock(&gr_inode_lock);
45027 + p = name + len - 1;
45029 + nmatch = lookup_name_entry(name);
45030 + if (lastp != NULL)
45033 + if (nmatch == NULL)
45034 + goto next_component;
45035 + tmpsubj = current->acl;
45037 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
45038 + if (obj != NULL) {
45039 + tmp = obj->globbed;
45041 + if (!glob_match(tmp->filename, name)) {
45049 + } while ((tmpsubj = tmpsubj->parent_subject));
45055 + while (*p != '/')
45067 + read_unlock(&gr_inode_lock);
45068 + /* obj returned will always be non-null */
45072 +/* returns 0 when allowing, non-zero on error
45073 + op of 0 is used for readdir, so we don't log the names of hidden files
45076 +gr_handle_sysctl(const struct ctl_table *table, const int op)
45078 + struct ctl_table *tmp;
45079 + const char *proc_sys = "/proc/sys";
45081 + struct acl_object_label *obj;
45082 + unsigned short len = 0, pos = 0, depth = 0, i;
45086 + if (unlikely(!(gr_status & GR_READY)))
45089 + /* for now, ignore operations on non-sysctl entries if it's not a
45091 + if (table->child != NULL && op != 0)
45095 + /* it's only a read if it's an entry, read on dirs is for readdir */
45096 + if (op & MAY_READ)
45098 + if (op & MAY_WRITE)
45099 + mode |= GR_WRITE;
45101 + preempt_disable();
45103 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45105 + /* it's only a read/write if it's an actual entry, not a dir
45106 + (which are opened for readdir)
45109 + /* convert the requested sysctl entry into a pathname */
45111 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45112 + len += strlen(tmp->procname);
45117 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45122 + memset(path, 0, PAGE_SIZE);
45124 + memcpy(path, proc_sys, strlen(proc_sys));
45126 + pos += strlen(proc_sys);
45128 + for (; depth > 0; depth--) {
45131 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45132 + if (depth == i) {
45133 + memcpy(path + pos, tmp->procname,
45134 + strlen(tmp->procname));
45135 + pos += strlen(tmp->procname);
45141 + obj = gr_lookup_by_name(path, pos);
45142 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45144 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45145 + ((err & mode) != mode))) {
45146 + __u32 new_mode = mode;
45148 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45151 + gr_log_learn_sysctl(path, new_mode);
45152 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45153 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45155 + } else if (!(err & GR_FIND)) {
45157 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45158 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45159 + path, (mode & GR_READ) ? " reading" : "",
45160 + (mode & GR_WRITE) ? " writing" : "");
45162 + } else if ((err & mode) != mode) {
45164 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45165 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45166 + path, (mode & GR_READ) ? " reading" : "",
45167 + (mode & GR_WRITE) ? " writing" : "");
45173 + preempt_enable();
45180 +gr_handle_proc_ptrace(struct task_struct *task)
45182 + struct file *filp;
45183 + struct task_struct *tmp = task;
45184 + struct task_struct *curtemp = current;
45187 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45188 + if (unlikely(!(gr_status & GR_READY)))
45192 + read_lock(&tasklist_lock);
45193 + read_lock(&grsec_exec_file_lock);
45194 + filp = task->exec_file;
45196 + while (tmp->pid > 0) {
45197 + if (tmp == curtemp)
45199 + tmp = tmp->real_parent;
45202 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45203 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45204 + read_unlock(&grsec_exec_file_lock);
45205 + read_unlock(&tasklist_lock);
45209 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45210 + if (!(gr_status & GR_READY)) {
45211 + read_unlock(&grsec_exec_file_lock);
45212 + read_unlock(&tasklist_lock);
45217 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45218 + read_unlock(&grsec_exec_file_lock);
45219 + read_unlock(&tasklist_lock);
45221 + if (retmode & GR_NOPTRACE)
45224 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45225 + && (current->acl != task->acl || (current->acl != current->role->root_label
45226 + && current->pid != task->pid)))
45232 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45234 + if (unlikely(!(gr_status & GR_READY)))
45237 + if (!(current->role->roletype & GR_ROLE_GOD))
45240 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45241 + p->role->rolename, gr_task_roletype_to_char(p),
45242 + p->acl->filename);
45246 +gr_handle_ptrace(struct task_struct *task, const long request)
45248 + struct task_struct *tmp = task;
45249 + struct task_struct *curtemp = current;
45252 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45253 + if (unlikely(!(gr_status & GR_READY)))
45257 + read_lock(&tasklist_lock);
45258 + while (tmp->pid > 0) {
45259 + if (tmp == curtemp)
45261 + tmp = tmp->real_parent;
45264 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45265 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45266 + read_unlock(&tasklist_lock);
45267 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45270 + read_unlock(&tasklist_lock);
45272 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45273 + if (!(gr_status & GR_READY))
45277 + read_lock(&grsec_exec_file_lock);
45278 + if (unlikely(!task->exec_file)) {
45279 + read_unlock(&grsec_exec_file_lock);
45283 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45284 + read_unlock(&grsec_exec_file_lock);
45286 + if (retmode & GR_NOPTRACE) {
45287 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45291 + if (retmode & GR_PTRACERD) {
45292 + switch (request) {
45293 + case PTRACE_POKETEXT:
45294 + case PTRACE_POKEDATA:
45295 + case PTRACE_POKEUSR:
45296 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45297 + case PTRACE_SETREGS:
45298 + case PTRACE_SETFPREGS:
45301 + case PTRACE_SETFPXREGS:
45303 +#ifdef CONFIG_ALTIVEC
45304 + case PTRACE_SETVRREGS:
45310 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45311 + !(current->role->roletype & GR_ROLE_GOD) &&
45312 + (current->acl != task->acl)) {
45313 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45320 +static int is_writable_mmap(const struct file *filp)
45322 + struct task_struct *task = current;
45323 + struct acl_object_label *obj, *obj2;
45325 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45326 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45327 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45328 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45329 + task->role->root_label);
45330 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45331 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45339 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45343 + if (unlikely(!file || !(prot & PROT_EXEC)))
45346 + if (is_writable_mmap(file))
45350 + gr_search_file(file->f_path.dentry,
45351 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45352 + file->f_path.mnt);
45354 + if (!gr_tpe_allow(file))
45357 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45358 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45360 + } else if (unlikely(!(mode & GR_EXEC))) {
45362 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45363 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45371 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45375 + if (unlikely(!file || !(prot & PROT_EXEC)))
45378 + if (is_writable_mmap(file))
45382 + gr_search_file(file->f_path.dentry,
45383 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45384 + file->f_path.mnt);
45386 + if (!gr_tpe_allow(file))
45389 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45390 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45392 + } else if (unlikely(!(mode & GR_EXEC))) {
45394 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45395 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45403 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45405 + unsigned long runtime;
45406 + unsigned long cputime;
45407 + unsigned int wday, cday;
45411 + struct timespec timeval;
45413 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45414 + !(task->acl->mode & GR_PROCACCT)))
45417 + do_posix_clock_monotonic_gettime(&timeval);
45418 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45419 + wday = runtime / (3600 * 24);
45420 + runtime -= wday * (3600 * 24);
45421 + whr = runtime / 3600;
45422 + runtime -= whr * 3600;
45423 + wmin = runtime / 60;
45424 + runtime -= wmin * 60;
45427 + cputime = (task->utime + task->stime) / HZ;
45428 + cday = cputime / (3600 * 24);
45429 + cputime -= cday * (3600 * 24);
45430 + chr = cputime / 3600;
45431 + cputime -= chr * 3600;
45432 + cmin = cputime / 60;
45433 + cputime -= cmin * 60;
45436 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45441 +void gr_set_kernel_label(struct task_struct *task)
45443 + if (gr_status & GR_READY) {
45444 + task->role = kernel_role;
45445 + task->acl = kernel_role->root_label;
45450 +#ifdef CONFIG_TASKSTATS
45451 +int gr_is_taskstats_denied(int pid)
45453 + struct task_struct *task;
45454 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45455 + const struct cred *cred;
45459 + /* restrict taskstats viewing to un-chrooted root users
45460 + who have the 'view' subject flag if the RBAC system is enabled
45464 + read_lock(&tasklist_lock);
45465 + task = find_task_by_vpid(pid);
45467 +#ifdef CONFIG_GRKERNSEC_CHROOT
45468 + if (proc_is_chrooted(task))
45471 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45472 + cred = __task_cred(task);
45473 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45474 + if (cred->uid != 0)
45476 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45477 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45481 + if (gr_status & GR_READY) {
45482 + if (!(task->acl->mode & GR_VIEW))
45488 + read_unlock(&tasklist_lock);
45489 + rcu_read_unlock();
45495 +/* AUXV entries are filled via a descendant of search_binary_handler
45496 + after we've already applied the subject for the target
45498 +int gr_acl_enable_at_secure(void)
45500 + if (unlikely(!(gr_status & GR_READY)))
45503 + if (current->acl->mode & GR_ATSECURE)
45509 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45511 + struct task_struct *task = current;
45512 + struct dentry *dentry = file->f_path.dentry;
45513 + struct vfsmount *mnt = file->f_path.mnt;
45514 + struct acl_object_label *obj, *tmp;
45515 + struct acl_subject_label *subj;
45516 + unsigned int bufsize;
45519 + dev_t dev = __get_dev(dentry);
45521 + if (unlikely(!(gr_status & GR_READY)))
45524 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45527 + /* ignore Eric Biederman */
45528 + if (IS_PRIVATE(dentry->d_inode))
45531 + subj = task->acl;
45533 + obj = lookup_acl_obj_label(ino, dev, subj);
45535 + return (obj->mode & GR_FIND) ? 1 : 0;
45536 + } while ((subj = subj->parent_subject));
45538 + /* this is purely an optimization since we're looking for an object
45539 + for the directory we're doing a readdir on
45540 + if it's possible for any globbed object to match the entry we're
45541 + filling into the directory, then the object we find here will be
45542 + an anchor point with attached globbed objects
45544 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45545 + if (obj->globbed == NULL)
45546 + return (obj->mode & GR_FIND) ? 1 : 0;
45548 + is_not_root = ((obj->filename[0] == '/') &&
45549 + (obj->filename[1] == '\0')) ? 0 : 1;
45550 + bufsize = PAGE_SIZE - namelen - is_not_root;
45552 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45553 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45556 + preempt_disable();
45557 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45560 + bufsize = strlen(path);
45562 + /* if base is "/", don't append an additional slash */
45564 + *(path + bufsize) = '/';
45565 + memcpy(path + bufsize + is_not_root, name, namelen);
45566 + *(path + bufsize + namelen + is_not_root) = '\0';
45568 + tmp = obj->globbed;
45570 + if (!glob_match(tmp->filename, path)) {
45571 + preempt_enable();
45572 + return (tmp->mode & GR_FIND) ? 1 : 0;
45576 + preempt_enable();
45577 + return (obj->mode & GR_FIND) ? 1 : 0;
45580 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45581 +EXPORT_SYMBOL(gr_acl_is_enabled);
45583 +EXPORT_SYMBOL(gr_learn_resource);
45584 +EXPORT_SYMBOL(gr_set_kernel_label);
45585 +#ifdef CONFIG_SECURITY
45586 +EXPORT_SYMBOL(gr_check_user_change);
45587 +EXPORT_SYMBOL(gr_check_group_change);
45590 diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45591 --- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45592 +++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45594 +#include <linux/kernel.h>
45595 +#include <linux/module.h>
45596 +#include <linux/sched.h>
45597 +#include <linux/gracl.h>
45598 +#include <linux/grsecurity.h>
45599 +#include <linux/grinternal.h>
45601 +static const char *captab_log[] = {
45603 + "CAP_DAC_OVERRIDE",
45604 + "CAP_DAC_READ_SEARCH",
45611 + "CAP_LINUX_IMMUTABLE",
45612 + "CAP_NET_BIND_SERVICE",
45613 + "CAP_NET_BROADCAST",
45618 + "CAP_SYS_MODULE",
45620 + "CAP_SYS_CHROOT",
45621 + "CAP_SYS_PTRACE",
45626 + "CAP_SYS_RESOURCE",
45628 + "CAP_SYS_TTY_CONFIG",
45631 + "CAP_AUDIT_WRITE",
45632 + "CAP_AUDIT_CONTROL",
45634 + "CAP_MAC_OVERRIDE",
45639 +EXPORT_SYMBOL(gr_is_capable);
45640 +EXPORT_SYMBOL(gr_is_capable_nolog);
45643 +gr_is_capable(const int cap)
45645 + struct task_struct *task = current;
45646 + const struct cred *cred = current_cred();
45647 + struct acl_subject_label *curracl;
45648 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45649 + kernel_cap_t cap_audit = __cap_empty_set;
45651 + if (!gr_acl_is_enabled())
45654 + curracl = task->acl;
45656 + cap_drop = curracl->cap_lower;
45657 + cap_mask = curracl->cap_mask;
45658 + cap_audit = curracl->cap_invert_audit;
45660 + while ((curracl = curracl->parent_subject)) {
45661 + /* if the cap isn't specified in the current computed mask but is specified in the
45662 + current level subject, and is lowered in the current level subject, then add
45663 + it to the set of dropped capabilities
45664 + otherwise, add the current level subject's mask to the current computed mask
45666 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45667 + cap_raise(cap_mask, cap);
45668 + if (cap_raised(curracl->cap_lower, cap))
45669 + cap_raise(cap_drop, cap);
45670 + if (cap_raised(curracl->cap_invert_audit, cap))
45671 + cap_raise(cap_audit, cap);
45675 + if (!cap_raised(cap_drop, cap)) {
45676 + if (cap_raised(cap_audit, cap))
45677 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45681 + curracl = task->acl;
45683 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45684 + && cap_raised(cred->cap_effective, cap)) {
45685 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45686 + task->role->roletype, cred->uid,
45687 + cred->gid, task->exec_file ?
45688 + gr_to_filename(task->exec_file->f_path.dentry,
45689 + task->exec_file->f_path.mnt) : curracl->filename,
45690 + curracl->filename, 0UL,
45691 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45695 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45696 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45701 +gr_is_capable_nolog(const int cap)
45703 + struct acl_subject_label *curracl;
45704 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45706 + if (!gr_acl_is_enabled())
45709 + curracl = current->acl;
45711 + cap_drop = curracl->cap_lower;
45712 + cap_mask = curracl->cap_mask;
45714 + while ((curracl = curracl->parent_subject)) {
45715 + /* if the cap isn't specified in the current computed mask but is specified in the
45716 + current level subject, and is lowered in the current level subject, then add
45717 + it to the set of dropped capabilities
45718 + otherwise, add the current level subject's mask to the current computed mask
45720 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45721 + cap_raise(cap_mask, cap);
45722 + if (cap_raised(curracl->cap_lower, cap))
45723 + cap_raise(cap_drop, cap);
45727 + if (!cap_raised(cap_drop, cap))
45733 diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45734 --- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45735 +++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45737 +#include <linux/kernel.h>
45738 +#include <linux/sched.h>
45739 +#include <linux/types.h>
45740 +#include <linux/fs.h>
45741 +#include <linux/file.h>
45742 +#include <linux/stat.h>
45743 +#include <linux/grsecurity.h>
45744 +#include <linux/grinternal.h>
45745 +#include <linux/gracl.h>
45748 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45749 + const struct vfsmount * mnt)
45753 + if (unlikely(!dentry->d_inode))
45757 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45759 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45760 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45762 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45763 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45765 + } else if (unlikely(!(mode & GR_FIND)))
45772 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45775 + __u32 reqmode = GR_FIND;
45778 + if (unlikely(!dentry->d_inode))
45781 + if (unlikely(fmode & O_APPEND))
45782 + reqmode |= GR_APPEND;
45783 + else if (unlikely(fmode & FMODE_WRITE))
45784 + reqmode |= GR_WRITE;
45785 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45786 + reqmode |= GR_READ;
45787 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45788 + reqmode &= ~GR_READ;
45790 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45793 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45794 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45795 + reqmode & GR_READ ? " reading" : "",
45796 + reqmode & GR_WRITE ? " writing" : reqmode &
45797 + GR_APPEND ? " appending" : "");
45800 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45802 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45803 + reqmode & GR_READ ? " reading" : "",
45804 + reqmode & GR_WRITE ? " writing" : reqmode &
45805 + GR_APPEND ? " appending" : "");
45807 + } else if (unlikely((mode & reqmode) != reqmode))
45814 +gr_acl_handle_creat(const struct dentry * dentry,
45815 + const struct dentry * p_dentry,
45816 + const struct vfsmount * p_mnt, const int fmode,
45819 + __u32 reqmode = GR_WRITE | GR_CREATE;
45822 + if (unlikely(fmode & O_APPEND))
45823 + reqmode |= GR_APPEND;
45824 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45825 + reqmode |= GR_READ;
45826 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45827 + reqmode |= GR_SETID;
45830 + gr_check_create(dentry, p_dentry, p_mnt,
45831 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45833 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45834 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45835 + reqmode & GR_READ ? " reading" : "",
45836 + reqmode & GR_WRITE ? " writing" : reqmode &
45837 + GR_APPEND ? " appending" : "");
45840 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45842 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45843 + reqmode & GR_READ ? " reading" : "",
45844 + reqmode & GR_WRITE ? " writing" : reqmode &
45845 + GR_APPEND ? " appending" : "");
45847 + } else if (unlikely((mode & reqmode) != reqmode))
45854 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45857 + __u32 mode, reqmode = GR_FIND;
45859 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45860 + reqmode |= GR_EXEC;
45861 + if (fmode & S_IWOTH)
45862 + reqmode |= GR_WRITE;
45863 + if (fmode & S_IROTH)
45864 + reqmode |= GR_READ;
45867 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45870 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45871 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45872 + reqmode & GR_READ ? " reading" : "",
45873 + reqmode & GR_WRITE ? " writing" : "",
45874 + reqmode & GR_EXEC ? " executing" : "");
45877 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45879 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45880 + reqmode & GR_READ ? " reading" : "",
45881 + reqmode & GR_WRITE ? " writing" : "",
45882 + reqmode & GR_EXEC ? " executing" : "");
45884 + } else if (unlikely((mode & reqmode) != reqmode))
45890 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45894 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45896 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45897 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45899 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45900 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45902 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45905 + return (reqmode);
45909 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45911 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45915 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45917 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45921 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45923 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45927 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45929 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45933 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45936 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45939 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45940 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45941 + GR_FCHMOD_ACL_MSG);
45943 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45948 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45951 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45952 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45953 + GR_CHMOD_ACL_MSG);
45955 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45960 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45962 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45966 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45968 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45972 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45974 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45978 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45980 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45981 + GR_UNIXCONNECT_ACL_MSG);
45984 +/* hardlinks require at minimum create permission,
45985 + any additional privilege required is based on the
45986 + privilege of the file being linked to
45989 +gr_acl_handle_link(const struct dentry * new_dentry,
45990 + const struct dentry * parent_dentry,
45991 + const struct vfsmount * parent_mnt,
45992 + const struct dentry * old_dentry,
45993 + const struct vfsmount * old_mnt, const char *to)
45996 + __u32 needmode = GR_CREATE | GR_LINK;
45997 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
46000 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
46003 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
46004 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46006 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46007 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46009 + } else if (unlikely((mode & needmode) != needmode))
46016 +gr_acl_handle_symlink(const struct dentry * new_dentry,
46017 + const struct dentry * parent_dentry,
46018 + const struct vfsmount * parent_mnt, const char *from)
46020 + __u32 needmode = GR_WRITE | GR_CREATE;
46024 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46025 + GR_CREATE | GR_AUDIT_CREATE |
46026 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
46028 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
46029 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46031 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46032 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46034 + } else if (unlikely((mode & needmode) != needmode))
46037 + return (GR_WRITE | GR_CREATE);
46040 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
46044 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
46046 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
46047 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
46049 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
46050 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
46052 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
46055 + return (reqmode);
46059 +gr_acl_handle_mknod(const struct dentry * new_dentry,
46060 + const struct dentry * parent_dentry,
46061 + const struct vfsmount * parent_mnt,
46064 + __u32 reqmode = GR_WRITE | GR_CREATE;
46065 + if (unlikely(mode & (S_ISUID | S_ISGID)))
46066 + reqmode |= GR_SETID;
46068 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46069 + reqmode, GR_MKNOD_ACL_MSG);
46073 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
46074 + const struct dentry *parent_dentry,
46075 + const struct vfsmount *parent_mnt)
46077 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46078 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46081 +#define RENAME_CHECK_SUCCESS(old, new) \
46082 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46083 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46086 +gr_acl_handle_rename(struct dentry *new_dentry,
46087 + struct dentry *parent_dentry,
46088 + const struct vfsmount *parent_mnt,
46089 + struct dentry *old_dentry,
46090 + struct inode *old_parent_inode,
46091 + struct vfsmount *old_mnt, const char *newname)
46093 + __u32 comp1, comp2;
46096 + if (unlikely(!gr_acl_is_enabled()))
46099 + if (!new_dentry->d_inode) {
46100 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46101 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46102 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46103 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46104 + GR_DELETE | GR_AUDIT_DELETE |
46105 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46106 + GR_SUPPRESS, old_mnt);
46108 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46109 + GR_CREATE | GR_DELETE |
46110 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46111 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46112 + GR_SUPPRESS, parent_mnt);
46114 + gr_search_file(old_dentry,
46115 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46116 + GR_DELETE | GR_AUDIT_DELETE |
46117 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46120 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46121 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46122 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46123 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46124 + && !(comp2 & GR_SUPPRESS)) {
46125 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46127 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46134 +gr_acl_handle_exit(void)
46138 + struct file *exec_file;
46140 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46141 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46142 + id = current->acl_role_id;
46143 + rolename = current->role->rolename;
46145 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46148 + write_lock(&grsec_exec_file_lock);
46149 + exec_file = current->exec_file;
46150 + current->exec_file = NULL;
46151 + write_unlock(&grsec_exec_file_lock);
46158 +gr_acl_handle_procpidmem(const struct task_struct *task)
46160 + if (unlikely(!gr_acl_is_enabled()))
46163 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46168 diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46169 --- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46170 +++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46172 +#include <linux/kernel.h>
46173 +#include <asm/uaccess.h>
46174 +#include <asm/errno.h>
46175 +#include <net/sock.h>
46176 +#include <linux/file.h>
46177 +#include <linux/fs.h>
46178 +#include <linux/net.h>
46179 +#include <linux/in.h>
46180 +#include <linux/skbuff.h>
46181 +#include <linux/ip.h>
46182 +#include <linux/udp.h>
46183 +#include <linux/types.h>
46184 +#include <linux/sched.h>
46185 +#include <linux/netdevice.h>
46186 +#include <linux/inetdevice.h>
46187 +#include <linux/gracl.h>
46188 +#include <linux/grsecurity.h>
46189 +#include <linux/grinternal.h>
46191 +#define GR_BIND 0x01
46192 +#define GR_CONNECT 0x02
46193 +#define GR_INVERT 0x04
46194 +#define GR_BINDOVERRIDE 0x08
46195 +#define GR_CONNECTOVERRIDE 0x10
46196 +#define GR_SOCK_FAMILY 0x20
46198 +static const char * gr_protocols[IPPROTO_MAX] = {
46199 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46200 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46201 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46202 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46203 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46204 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46205 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46206 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46207 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46208 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46209 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46210 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46211 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46212 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46213 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46214 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46215 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46216 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46217 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46218 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46219 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46220 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46221 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46222 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46223 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46224 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46225 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46226 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46227 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46228 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46229 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46230 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46233 +static const char * gr_socktypes[SOCK_MAX] = {
46234 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46235 + "unknown:7", "unknown:8", "unknown:9", "packet"
46238 +static const char * gr_sockfamilies[AF_MAX+1] = {
46239 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46240 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46241 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46242 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46246 +gr_proto_to_name(unsigned char proto)
46248 + return gr_protocols[proto];
46252 +gr_socktype_to_name(unsigned char type)
46254 + return gr_socktypes[type];
46258 +gr_sockfamily_to_name(unsigned char family)
46260 + return gr_sockfamilies[family];
46264 +gr_search_socket(const int domain, const int type, const int protocol)
46266 + struct acl_subject_label *curr;
46267 + const struct cred *cred = current_cred();
46269 + if (unlikely(!gr_acl_is_enabled()))
46272 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46273 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46274 + goto exit; // let the kernel handle it
46276 + curr = current->acl;
46278 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46279 + /* the family is allowed, if this is PF_INET allow it only if
46280 + the extra sock type/protocol checks pass */
46281 + if (domain == PF_INET)
46285 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46286 + __u32 fakeip = 0;
46287 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46288 + current->role->roletype, cred->uid,
46289 + cred->gid, current->exec_file ?
46290 + gr_to_filename(current->exec_file->f_path.dentry,
46291 + current->exec_file->f_path.mnt) :
46292 + curr->filename, curr->filename,
46293 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46294 + ¤t->signal->saved_ip);
46301 + /* the rest of this checking is for IPv4 only */
46305 + if ((curr->ip_type & (1 << type)) &&
46306 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46309 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46310 + /* we don't place acls on raw sockets , and sometimes
46311 + dgram/ip sockets are opened for ioctl and not
46312 + bind/connect, so we'll fake a bind learn log */
46313 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46314 + __u32 fakeip = 0;
46315 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46316 + current->role->roletype, cred->uid,
46317 + cred->gid, current->exec_file ?
46318 + gr_to_filename(current->exec_file->f_path.dentry,
46319 + current->exec_file->f_path.mnt) :
46320 + curr->filename, curr->filename,
46321 + &fakeip, 0, type,
46322 + protocol, GR_CONNECT, ¤t->signal->saved_ip);
46323 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46324 + __u32 fakeip = 0;
46325 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46326 + current->role->roletype, cred->uid,
46327 + cred->gid, current->exec_file ?
46328 + gr_to_filename(current->exec_file->f_path.dentry,
46329 + current->exec_file->f_path.mnt) :
46330 + curr->filename, curr->filename,
46331 + &fakeip, 0, type,
46332 + protocol, GR_BIND, ¤t->signal->saved_ip);
46334 + /* we'll log when they use connect or bind */
46339 + if (domain == PF_INET)
46340 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46341 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46343 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46344 + gr_socktype_to_name(type), protocol);
46351 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46353 + if ((ip->mode & mode) &&
46354 + (ip_port >= ip->low) &&
46355 + (ip_port <= ip->high) &&
46356 + ((ntohl(ip_addr) & our_netmask) ==
46357 + (ntohl(our_addr) & our_netmask))
46358 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46359 + && (ip->type & (1 << type))) {
46360 + if (ip->mode & GR_INVERT)
46361 + return 2; // specifically denied
46363 + return 1; // allowed
46366 + return 0; // not specifically allowed, may continue parsing
46370 +gr_search_connectbind(const int full_mode, struct sock *sk,
46371 + struct sockaddr_in *addr, const int type)
46373 + char iface[IFNAMSIZ] = {0};
46374 + struct acl_subject_label *curr;
46375 + struct acl_ip_label *ip;
46376 + struct inet_sock *isk;
46377 + struct net_device *dev;
46378 + struct in_device *idev;
46381 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46382 + __u32 ip_addr = 0;
46384 + __u32 our_netmask;
46386 + __u16 ip_port = 0;
46387 + const struct cred *cred = current_cred();
46389 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46392 + curr = current->acl;
46393 + isk = inet_sk(sk);
46395 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46396 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46397 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46398 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46399 + struct sockaddr_in saddr;
46402 + saddr.sin_family = AF_INET;
46403 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46404 + saddr.sin_port = isk->inet_sport;
46406 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46410 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46418 + ip_addr = addr->sin_addr.s_addr;
46419 + ip_port = ntohs(addr->sin_port);
46421 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46422 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46423 + current->role->roletype, cred->uid,
46424 + cred->gid, current->exec_file ?
46425 + gr_to_filename(current->exec_file->f_path.dentry,
46426 + current->exec_file->f_path.mnt) :
46427 + curr->filename, curr->filename,
46428 + &ip_addr, ip_port, type,
46429 + sk->sk_protocol, mode, ¤t->signal->saved_ip);
46433 + for (i = 0; i < curr->ip_num; i++) {
46434 + ip = *(curr->ips + i);
46435 + if (ip->iface != NULL) {
46436 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46437 + p = strchr(iface, ':');
46440 + dev = dev_get_by_name(sock_net(sk), iface);
46443 + idev = in_dev_get(dev);
46444 + if (idev == NULL) {
46450 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46451 + our_addr = ifa->ifa_address;
46452 + our_netmask = 0xffffffff;
46453 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46455 + rcu_read_unlock();
46456 + in_dev_put(idev);
46459 + } else if (ret == 2) {
46460 + rcu_read_unlock();
46461 + in_dev_put(idev);
46466 + } endfor_ifa(idev);
46467 + rcu_read_unlock();
46468 + in_dev_put(idev);
46471 + our_addr = ip->addr;
46472 + our_netmask = ip->netmask;
46473 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46476 + else if (ret == 2)
46482 + if (mode == GR_BIND)
46483 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46484 + else if (mode == GR_CONNECT)
46485 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46491 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46493 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46497 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46499 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46502 +int gr_search_listen(struct socket *sock)
46504 + struct sock *sk = sock->sk;
46505 + struct sockaddr_in addr;
46507 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46508 + addr.sin_port = inet_sk(sk)->inet_sport;
46510 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46513 +int gr_search_accept(struct socket *sock)
46515 + struct sock *sk = sock->sk;
46516 + struct sockaddr_in addr;
46518 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46519 + addr.sin_port = inet_sk(sk)->inet_sport;
46521 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46525 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46528 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46530 + struct sockaddr_in sin;
46531 + const struct inet_sock *inet = inet_sk(sk);
46533 + sin.sin_addr.s_addr = inet->inet_daddr;
46534 + sin.sin_port = inet->inet_dport;
46536 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46541 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46543 + struct sockaddr_in sin;
46545 + if (unlikely(skb->len < sizeof (struct udphdr)))
46546 + return 0; // skip this packet
46548 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46549 + sin.sin_port = udp_hdr(skb)->source;
46551 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46553 diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46554 --- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46555 +++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46557 +#include <linux/kernel.h>
46558 +#include <linux/mm.h>
46559 +#include <linux/sched.h>
46560 +#include <linux/poll.h>
46561 +#include <linux/string.h>
46562 +#include <linux/file.h>
46563 +#include <linux/types.h>
46564 +#include <linux/vmalloc.h>
46565 +#include <linux/grinternal.h>
46567 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46568 + size_t count, loff_t *ppos);
46569 +extern int gr_acl_is_enabled(void);
46571 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46572 +static int gr_learn_attached;
46574 +/* use a 512k buffer */
46575 +#define LEARN_BUFFER_SIZE (512 * 1024)
46577 +static DEFINE_SPINLOCK(gr_learn_lock);
46578 +static DEFINE_MUTEX(gr_learn_user_mutex);
46580 +/* we need to maintain two buffers, so that the kernel context of grlearn
46581 + uses a semaphore around the userspace copying, and the other kernel contexts
46582 + use a spinlock when copying into the buffer, since they cannot sleep
46584 +static char *learn_buffer;
46585 +static char *learn_buffer_user;
46586 +static int learn_buffer_len;
46587 +static int learn_buffer_user_len;
46590 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46592 + DECLARE_WAITQUEUE(wait, current);
46593 + ssize_t retval = 0;
46595 + add_wait_queue(&learn_wait, &wait);
46596 + set_current_state(TASK_INTERRUPTIBLE);
46598 + mutex_lock(&gr_learn_user_mutex);
46599 + spin_lock(&gr_learn_lock);
46600 + if (learn_buffer_len)
46602 + spin_unlock(&gr_learn_lock);
46603 + mutex_unlock(&gr_learn_user_mutex);
46604 + if (file->f_flags & O_NONBLOCK) {
46605 + retval = -EAGAIN;
46608 + if (signal_pending(current)) {
46609 + retval = -ERESTARTSYS;
46616 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46617 + learn_buffer_user_len = learn_buffer_len;
46618 + retval = learn_buffer_len;
46619 + learn_buffer_len = 0;
46621 + spin_unlock(&gr_learn_lock);
46623 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46624 + retval = -EFAULT;
46626 + mutex_unlock(&gr_learn_user_mutex);
46628 + set_current_state(TASK_RUNNING);
46629 + remove_wait_queue(&learn_wait, &wait);
46633 +static unsigned int
46634 +poll_learn(struct file * file, poll_table * wait)
46636 + poll_wait(file, &learn_wait, wait);
46638 + if (learn_buffer_len)
46639 + return (POLLIN | POLLRDNORM);
46645 +gr_clear_learn_entries(void)
46649 + mutex_lock(&gr_learn_user_mutex);
46650 + spin_lock(&gr_learn_lock);
46651 + tmp = learn_buffer;
46652 + learn_buffer = NULL;
46653 + spin_unlock(&gr_learn_lock);
46656 + if (learn_buffer_user != NULL) {
46657 + vfree(learn_buffer_user);
46658 + learn_buffer_user = NULL;
46660 + learn_buffer_len = 0;
46661 + mutex_unlock(&gr_learn_user_mutex);
46667 +gr_add_learn_entry(const char *fmt, ...)
46670 + unsigned int len;
46672 + if (!gr_learn_attached)
46675 + spin_lock(&gr_learn_lock);
46677 + /* leave a gap at the end so we know when it's "full" but don't have to
46678 + compute the exact length of the string we're trying to append
46680 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46681 + spin_unlock(&gr_learn_lock);
46682 + wake_up_interruptible(&learn_wait);
46685 + if (learn_buffer == NULL) {
46686 + spin_unlock(&gr_learn_lock);
46690 + va_start(args, fmt);
46691 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46694 + learn_buffer_len += len + 1;
46696 + spin_unlock(&gr_learn_lock);
46697 + wake_up_interruptible(&learn_wait);
46703 +open_learn(struct inode *inode, struct file *file)
46705 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46707 + if (file->f_mode & FMODE_READ) {
46709 + mutex_lock(&gr_learn_user_mutex);
46710 + if (learn_buffer == NULL)
46711 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46712 + if (learn_buffer_user == NULL)
46713 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46714 + if (learn_buffer == NULL) {
46715 + retval = -ENOMEM;
46718 + if (learn_buffer_user == NULL) {
46719 + retval = -ENOMEM;
46722 + learn_buffer_len = 0;
46723 + learn_buffer_user_len = 0;
46724 + gr_learn_attached = 1;
46726 + mutex_unlock(&gr_learn_user_mutex);
46733 +close_learn(struct inode *inode, struct file *file)
46735 + if (file->f_mode & FMODE_READ) {
46736 + char *tmp = NULL;
46737 + mutex_lock(&gr_learn_user_mutex);
46738 + spin_lock(&gr_learn_lock);
46739 + tmp = learn_buffer;
46740 + learn_buffer = NULL;
46741 + spin_unlock(&gr_learn_lock);
46744 + if (learn_buffer_user != NULL) {
46745 + vfree(learn_buffer_user);
46746 + learn_buffer_user = NULL;
46748 + learn_buffer_len = 0;
46749 + learn_buffer_user_len = 0;
46750 + gr_learn_attached = 0;
46751 + mutex_unlock(&gr_learn_user_mutex);
46757 +const struct file_operations grsec_fops = {
46758 + .read = read_learn,
46759 + .write = write_grsec_handler,
46760 + .open = open_learn,
46761 + .release = close_learn,
46762 + .poll = poll_learn,
46764 diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46765 --- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46766 +++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46768 +#include <linux/kernel.h>
46769 +#include <linux/sched.h>
46770 +#include <linux/gracl.h>
46771 +#include <linux/grinternal.h>
46773 +static const char *restab_log[] = {
46774 + [RLIMIT_CPU] = "RLIMIT_CPU",
46775 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46776 + [RLIMIT_DATA] = "RLIMIT_DATA",
46777 + [RLIMIT_STACK] = "RLIMIT_STACK",
46778 + [RLIMIT_CORE] = "RLIMIT_CORE",
46779 + [RLIMIT_RSS] = "RLIMIT_RSS",
46780 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46781 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46782 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46783 + [RLIMIT_AS] = "RLIMIT_AS",
46784 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46785 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46786 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46787 + [RLIMIT_NICE] = "RLIMIT_NICE",
46788 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46789 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46790 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46794 +gr_log_resource(const struct task_struct *task,
46795 + const int res, const unsigned long wanted, const int gt)
46797 + const struct cred *cred;
46798 + unsigned long rlim;
46800 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46803 + // not yet supported resource
46804 + if (unlikely(!restab_log[res]))
46807 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46808 + rlim = task_rlimit_max(task, res);
46810 + rlim = task_rlimit(task, res);
46812 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46816 + cred = __task_cred(task);
46818 + if (res == RLIMIT_NPROC &&
46819 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46820 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46821 + goto out_rcu_unlock;
46822 + else if (res == RLIMIT_MEMLOCK &&
46823 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46824 + goto out_rcu_unlock;
46825 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46826 + goto out_rcu_unlock;
46827 + rcu_read_unlock();
46829 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46833 + rcu_read_unlock();
46836 diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46837 --- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46838 +++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46840 +#include <linux/kernel.h>
46841 +#include <linux/mm.h>
46842 +#include <asm/uaccess.h>
46843 +#include <asm/errno.h>
46844 +#include <asm/mman.h>
46845 +#include <net/sock.h>
46846 +#include <linux/file.h>
46847 +#include <linux/fs.h>
46848 +#include <linux/net.h>
46849 +#include <linux/in.h>
46850 +#include <linux/slab.h>
46851 +#include <linux/types.h>
46852 +#include <linux/sched.h>
46853 +#include <linux/timer.h>
46854 +#include <linux/gracl.h>
46855 +#include <linux/grsecurity.h>
46856 +#include <linux/grinternal.h>
46858 +static struct crash_uid *uid_set;
46859 +static unsigned short uid_used;
46860 +static DEFINE_SPINLOCK(gr_uid_lock);
46861 +extern rwlock_t gr_inode_lock;
46862 +extern struct acl_subject_label *
46863 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46864 + struct acl_role_label *role);
46866 +#ifdef CONFIG_BTRFS_FS
46867 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46868 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46871 +static inline dev_t __get_dev(const struct dentry *dentry)
46873 +#ifdef CONFIG_BTRFS_FS
46874 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46875 + return get_btrfs_dev_from_inode(dentry->d_inode);
46878 + return dentry->d_inode->i_sb->s_dev;
46882 +gr_init_uidset(void)
46885 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46888 + return uid_set ? 1 : 0;
46892 +gr_free_uidset(void)
46901 +gr_find_uid(const uid_t uid)
46903 + struct crash_uid *tmp = uid_set;
46905 + int low = 0, high = uid_used - 1, mid;
46907 + while (high >= low) {
46908 + mid = (low + high) >> 1;
46909 + buid = tmp[mid].uid;
46921 +static __inline__ void
46922 +gr_insertsort(void)
46924 + unsigned short i, j;
46925 + struct crash_uid index;
46927 + for (i = 1; i < uid_used; i++) {
46928 + index = uid_set[i];
46930 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46931 + uid_set[j] = uid_set[j - 1];
46934 + uid_set[j] = index;
46940 +static __inline__ void
46941 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46945 + if (uid_used == GR_UIDTABLE_MAX)
46948 + loc = gr_find_uid(uid);
46951 + uid_set[loc].expires = expires;
46955 + uid_set[uid_used].uid = uid;
46956 + uid_set[uid_used].expires = expires;
46965 +gr_remove_uid(const unsigned short loc)
46967 + unsigned short i;
46969 + for (i = loc + 1; i < uid_used; i++)
46970 + uid_set[i - 1] = uid_set[i];
46978 +gr_check_crash_uid(const uid_t uid)
46983 + if (unlikely(!gr_acl_is_enabled()))
46986 + spin_lock(&gr_uid_lock);
46987 + loc = gr_find_uid(uid);
46992 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46993 + gr_remove_uid(loc);
46998 + spin_unlock(&gr_uid_lock);
47002 +static __inline__ int
47003 +proc_is_setxid(const struct cred *cred)
47005 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
47006 + cred->uid != cred->fsuid)
47008 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
47009 + cred->gid != cred->fsgid)
47015 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
47018 +gr_handle_crash(struct task_struct *task, const int sig)
47020 + struct acl_subject_label *curr;
47021 + struct acl_subject_label *curr2;
47022 + struct task_struct *tsk, *tsk2;
47023 + const struct cred *cred;
47024 + const struct cred *cred2;
47026 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
47029 + if (unlikely(!gr_acl_is_enabled()))
47032 + curr = task->acl;
47034 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
47037 + if (time_before_eq(curr->expires, get_seconds())) {
47038 + curr->expires = 0;
47039 + curr->crashes = 0;
47044 + if (!curr->expires)
47045 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
47047 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47048 + time_after(curr->expires, get_seconds())) {
47050 + cred = __task_cred(task);
47051 + if (cred->uid && proc_is_setxid(cred)) {
47052 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47053 + spin_lock(&gr_uid_lock);
47054 + gr_insert_uid(cred->uid, curr->expires);
47055 + spin_unlock(&gr_uid_lock);
47056 + curr->expires = 0;
47057 + curr->crashes = 0;
47058 + read_lock(&tasklist_lock);
47059 + do_each_thread(tsk2, tsk) {
47060 + cred2 = __task_cred(tsk);
47061 + if (tsk != task && cred2->uid == cred->uid)
47062 + gr_fake_force_sig(SIGKILL, tsk);
47063 + } while_each_thread(tsk2, tsk);
47064 + read_unlock(&tasklist_lock);
47066 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47067 + read_lock(&tasklist_lock);
47068 + do_each_thread(tsk2, tsk) {
47069 + if (likely(tsk != task)) {
47070 + curr2 = tsk->acl;
47072 + if (curr2->device == curr->device &&
47073 + curr2->inode == curr->inode)
47074 + gr_fake_force_sig(SIGKILL, tsk);
47076 + } while_each_thread(tsk2, tsk);
47077 + read_unlock(&tasklist_lock);
47079 + rcu_read_unlock();
47086 +gr_check_crash_exec(const struct file *filp)
47088 + struct acl_subject_label *curr;
47090 + if (unlikely(!gr_acl_is_enabled()))
47093 + read_lock(&gr_inode_lock);
47094 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47095 + __get_dev(filp->f_path.dentry),
47097 + read_unlock(&gr_inode_lock);
47099 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47100 + (!curr->crashes && !curr->expires))
47103 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47104 + time_after(curr->expires, get_seconds()))
47106 + else if (time_before_eq(curr->expires, get_seconds())) {
47107 + curr->crashes = 0;
47108 + curr->expires = 0;
47115 +gr_handle_alertkill(struct task_struct *task)
47117 + struct acl_subject_label *curracl;
47119 + struct task_struct *p, *p2;
47121 + if (unlikely(!gr_acl_is_enabled()))
47124 + curracl = task->acl;
47125 + curr_ip = task->signal->curr_ip;
47127 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47128 + read_lock(&tasklist_lock);
47129 + do_each_thread(p2, p) {
47130 + if (p->signal->curr_ip == curr_ip)
47131 + gr_fake_force_sig(SIGKILL, p);
47132 + } while_each_thread(p2, p);
47133 + read_unlock(&tasklist_lock);
47134 + } else if (curracl->mode & GR_KILLPROC)
47135 + gr_fake_force_sig(SIGKILL, task);
47139 diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47140 --- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47141 +++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47143 +#include <linux/kernel.h>
47144 +#include <linux/mm.h>
47145 +#include <linux/sched.h>
47146 +#include <linux/file.h>
47147 +#include <linux/ipc.h>
47148 +#include <linux/gracl.h>
47149 +#include <linux/grsecurity.h>
47150 +#include <linux/grinternal.h>
47153 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47154 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47156 + struct task_struct *task;
47158 + if (!gr_acl_is_enabled())
47162 + read_lock(&tasklist_lock);
47164 + task = find_task_by_vpid(shm_cprid);
47166 + if (unlikely(!task))
47167 + task = find_task_by_vpid(shm_lapid);
47169 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47170 + (task->pid == shm_lapid)) &&
47171 + (task->acl->mode & GR_PROTSHM) &&
47172 + (task->acl != current->acl))) {
47173 + read_unlock(&tasklist_lock);
47174 + rcu_read_unlock();
47175 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47178 + read_unlock(&tasklist_lock);
47179 + rcu_read_unlock();
47183 diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47184 --- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47185 +++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47187 +#include <linux/kernel.h>
47188 +#include <linux/sched.h>
47189 +#include <linux/fs.h>
47190 +#include <linux/file.h>
47191 +#include <linux/grsecurity.h>
47192 +#include <linux/grinternal.h>
47195 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47197 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47198 + if ((grsec_enable_chdir && grsec_enable_group &&
47199 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47200 + !grsec_enable_group)) {
47201 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47206 diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47207 --- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47208 +++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47210 +#include <linux/kernel.h>
47211 +#include <linux/module.h>
47212 +#include <linux/sched.h>
47213 +#include <linux/file.h>
47214 +#include <linux/fs.h>
47215 +#include <linux/mount.h>
47216 +#include <linux/types.h>
47217 +#include <linux/pid_namespace.h>
47218 +#include <linux/grsecurity.h>
47219 +#include <linux/grinternal.h>
47221 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47223 +#ifdef CONFIG_GRKERNSEC
47224 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47225 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47226 + task->gr_is_chrooted = 1;
47228 + task->gr_is_chrooted = 0;
47230 + task->gr_chroot_dentry = path->dentry;
47235 +void gr_clear_chroot_entries(struct task_struct *task)
47237 +#ifdef CONFIG_GRKERNSEC
47238 + task->gr_is_chrooted = 0;
47239 + task->gr_chroot_dentry = NULL;
47245 +gr_handle_chroot_unix(const pid_t pid)
47247 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47248 + struct task_struct *p;
47250 + if (unlikely(!grsec_enable_chroot_unix))
47253 + if (likely(!proc_is_chrooted(current)))
47257 + read_lock(&tasklist_lock);
47258 + p = find_task_by_vpid_unrestricted(pid);
47259 + if (unlikely(p && !have_same_root(current, p))) {
47260 + read_unlock(&tasklist_lock);
47261 + rcu_read_unlock();
47262 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47265 + read_unlock(&tasklist_lock);
47266 + rcu_read_unlock();
47272 +gr_handle_chroot_nice(void)
47274 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47275 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47276 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47284 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47286 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47287 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47288 + && proc_is_chrooted(current)) {
47289 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47297 +gr_handle_chroot_rawio(const struct inode *inode)
47299 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47300 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47301 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47308 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47310 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47311 + struct task_struct *p;
47313 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47316 + read_lock(&tasklist_lock);
47317 + do_each_pid_task(pid, type, p) {
47318 + if (!have_same_root(current, p)) {
47322 + } while_each_pid_task(pid, type, p);
47324 + read_unlock(&tasklist_lock);
47331 +gr_pid_is_chrooted(struct task_struct *p)
47333 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47334 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47337 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47338 + !have_same_root(current, p)) {
47345 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47347 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47348 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47350 + struct path path, currentroot;
47353 + path.dentry = (struct dentry *)u_dentry;
47354 + path.mnt = (struct vfsmount *)u_mnt;
47355 + get_fs_root(current->fs, &currentroot);
47356 + if (path_is_under(&path, &currentroot))
47358 + path_put(&currentroot);
47365 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47367 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47368 + if (!grsec_enable_chroot_fchdir)
47371 + if (!proc_is_chrooted(current))
47373 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47374 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47382 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47383 + const time_t shm_createtime)
47385 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47386 + struct task_struct *p;
47387 + time_t starttime;
47389 + if (unlikely(!grsec_enable_chroot_shmat))
47392 + if (likely(!proc_is_chrooted(current)))
47396 + read_lock(&tasklist_lock);
47398 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47399 + starttime = p->start_time.tv_sec;
47400 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47401 + if (have_same_root(current, p)) {
47404 + read_unlock(&tasklist_lock);
47405 + rcu_read_unlock();
47406 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47410 + /* creator exited, pid reuse, fall through to next check */
47412 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47413 + if (unlikely(!have_same_root(current, p))) {
47414 + read_unlock(&tasklist_lock);
47415 + rcu_read_unlock();
47416 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47422 + read_unlock(&tasklist_lock);
47423 + rcu_read_unlock();
47429 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47431 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47432 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47433 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47439 +gr_handle_chroot_mknod(const struct dentry *dentry,
47440 + const struct vfsmount *mnt, const int mode)
47442 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47443 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47444 + proc_is_chrooted(current)) {
47445 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47453 +gr_handle_chroot_mount(const struct dentry *dentry,
47454 + const struct vfsmount *mnt, const char *dev_name)
47456 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47457 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47458 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47466 +gr_handle_chroot_pivot(void)
47468 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47469 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47470 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47478 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47480 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47481 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47482 + !gr_is_outside_chroot(dentry, mnt)) {
47483 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47491 +gr_handle_chroot_caps(struct path *path)
47493 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47494 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47495 + (init_task.fs->root.dentry != path->dentry) &&
47496 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47498 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47499 + const struct cred *old = current_cred();
47500 + struct cred *new = prepare_creds();
47504 + new->cap_permitted = cap_drop(old->cap_permitted,
47506 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47508 + new->cap_effective = cap_drop(old->cap_effective,
47511 + commit_creds(new);
47520 +gr_handle_chroot_sysctl(const int op)
47522 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47523 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47524 + proc_is_chrooted(current))
47531 +gr_handle_chroot_chdir(struct path *path)
47533 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47534 + if (grsec_enable_chroot_chdir)
47535 + set_fs_pwd(current->fs, path);
47541 +gr_handle_chroot_chmod(const struct dentry *dentry,
47542 + const struct vfsmount *mnt, const int mode)
47544 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47545 + /* allow chmod +s on directories, but not files */
47546 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47547 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47548 + proc_is_chrooted(current)) {
47549 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47556 +#ifdef CONFIG_SECURITY
47557 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47559 diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47560 --- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47561 +++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47563 +#include <linux/kernel.h>
47564 +#include <linux/module.h>
47565 +#include <linux/sched.h>
47566 +#include <linux/file.h>
47567 +#include <linux/fs.h>
47568 +#include <linux/kdev_t.h>
47569 +#include <linux/net.h>
47570 +#include <linux/in.h>
47571 +#include <linux/ip.h>
47572 +#include <linux/skbuff.h>
47573 +#include <linux/sysctl.h>
47575 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47577 +pax_set_initial_flags(struct linux_binprm *bprm)
47583 +#ifdef CONFIG_SYSCTL
47585 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47591 +#ifdef CONFIG_TASKSTATS
47592 +int gr_is_taskstats_denied(int pid)
47599 +gr_acl_is_enabled(void)
47605 +gr_handle_rawio(const struct inode *inode)
47611 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47617 +gr_handle_ptrace(struct task_struct *task, const long request)
47623 +gr_handle_proc_ptrace(struct task_struct *task)
47629 +gr_learn_resource(const struct task_struct *task,
47630 + const int res, const unsigned long wanted, const int gt)
47636 +gr_set_acls(const int type)
47642 +gr_check_hidden_task(const struct task_struct *tsk)
47648 +gr_check_protected_task(const struct task_struct *task)
47654 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47660 +gr_copy_label(struct task_struct *tsk)
47666 +gr_set_pax_flags(struct task_struct *task)
47672 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47673 + const int unsafe_share)
47679 +gr_handle_delete(const ino_t ino, const dev_t dev)
47685 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47691 +gr_handle_crash(struct task_struct *task, const int sig)
47697 +gr_check_crash_exec(const struct file *filp)
47703 +gr_check_crash_uid(const uid_t uid)
47709 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47710 + struct dentry *old_dentry,
47711 + struct dentry *new_dentry,
47712 + struct vfsmount *mnt, const __u8 replace)
47718 +gr_search_socket(const int family, const int type, const int protocol)
47724 +gr_search_connectbind(const int mode, const struct socket *sock,
47725 + const struct sockaddr_in *addr)
47731 +gr_is_capable(const int cap)
47737 +gr_is_capable_nolog(const int cap)
47743 +gr_handle_alertkill(struct task_struct *task)
47749 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47755 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47756 + const struct vfsmount * mnt)
47762 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47769 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47775 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47781 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47782 + unsigned int *vm_flags)
47788 +gr_acl_handle_truncate(const struct dentry * dentry,
47789 + const struct vfsmount * mnt)
47795 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47801 +gr_acl_handle_access(const struct dentry * dentry,
47802 + const struct vfsmount * mnt, const int fmode)
47808 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47815 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47822 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47828 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47834 +grsecurity_init(void)
47840 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47841 + const struct dentry * parent_dentry,
47842 + const struct vfsmount * parent_mnt,
47849 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47850 + const struct dentry * parent_dentry,
47851 + const struct vfsmount * parent_mnt)
47857 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47858 + const struct dentry * parent_dentry,
47859 + const struct vfsmount * parent_mnt, const char *from)
47865 +gr_acl_handle_link(const struct dentry * new_dentry,
47866 + const struct dentry * parent_dentry,
47867 + const struct vfsmount * parent_mnt,
47868 + const struct dentry * old_dentry,
47869 + const struct vfsmount * old_mnt, const char *to)
47875 +gr_acl_handle_rename(const struct dentry *new_dentry,
47876 + const struct dentry *parent_dentry,
47877 + const struct vfsmount *parent_mnt,
47878 + const struct dentry *old_dentry,
47879 + const struct inode *old_parent_inode,
47880 + const struct vfsmount *old_mnt, const char *newname)
47886 +gr_acl_handle_filldir(const struct file *file, const char *name,
47887 + const int namelen, const ino_t ino)
47893 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47894 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47900 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47906 +gr_search_accept(const struct socket *sock)
47912 +gr_search_listen(const struct socket *sock)
47918 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47924 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47930 +gr_acl_handle_creat(const struct dentry * dentry,
47931 + const struct dentry * p_dentry,
47932 + const struct vfsmount * p_mnt, const int fmode,
47939 +gr_acl_handle_exit(void)
47945 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47951 +gr_set_role_label(const uid_t uid, const gid_t gid)
47957 +gr_acl_handle_procpidmem(const struct task_struct *task)
47963 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47969 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47975 +gr_set_kernel_label(struct task_struct *task)
47981 +gr_check_user_change(int real, int effective, int fs)
47987 +gr_check_group_change(int real, int effective, int fs)
47992 +int gr_acl_enable_at_secure(void)
47997 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47999 + return dentry->d_inode->i_sb->s_dev;
48002 +EXPORT_SYMBOL(gr_is_capable);
48003 +EXPORT_SYMBOL(gr_is_capable_nolog);
48004 +EXPORT_SYMBOL(gr_learn_resource);
48005 +EXPORT_SYMBOL(gr_set_kernel_label);
48006 +#ifdef CONFIG_SECURITY
48007 +EXPORT_SYMBOL(gr_check_user_change);
48008 +EXPORT_SYMBOL(gr_check_group_change);
48010 diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
48011 --- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
48012 +++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
48014 +#include <linux/kernel.h>
48015 +#include <linux/sched.h>
48016 +#include <linux/file.h>
48017 +#include <linux/binfmts.h>
48018 +#include <linux/fs.h>
48019 +#include <linux/types.h>
48020 +#include <linux/grdefs.h>
48021 +#include <linux/grinternal.h>
48022 +#include <linux/capability.h>
48023 +#include <linux/compat.h>
48025 +#include <asm/uaccess.h>
48027 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48028 +static char gr_exec_arg_buf[132];
48029 +static DEFINE_MUTEX(gr_exec_arg_mutex);
48033 +gr_handle_nproc(void)
48035 +#ifdef CONFIG_GRKERNSEC_EXECVE
48036 + const struct cred *cred = current_cred();
48037 + if (grsec_enable_execve && cred->user &&
48038 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
48039 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
48040 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
48048 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
48050 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48051 + char *grarg = gr_exec_arg_buf;
48052 + unsigned int i, x, execlen = 0;
48055 + if (!((grsec_enable_execlog && grsec_enable_group &&
48056 + in_group_p(grsec_audit_gid))
48057 + || (grsec_enable_execlog && !grsec_enable_group)))
48060 + mutex_lock(&gr_exec_arg_mutex);
48061 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48063 + if (unlikely(argv == NULL))
48066 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48067 + const char __user *p;
48068 + unsigned int len;
48070 + if (copy_from_user(&p, argv + i, sizeof(p)))
48074 + len = strnlen_user(p, 128 - execlen);
48075 + if (len > 128 - execlen)
48076 + len = 128 - execlen;
48077 + else if (len > 0)
48079 + if (copy_from_user(grarg + execlen, p, len))
48082 + /* rewrite unprintable characters */
48083 + for (x = 0; x < len; x++) {
48084 + c = *(grarg + execlen + x);
48085 + if (c < 32 || c > 126)
48086 + *(grarg + execlen + x) = ' ';
48090 + *(grarg + execlen) = ' ';
48091 + *(grarg + execlen + 1) = '\0';
48096 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48097 + bprm->file->f_path.mnt, grarg);
48098 + mutex_unlock(&gr_exec_arg_mutex);
48103 +#ifdef CONFIG_COMPAT
48105 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48107 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48108 + char *grarg = gr_exec_arg_buf;
48109 + unsigned int i, x, execlen = 0;
48112 + if (!((grsec_enable_execlog && grsec_enable_group &&
48113 + in_group_p(grsec_audit_gid))
48114 + || (grsec_enable_execlog && !grsec_enable_group)))
48117 + mutex_lock(&gr_exec_arg_mutex);
48118 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48120 + if (unlikely(argv == NULL))
48123 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48125 + unsigned int len;
48127 + if (get_user(p, argv + i))
48129 + len = strnlen_user(compat_ptr(p), 128 - execlen);
48130 + if (len > 128 - execlen)
48131 + len = 128 - execlen;
48132 + else if (len > 0)
48136 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48139 + /* rewrite unprintable characters */
48140 + for (x = 0; x < len; x++) {
48141 + c = *(grarg + execlen + x);
48142 + if (c < 32 || c > 126)
48143 + *(grarg + execlen + x) = ' ';
48147 + *(grarg + execlen) = ' ';
48148 + *(grarg + execlen + 1) = '\0';
48153 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48154 + bprm->file->f_path.mnt, grarg);
48155 + mutex_unlock(&gr_exec_arg_mutex);
48160 diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48161 --- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48162 +++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48164 +#include <linux/kernel.h>
48165 +#include <linux/sched.h>
48166 +#include <linux/fs.h>
48167 +#include <linux/file.h>
48168 +#include <linux/grinternal.h>
48171 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48172 + const struct dentry *dir, const int flag, const int acc_mode)
48174 +#ifdef CONFIG_GRKERNSEC_FIFO
48175 + const struct cred *cred = current_cred();
48177 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48178 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48179 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48180 + (cred->fsuid != dentry->d_inode->i_uid)) {
48181 + if (!inode_permission(dentry->d_inode, acc_mode))
48182 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48188 diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48189 --- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48190 +++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48192 +#include <linux/kernel.h>
48193 +#include <linux/sched.h>
48194 +#include <linux/grsecurity.h>
48195 +#include <linux/grinternal.h>
48196 +#include <linux/errno.h>
48199 +gr_log_forkfail(const int retval)
48201 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48202 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48203 + switch (retval) {
48205 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48208 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48215 diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48216 --- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48217 +++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48219 +#include <linux/kernel.h>
48220 +#include <linux/sched.h>
48221 +#include <linux/mm.h>
48222 +#include <linux/gracl.h>
48223 +#include <linux/slab.h>
48224 +#include <linux/vmalloc.h>
48225 +#include <linux/percpu.h>
48226 +#include <linux/module.h>
48228 +int grsec_enable_brute;
48229 +int grsec_enable_link;
48230 +int grsec_enable_dmesg;
48231 +int grsec_enable_harden_ptrace;
48232 +int grsec_enable_fifo;
48233 +int grsec_enable_execve;
48234 +int grsec_enable_execlog;
48235 +int grsec_enable_signal;
48236 +int grsec_enable_forkfail;
48237 +int grsec_enable_audit_ptrace;
48238 +int grsec_enable_time;
48239 +int grsec_enable_audit_textrel;
48240 +int grsec_enable_group;
48241 +int grsec_audit_gid;
48242 +int grsec_enable_chdir;
48243 +int grsec_enable_mount;
48244 +int grsec_enable_rofs;
48245 +int grsec_enable_chroot_findtask;
48246 +int grsec_enable_chroot_mount;
48247 +int grsec_enable_chroot_shmat;
48248 +int grsec_enable_chroot_fchdir;
48249 +int grsec_enable_chroot_double;
48250 +int grsec_enable_chroot_pivot;
48251 +int grsec_enable_chroot_chdir;
48252 +int grsec_enable_chroot_chmod;
48253 +int grsec_enable_chroot_mknod;
48254 +int grsec_enable_chroot_nice;
48255 +int grsec_enable_chroot_execlog;
48256 +int grsec_enable_chroot_caps;
48257 +int grsec_enable_chroot_sysctl;
48258 +int grsec_enable_chroot_unix;
48259 +int grsec_enable_tpe;
48260 +int grsec_tpe_gid;
48261 +int grsec_enable_blackhole;
48262 +#ifdef CONFIG_IPV6_MODULE
48263 +EXPORT_SYMBOL(grsec_enable_blackhole);
48265 +int grsec_lastack_retries;
48266 +int grsec_enable_tpe_all;
48267 +int grsec_enable_tpe_invert;
48268 +int grsec_enable_socket_all;
48269 +int grsec_socket_all_gid;
48270 +int grsec_enable_socket_client;
48271 +int grsec_socket_client_gid;
48272 +int grsec_enable_socket_server;
48273 +int grsec_socket_server_gid;
48274 +int grsec_resource_logging;
48275 +int grsec_disable_privio;
48276 +int grsec_enable_log_rwxmaps;
48279 +DEFINE_SPINLOCK(grsec_alert_lock);
48280 +unsigned long grsec_alert_wtime = 0;
48281 +unsigned long grsec_alert_fyet = 0;
48283 +DEFINE_SPINLOCK(grsec_audit_lock);
48285 +DEFINE_RWLOCK(grsec_exec_file_lock);
48287 +char *gr_shared_page[4];
48289 +char *gr_alert_log_fmt;
48290 +char *gr_audit_log_fmt;
48291 +char *gr_alert_log_buf;
48292 +char *gr_audit_log_buf;
48294 +extern struct gr_arg *gr_usermode;
48295 +extern unsigned char *gr_system_salt;
48296 +extern unsigned char *gr_system_sum;
48299 +grsecurity_init(void)
48302 + /* create the per-cpu shared pages */
48305 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48308 + for (j = 0; j < 4; j++) {
48309 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48310 + if (gr_shared_page[j] == NULL) {
48311 + panic("Unable to allocate grsecurity shared page");
48316 + /* allocate log buffers */
48317 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48318 + if (!gr_alert_log_fmt) {
48319 + panic("Unable to allocate grsecurity alert log format buffer");
48322 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48323 + if (!gr_audit_log_fmt) {
48324 + panic("Unable to allocate grsecurity audit log format buffer");
48327 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48328 + if (!gr_alert_log_buf) {
48329 + panic("Unable to allocate grsecurity alert log buffer");
48332 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48333 + if (!gr_audit_log_buf) {
48334 + panic("Unable to allocate grsecurity audit log buffer");
48338 + /* allocate memory for authentication structure */
48339 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48340 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48341 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48343 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48344 + panic("Unable to allocate grsecurity authentication structure");
48349 +#ifdef CONFIG_GRKERNSEC_IO
48350 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48351 + grsec_disable_privio = 1;
48352 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48353 + grsec_disable_privio = 1;
48355 + grsec_disable_privio = 0;
48359 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48360 + /* for backward compatibility, tpe_invert always defaults to on if
48361 + enabled in the kernel
48363 + grsec_enable_tpe_invert = 1;
48366 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48367 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48371 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48372 + grsec_enable_audit_textrel = 1;
48374 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48375 + grsec_enable_log_rwxmaps = 1;
48377 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48378 + grsec_enable_group = 1;
48379 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48381 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48382 + grsec_enable_chdir = 1;
48384 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48385 + grsec_enable_harden_ptrace = 1;
48387 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48388 + grsec_enable_mount = 1;
48390 +#ifdef CONFIG_GRKERNSEC_LINK
48391 + grsec_enable_link = 1;
48393 +#ifdef CONFIG_GRKERNSEC_BRUTE
48394 + grsec_enable_brute = 1;
48396 +#ifdef CONFIG_GRKERNSEC_DMESG
48397 + grsec_enable_dmesg = 1;
48399 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48400 + grsec_enable_blackhole = 1;
48401 + grsec_lastack_retries = 4;
48403 +#ifdef CONFIG_GRKERNSEC_FIFO
48404 + grsec_enable_fifo = 1;
48406 +#ifdef CONFIG_GRKERNSEC_EXECVE
48407 + grsec_enable_execve = 1;
48409 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48410 + grsec_enable_execlog = 1;
48412 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48413 + grsec_enable_signal = 1;
48415 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48416 + grsec_enable_forkfail = 1;
48418 +#ifdef CONFIG_GRKERNSEC_TIME
48419 + grsec_enable_time = 1;
48421 +#ifdef CONFIG_GRKERNSEC_RESLOG
48422 + grsec_resource_logging = 1;
48424 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48425 + grsec_enable_chroot_findtask = 1;
48427 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48428 + grsec_enable_chroot_unix = 1;
48430 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48431 + grsec_enable_chroot_mount = 1;
48433 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48434 + grsec_enable_chroot_fchdir = 1;
48436 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48437 + grsec_enable_chroot_shmat = 1;
48439 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48440 + grsec_enable_audit_ptrace = 1;
48442 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48443 + grsec_enable_chroot_double = 1;
48445 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48446 + grsec_enable_chroot_pivot = 1;
48448 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48449 + grsec_enable_chroot_chdir = 1;
48451 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48452 + grsec_enable_chroot_chmod = 1;
48454 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48455 + grsec_enable_chroot_mknod = 1;
48457 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48458 + grsec_enable_chroot_nice = 1;
48460 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48461 + grsec_enable_chroot_execlog = 1;
48463 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48464 + grsec_enable_chroot_caps = 1;
48466 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48467 + grsec_enable_chroot_sysctl = 1;
48469 +#ifdef CONFIG_GRKERNSEC_TPE
48470 + grsec_enable_tpe = 1;
48471 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48472 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48473 + grsec_enable_tpe_all = 1;
48476 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48477 + grsec_enable_socket_all = 1;
48478 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48480 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48481 + grsec_enable_socket_client = 1;
48482 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48484 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48485 + grsec_enable_socket_server = 1;
48486 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48492 diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48493 --- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48494 +++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48496 +#include <linux/kernel.h>
48497 +#include <linux/sched.h>
48498 +#include <linux/fs.h>
48499 +#include <linux/file.h>
48500 +#include <linux/grinternal.h>
48503 +gr_handle_follow_link(const struct inode *parent,
48504 + const struct inode *inode,
48505 + const struct dentry *dentry, const struct vfsmount *mnt)
48507 +#ifdef CONFIG_GRKERNSEC_LINK
48508 + const struct cred *cred = current_cred();
48510 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48511 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48512 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48513 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48521 +gr_handle_hardlink(const struct dentry *dentry,
48522 + const struct vfsmount *mnt,
48523 + struct inode *inode, const int mode, const char *to)
48525 +#ifdef CONFIG_GRKERNSEC_LINK
48526 + const struct cred *cred = current_cred();
48528 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48529 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48530 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48531 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48532 + !capable(CAP_FOWNER) && cred->uid) {
48533 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48539 diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48540 --- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48541 +++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48543 +#include <linux/kernel.h>
48544 +#include <linux/sched.h>
48545 +#include <linux/file.h>
48546 +#include <linux/tty.h>
48547 +#include <linux/fs.h>
48548 +#include <linux/grinternal.h>
48550 +#ifdef CONFIG_TREE_PREEMPT_RCU
48551 +#define DISABLE_PREEMPT() preempt_disable()
48552 +#define ENABLE_PREEMPT() preempt_enable()
48554 +#define DISABLE_PREEMPT()
48555 +#define ENABLE_PREEMPT()
48558 +#define BEGIN_LOCKS(x) \
48559 + DISABLE_PREEMPT(); \
48560 + rcu_read_lock(); \
48561 + read_lock(&tasklist_lock); \
48562 + read_lock(&grsec_exec_file_lock); \
48563 + if (x != GR_DO_AUDIT) \
48564 + spin_lock(&grsec_alert_lock); \
48566 + spin_lock(&grsec_audit_lock)
48568 +#define END_LOCKS(x) \
48569 + if (x != GR_DO_AUDIT) \
48570 + spin_unlock(&grsec_alert_lock); \
48572 + spin_unlock(&grsec_audit_lock); \
48573 + read_unlock(&grsec_exec_file_lock); \
48574 + read_unlock(&tasklist_lock); \
48575 + rcu_read_unlock(); \
48576 + ENABLE_PREEMPT(); \
48577 + if (x == GR_DONT_AUDIT) \
48578 + gr_handle_alertkill(current)
48585 +extern char *gr_alert_log_fmt;
48586 +extern char *gr_audit_log_fmt;
48587 +extern char *gr_alert_log_buf;
48588 +extern char *gr_audit_log_buf;
48590 +static int gr_log_start(int audit)
48592 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48593 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48594 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48596 + if (audit == GR_DO_AUDIT)
48599 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48600 + grsec_alert_wtime = jiffies;
48601 + grsec_alert_fyet = 0;
48602 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48603 + grsec_alert_fyet++;
48604 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48605 + grsec_alert_wtime = jiffies;
48606 + grsec_alert_fyet++;
48607 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48609 + } else return FLOODING;
48612 + memset(buf, 0, PAGE_SIZE);
48613 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48614 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48615 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48616 + } else if (current->signal->curr_ip) {
48617 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48618 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48619 + } else if (gr_acl_is_enabled()) {
48620 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48621 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48623 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48624 + strcpy(buf, fmt);
48627 + return NO_FLOODING;
48630 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48631 + __attribute__ ((format (printf, 2, 0)));
48633 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48635 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48636 + unsigned int len = strlen(buf);
48638 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48643 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48644 + __attribute__ ((format (printf, 2, 3)));
48646 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48648 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48649 + unsigned int len = strlen(buf);
48652 + va_start(ap, msg);
48653 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48659 +static void gr_log_end(int audit)
48661 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48662 + unsigned int len = strlen(buf);
48664 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48665 + printk("%s\n", buf);
48670 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48673 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48674 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48675 + void *voidptr = NULL;
48676 + int num1 = 0, num2 = 0;
48677 + unsigned long ulong1 = 0, ulong2 = 0;
48678 + struct dentry *dentry = NULL;
48679 + struct vfsmount *mnt = NULL;
48680 + struct file *file = NULL;
48681 + struct task_struct *task = NULL;
48682 + const struct cred *cred, *pcred;
48685 + BEGIN_LOCKS(audit);
48686 + logtype = gr_log_start(audit);
48687 + if (logtype == FLOODING) {
48688 + END_LOCKS(audit);
48691 + va_start(ap, argtypes);
48692 + switch (argtypes) {
48693 + case GR_TTYSNIFF:
48694 + task = va_arg(ap, struct task_struct *);
48695 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48697 + case GR_SYSCTL_HIDDEN:
48698 + str1 = va_arg(ap, char *);
48699 + gr_log_middle_varargs(audit, msg, result, str1);
48702 + dentry = va_arg(ap, struct dentry *);
48703 + mnt = va_arg(ap, struct vfsmount *);
48704 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48706 + case GR_RBAC_STR:
48707 + dentry = va_arg(ap, struct dentry *);
48708 + mnt = va_arg(ap, struct vfsmount *);
48709 + str1 = va_arg(ap, char *);
48710 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48712 + case GR_STR_RBAC:
48713 + str1 = va_arg(ap, char *);
48714 + dentry = va_arg(ap, struct dentry *);
48715 + mnt = va_arg(ap, struct vfsmount *);
48716 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48718 + case GR_RBAC_MODE2:
48719 + dentry = va_arg(ap, struct dentry *);
48720 + mnt = va_arg(ap, struct vfsmount *);
48721 + str1 = va_arg(ap, char *);
48722 + str2 = va_arg(ap, char *);
48723 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48725 + case GR_RBAC_MODE3:
48726 + dentry = va_arg(ap, struct dentry *);
48727 + mnt = va_arg(ap, struct vfsmount *);
48728 + str1 = va_arg(ap, char *);
48729 + str2 = va_arg(ap, char *);
48730 + str3 = va_arg(ap, char *);
48731 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48733 + case GR_FILENAME:
48734 + dentry = va_arg(ap, struct dentry *);
48735 + mnt = va_arg(ap, struct vfsmount *);
48736 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48738 + case GR_STR_FILENAME:
48739 + str1 = va_arg(ap, char *);
48740 + dentry = va_arg(ap, struct dentry *);
48741 + mnt = va_arg(ap, struct vfsmount *);
48742 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48744 + case GR_FILENAME_STR:
48745 + dentry = va_arg(ap, struct dentry *);
48746 + mnt = va_arg(ap, struct vfsmount *);
48747 + str1 = va_arg(ap, char *);
48748 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48750 + case GR_FILENAME_TWO_INT:
48751 + dentry = va_arg(ap, struct dentry *);
48752 + mnt = va_arg(ap, struct vfsmount *);
48753 + num1 = va_arg(ap, int);
48754 + num2 = va_arg(ap, int);
48755 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48757 + case GR_FILENAME_TWO_INT_STR:
48758 + dentry = va_arg(ap, struct dentry *);
48759 + mnt = va_arg(ap, struct vfsmount *);
48760 + num1 = va_arg(ap, int);
48761 + num2 = va_arg(ap, int);
48762 + str1 = va_arg(ap, char *);
48763 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48766 + file = va_arg(ap, struct file *);
48767 + ulong1 = va_arg(ap, unsigned long);
48768 + ulong2 = va_arg(ap, unsigned long);
48769 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48772 + task = va_arg(ap, struct task_struct *);
48773 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48775 + case GR_RESOURCE:
48776 + task = va_arg(ap, struct task_struct *);
48777 + cred = __task_cred(task);
48778 + pcred = __task_cred(task->real_parent);
48779 + ulong1 = va_arg(ap, unsigned long);
48780 + str1 = va_arg(ap, char *);
48781 + ulong2 = va_arg(ap, unsigned long);
48782 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48785 + task = va_arg(ap, struct task_struct *);
48786 + cred = __task_cred(task);
48787 + pcred = __task_cred(task->real_parent);
48788 + str1 = va_arg(ap, char *);
48789 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48792 + str1 = va_arg(ap, char *);
48793 + voidptr = va_arg(ap, void *);
48794 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48797 + task = va_arg(ap, struct task_struct *);
48798 + cred = __task_cred(task);
48799 + pcred = __task_cred(task->real_parent);
48800 + num1 = va_arg(ap, int);
48801 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48804 + task = va_arg(ap, struct task_struct *);
48805 + cred = __task_cred(task);
48806 + pcred = __task_cred(task->real_parent);
48807 + ulong1 = va_arg(ap, unsigned long);
48808 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48811 + task = va_arg(ap, struct task_struct *);
48812 + cred = __task_cred(task);
48813 + pcred = __task_cred(task->real_parent);
48814 + ulong1 = va_arg(ap, unsigned long);
48815 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48818 + file = va_arg(ap, struct file *);
48819 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48823 + unsigned int wday, cday;
48827 + char cur_tty[64] = { 0 };
48828 + char parent_tty[64] = { 0 };
48830 + task = va_arg(ap, struct task_struct *);
48831 + wday = va_arg(ap, unsigned int);
48832 + cday = va_arg(ap, unsigned int);
48833 + whr = va_arg(ap, int);
48834 + chr = va_arg(ap, int);
48835 + wmin = va_arg(ap, int);
48836 + cmin = va_arg(ap, int);
48837 + wsec = va_arg(ap, int);
48838 + csec = va_arg(ap, int);
48839 + ulong1 = va_arg(ap, unsigned long);
48840 + cred = __task_cred(task);
48841 + pcred = __task_cred(task->real_parent);
48843 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48847 + gr_log_middle(audit, msg, ap);
48850 + gr_log_end(audit);
48851 + END_LOCKS(audit);
48853 diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48854 --- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48855 +++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48857 +#include <linux/kernel.h>
48858 +#include <linux/sched.h>
48859 +#include <linux/mm.h>
48860 +#include <linux/mman.h>
48861 +#include <linux/grinternal.h>
48864 +gr_handle_ioperm(void)
48866 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48871 +gr_handle_iopl(void)
48873 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48878 +gr_handle_mem_readwrite(u64 from, u64 to)
48880 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48885 +gr_handle_vm86(void)
48887 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48890 diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48891 --- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48892 +++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48894 +#include <linux/kernel.h>
48895 +#include <linux/sched.h>
48896 +#include <linux/mount.h>
48897 +#include <linux/grsecurity.h>
48898 +#include <linux/grinternal.h>
48901 +gr_log_remount(const char *devname, const int retval)
48903 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48904 + if (grsec_enable_mount && (retval >= 0))
48905 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48911 +gr_log_unmount(const char *devname, const int retval)
48913 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48914 + if (grsec_enable_mount && (retval >= 0))
48915 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48921 +gr_log_mount(const char *from, const char *to, const int retval)
48923 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48924 + if (grsec_enable_mount && (retval >= 0))
48925 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48931 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48933 +#ifdef CONFIG_GRKERNSEC_ROFS
48934 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48935 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48944 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48946 +#ifdef CONFIG_GRKERNSEC_ROFS
48947 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48948 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48949 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48956 diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48957 --- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48958 +++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48960 +#include <linux/kernel.h>
48961 +#include <linux/sched.h>
48962 +#include <linux/mm.h>
48963 +#include <linux/file.h>
48964 +#include <linux/grinternal.h>
48965 +#include <linux/grsecurity.h>
48968 +gr_log_textrel(struct vm_area_struct * vma)
48970 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48971 + if (grsec_enable_audit_textrel)
48972 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48978 +gr_log_rwxmmap(struct file *file)
48980 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48981 + if (grsec_enable_log_rwxmaps)
48982 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48988 +gr_log_rwxmprotect(struct file *file)
48990 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48991 + if (grsec_enable_log_rwxmaps)
48992 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48996 diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
48997 --- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48998 +++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
49000 +#include <linux/kernel.h>
49001 +#include <linux/sched.h>
49002 +#include <linux/grinternal.h>
49003 +#include <linux/grsecurity.h>
49006 +gr_audit_ptrace(struct task_struct *task)
49008 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49009 + if (grsec_enable_audit_ptrace)
49010 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
49014 diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
49015 --- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
49016 +++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
49018 +#include <linux/kernel.h>
49019 +#include <linux/sched.h>
49020 +#include <linux/delay.h>
49021 +#include <linux/grsecurity.h>
49022 +#include <linux/grinternal.h>
49023 +#include <linux/hardirq.h>
49025 +char *signames[] = {
49026 + [SIGSEGV] = "Segmentation fault",
49027 + [SIGILL] = "Illegal instruction",
49028 + [SIGABRT] = "Abort",
49029 + [SIGBUS] = "Invalid alignment/Bus error"
49033 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
49035 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49036 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
49037 + (sig == SIGABRT) || (sig == SIGBUS))) {
49038 + if (t->pid == current->pid) {
49039 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
49041 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
49049 +gr_handle_signal(const struct task_struct *p, const int sig)
49051 +#ifdef CONFIG_GRKERNSEC
49052 + if (current->pid > 1 && gr_check_protected_task(p)) {
49053 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
49055 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
49062 +#ifdef CONFIG_GRKERNSEC
49063 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
49065 +int gr_fake_force_sig(int sig, struct task_struct *t)
49067 + unsigned long int flags;
49068 + int ret, blocked, ignored;
49069 + struct k_sigaction *action;
49071 + spin_lock_irqsave(&t->sighand->siglock, flags);
49072 + action = &t->sighand->action[sig-1];
49073 + ignored = action->sa.sa_handler == SIG_IGN;
49074 + blocked = sigismember(&t->blocked, sig);
49075 + if (blocked || ignored) {
49076 + action->sa.sa_handler = SIG_DFL;
49078 + sigdelset(&t->blocked, sig);
49079 + recalc_sigpending_and_wake(t);
49082 + if (action->sa.sa_handler == SIG_DFL)
49083 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
49084 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49086 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
49092 +#ifdef CONFIG_GRKERNSEC_BRUTE
49093 +#define GR_USER_BAN_TIME (15 * 60)
49095 +static int __get_dumpable(unsigned long mm_flags)
49099 + ret = mm_flags & MMF_DUMPABLE_MASK;
49100 + return (ret >= 2) ? 2 : ret;
49104 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49106 +#ifdef CONFIG_GRKERNSEC_BRUTE
49109 + if (!grsec_enable_brute)
49113 + read_lock(&tasklist_lock);
49114 + read_lock(&grsec_exec_file_lock);
49115 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49116 + p->real_parent->brute = 1;
49118 + const struct cred *cred = __task_cred(p), *cred2;
49119 + struct task_struct *tsk, *tsk2;
49121 + if (!__get_dumpable(mm_flags) && cred->uid) {
49122 + struct user_struct *user;
49126 + /* this is put upon execution past expiration */
49127 + user = find_user(uid);
49128 + if (user == NULL)
49130 + user->banned = 1;
49131 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49132 + if (user->ban_expires == ~0UL)
49133 + user->ban_expires--;
49135 + do_each_thread(tsk2, tsk) {
49136 + cred2 = __task_cred(tsk);
49137 + if (tsk != p && cred2->uid == uid)
49138 + gr_fake_force_sig(SIGKILL, tsk);
49139 + } while_each_thread(tsk2, tsk);
49143 + read_unlock(&grsec_exec_file_lock);
49144 + read_unlock(&tasklist_lock);
49145 + rcu_read_unlock();
49148 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49154 +void gr_handle_brute_check(void)
49156 +#ifdef CONFIG_GRKERNSEC_BRUTE
49157 + if (current->brute)
49158 + msleep(30 * 1000);
49163 +void gr_handle_kernel_exploit(void)
49165 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49166 + const struct cred *cred;
49167 + struct task_struct *tsk, *tsk2;
49168 + struct user_struct *user;
49171 + if (in_irq() || in_serving_softirq() || in_nmi())
49172 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49174 + uid = current_uid();
49177 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49179 + /* kill all the processes of this user, hold a reference
49180 + to their creds struct, and prevent them from creating
49181 + another process until system reset
49183 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49184 + /* we intentionally leak this ref */
49185 + user = get_uid(current->cred->user);
49187 + user->banned = 1;
49188 + user->ban_expires = ~0UL;
49191 + read_lock(&tasklist_lock);
49192 + do_each_thread(tsk2, tsk) {
49193 + cred = __task_cred(tsk);
49194 + if (cred->uid == uid)
49195 + gr_fake_force_sig(SIGKILL, tsk);
49196 + } while_each_thread(tsk2, tsk);
49197 + read_unlock(&tasklist_lock);
49202 +int __gr_process_user_ban(struct user_struct *user)
49204 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49205 + if (unlikely(user->banned)) {
49206 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49207 + user->banned = 0;
49208 + user->ban_expires = 0;
49217 +int gr_process_user_ban(void)
49219 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49220 + return __gr_process_user_ban(current->cred->user);
49224 diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49225 --- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49226 +++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49228 +#include <linux/kernel.h>
49229 +#include <linux/module.h>
49230 +#include <linux/sched.h>
49231 +#include <linux/file.h>
49232 +#include <linux/net.h>
49233 +#include <linux/in.h>
49234 +#include <linux/ip.h>
49235 +#include <net/sock.h>
49236 +#include <net/inet_sock.h>
49237 +#include <linux/grsecurity.h>
49238 +#include <linux/grinternal.h>
49239 +#include <linux/gracl.h>
49241 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49242 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49244 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49245 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49247 +#ifdef CONFIG_UNIX_MODULE
49248 +EXPORT_SYMBOL(gr_acl_handle_unix);
49249 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49250 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49251 +EXPORT_SYMBOL(gr_handle_create);
49254 +#ifdef CONFIG_GRKERNSEC
49255 +#define gr_conn_table_size 32749
49256 +struct conn_table_entry {
49257 + struct conn_table_entry *next;
49258 + struct signal_struct *sig;
49261 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49262 +DEFINE_SPINLOCK(gr_conn_table_lock);
49264 +extern const char * gr_socktype_to_name(unsigned char type);
49265 +extern const char * gr_proto_to_name(unsigned char proto);
49266 +extern const char * gr_sockfamily_to_name(unsigned char family);
49268 +static __inline__ int
49269 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49271 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49274 +static __inline__ int
49275 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49276 + __u16 sport, __u16 dport)
49278 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49279 + sig->gr_sport == sport && sig->gr_dport == dport))
49285 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49287 + struct conn_table_entry **match;
49288 + unsigned int index;
49290 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49291 + sig->gr_sport, sig->gr_dport,
49292 + gr_conn_table_size);
49294 + newent->sig = sig;
49296 + match = &gr_conn_table[index];
49297 + newent->next = *match;
49303 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49305 + struct conn_table_entry *match, *last = NULL;
49306 + unsigned int index;
49308 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49309 + sig->gr_sport, sig->gr_dport,
49310 + gr_conn_table_size);
49312 + match = gr_conn_table[index];
49313 + while (match && !conn_match(match->sig,
49314 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49315 + sig->gr_dport)) {
49317 + match = match->next;
49322 + last->next = match->next;
49324 + gr_conn_table[index] = NULL;
49331 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49332 + __u16 sport, __u16 dport)
49334 + struct conn_table_entry *match;
49335 + unsigned int index;
49337 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49339 + match = gr_conn_table[index];
49340 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49341 + match = match->next;
49344 + return match->sig;
49351 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49353 +#ifdef CONFIG_GRKERNSEC
49354 + struct signal_struct *sig = task->signal;
49355 + struct conn_table_entry *newent;
49357 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49358 + if (newent == NULL)
49360 + /* no bh lock needed since we are called with bh disabled */
49361 + spin_lock(&gr_conn_table_lock);
49362 + gr_del_task_from_ip_table_nolock(sig);
49363 + sig->gr_saddr = inet->inet_rcv_saddr;
49364 + sig->gr_daddr = inet->inet_daddr;
49365 + sig->gr_sport = inet->inet_sport;
49366 + sig->gr_dport = inet->inet_dport;
49367 + gr_add_to_task_ip_table_nolock(sig, newent);
49368 + spin_unlock(&gr_conn_table_lock);
49373 +void gr_del_task_from_ip_table(struct task_struct *task)
49375 +#ifdef CONFIG_GRKERNSEC
49376 + spin_lock_bh(&gr_conn_table_lock);
49377 + gr_del_task_from_ip_table_nolock(task->signal);
49378 + spin_unlock_bh(&gr_conn_table_lock);
49384 +gr_attach_curr_ip(const struct sock *sk)
49386 +#ifdef CONFIG_GRKERNSEC
49387 + struct signal_struct *p, *set;
49388 + const struct inet_sock *inet = inet_sk(sk);
49390 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49393 + set = current->signal;
49395 + spin_lock_bh(&gr_conn_table_lock);
49396 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49397 + inet->inet_dport, inet->inet_sport);
49398 + if (unlikely(p != NULL)) {
49399 + set->curr_ip = p->curr_ip;
49400 + set->used_accept = 1;
49401 + gr_del_task_from_ip_table_nolock(p);
49402 + spin_unlock_bh(&gr_conn_table_lock);
49405 + spin_unlock_bh(&gr_conn_table_lock);
49407 + set->curr_ip = inet->inet_daddr;
49408 + set->used_accept = 1;
49414 +gr_handle_sock_all(const int family, const int type, const int protocol)
49416 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49417 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49418 + (family != AF_UNIX)) {
49419 + if (family == AF_INET)
49420 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49422 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49430 +gr_handle_sock_server(const struct sockaddr *sck)
49432 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49433 + if (grsec_enable_socket_server &&
49434 + in_group_p(grsec_socket_server_gid) &&
49435 + sck && (sck->sa_family != AF_UNIX) &&
49436 + (sck->sa_family != AF_LOCAL)) {
49437 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49445 +gr_handle_sock_server_other(const struct sock *sck)
49447 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49448 + if (grsec_enable_socket_server &&
49449 + in_group_p(grsec_socket_server_gid) &&
49450 + sck && (sck->sk_family != AF_UNIX) &&
49451 + (sck->sk_family != AF_LOCAL)) {
49452 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49460 +gr_handle_sock_client(const struct sockaddr *sck)
49462 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49463 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49464 + sck && (sck->sa_family != AF_UNIX) &&
49465 + (sck->sa_family != AF_LOCAL)) {
49466 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49472 diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49473 --- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49474 +++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49476 +#include <linux/kernel.h>
49477 +#include <linux/sched.h>
49478 +#include <linux/sysctl.h>
49479 +#include <linux/grsecurity.h>
49480 +#include <linux/grinternal.h>
49483 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49485 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49486 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49487 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49494 +#ifdef CONFIG_GRKERNSEC_ROFS
49495 +static int __maybe_unused one = 1;
49498 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49499 +struct ctl_table grsecurity_table[] = {
49500 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49501 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49502 +#ifdef CONFIG_GRKERNSEC_IO
49504 + .procname = "disable_priv_io",
49505 + .data = &grsec_disable_privio,
49506 + .maxlen = sizeof(int),
49508 + .proc_handler = &proc_dointvec,
49512 +#ifdef CONFIG_GRKERNSEC_LINK
49514 + .procname = "linking_restrictions",
49515 + .data = &grsec_enable_link,
49516 + .maxlen = sizeof(int),
49518 + .proc_handler = &proc_dointvec,
49521 +#ifdef CONFIG_GRKERNSEC_BRUTE
49523 + .procname = "deter_bruteforce",
49524 + .data = &grsec_enable_brute,
49525 + .maxlen = sizeof(int),
49527 + .proc_handler = &proc_dointvec,
49530 +#ifdef CONFIG_GRKERNSEC_FIFO
49532 + .procname = "fifo_restrictions",
49533 + .data = &grsec_enable_fifo,
49534 + .maxlen = sizeof(int),
49536 + .proc_handler = &proc_dointvec,
49539 +#ifdef CONFIG_GRKERNSEC_EXECVE
49541 + .procname = "execve_limiting",
49542 + .data = &grsec_enable_execve,
49543 + .maxlen = sizeof(int),
49545 + .proc_handler = &proc_dointvec,
49548 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49550 + .procname = "ip_blackhole",
49551 + .data = &grsec_enable_blackhole,
49552 + .maxlen = sizeof(int),
49554 + .proc_handler = &proc_dointvec,
49557 + .procname = "lastack_retries",
49558 + .data = &grsec_lastack_retries,
49559 + .maxlen = sizeof(int),
49561 + .proc_handler = &proc_dointvec,
49564 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49566 + .procname = "exec_logging",
49567 + .data = &grsec_enable_execlog,
49568 + .maxlen = sizeof(int),
49570 + .proc_handler = &proc_dointvec,
49573 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49575 + .procname = "rwxmap_logging",
49576 + .data = &grsec_enable_log_rwxmaps,
49577 + .maxlen = sizeof(int),
49579 + .proc_handler = &proc_dointvec,
49582 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49584 + .procname = "signal_logging",
49585 + .data = &grsec_enable_signal,
49586 + .maxlen = sizeof(int),
49588 + .proc_handler = &proc_dointvec,
49591 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49593 + .procname = "forkfail_logging",
49594 + .data = &grsec_enable_forkfail,
49595 + .maxlen = sizeof(int),
49597 + .proc_handler = &proc_dointvec,
49600 +#ifdef CONFIG_GRKERNSEC_TIME
49602 + .procname = "timechange_logging",
49603 + .data = &grsec_enable_time,
49604 + .maxlen = sizeof(int),
49606 + .proc_handler = &proc_dointvec,
49609 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49611 + .procname = "chroot_deny_shmat",
49612 + .data = &grsec_enable_chroot_shmat,
49613 + .maxlen = sizeof(int),
49615 + .proc_handler = &proc_dointvec,
49618 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49620 + .procname = "chroot_deny_unix",
49621 + .data = &grsec_enable_chroot_unix,
49622 + .maxlen = sizeof(int),
49624 + .proc_handler = &proc_dointvec,
49627 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49629 + .procname = "chroot_deny_mount",
49630 + .data = &grsec_enable_chroot_mount,
49631 + .maxlen = sizeof(int),
49633 + .proc_handler = &proc_dointvec,
49636 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49638 + .procname = "chroot_deny_fchdir",
49639 + .data = &grsec_enable_chroot_fchdir,
49640 + .maxlen = sizeof(int),
49642 + .proc_handler = &proc_dointvec,
49645 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49647 + .procname = "chroot_deny_chroot",
49648 + .data = &grsec_enable_chroot_double,
49649 + .maxlen = sizeof(int),
49651 + .proc_handler = &proc_dointvec,
49654 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49656 + .procname = "chroot_deny_pivot",
49657 + .data = &grsec_enable_chroot_pivot,
49658 + .maxlen = sizeof(int),
49660 + .proc_handler = &proc_dointvec,
49663 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49665 + .procname = "chroot_enforce_chdir",
49666 + .data = &grsec_enable_chroot_chdir,
49667 + .maxlen = sizeof(int),
49669 + .proc_handler = &proc_dointvec,
49672 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49674 + .procname = "chroot_deny_chmod",
49675 + .data = &grsec_enable_chroot_chmod,
49676 + .maxlen = sizeof(int),
49678 + .proc_handler = &proc_dointvec,
49681 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49683 + .procname = "chroot_deny_mknod",
49684 + .data = &grsec_enable_chroot_mknod,
49685 + .maxlen = sizeof(int),
49687 + .proc_handler = &proc_dointvec,
49690 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49692 + .procname = "chroot_restrict_nice",
49693 + .data = &grsec_enable_chroot_nice,
49694 + .maxlen = sizeof(int),
49696 + .proc_handler = &proc_dointvec,
49699 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49701 + .procname = "chroot_execlog",
49702 + .data = &grsec_enable_chroot_execlog,
49703 + .maxlen = sizeof(int),
49705 + .proc_handler = &proc_dointvec,
49708 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49710 + .procname = "chroot_caps",
49711 + .data = &grsec_enable_chroot_caps,
49712 + .maxlen = sizeof(int),
49714 + .proc_handler = &proc_dointvec,
49717 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49719 + .procname = "chroot_deny_sysctl",
49720 + .data = &grsec_enable_chroot_sysctl,
49721 + .maxlen = sizeof(int),
49723 + .proc_handler = &proc_dointvec,
49726 +#ifdef CONFIG_GRKERNSEC_TPE
49728 + .procname = "tpe",
49729 + .data = &grsec_enable_tpe,
49730 + .maxlen = sizeof(int),
49732 + .proc_handler = &proc_dointvec,
49735 + .procname = "tpe_gid",
49736 + .data = &grsec_tpe_gid,
49737 + .maxlen = sizeof(int),
49739 + .proc_handler = &proc_dointvec,
49742 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49744 + .procname = "tpe_invert",
49745 + .data = &grsec_enable_tpe_invert,
49746 + .maxlen = sizeof(int),
49748 + .proc_handler = &proc_dointvec,
49751 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49753 + .procname = "tpe_restrict_all",
49754 + .data = &grsec_enable_tpe_all,
49755 + .maxlen = sizeof(int),
49757 + .proc_handler = &proc_dointvec,
49760 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49762 + .procname = "socket_all",
49763 + .data = &grsec_enable_socket_all,
49764 + .maxlen = sizeof(int),
49766 + .proc_handler = &proc_dointvec,
49769 + .procname = "socket_all_gid",
49770 + .data = &grsec_socket_all_gid,
49771 + .maxlen = sizeof(int),
49773 + .proc_handler = &proc_dointvec,
49776 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49778 + .procname = "socket_client",
49779 + .data = &grsec_enable_socket_client,
49780 + .maxlen = sizeof(int),
49782 + .proc_handler = &proc_dointvec,
49785 + .procname = "socket_client_gid",
49786 + .data = &grsec_socket_client_gid,
49787 + .maxlen = sizeof(int),
49789 + .proc_handler = &proc_dointvec,
49792 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49794 + .procname = "socket_server",
49795 + .data = &grsec_enable_socket_server,
49796 + .maxlen = sizeof(int),
49798 + .proc_handler = &proc_dointvec,
49801 + .procname = "socket_server_gid",
49802 + .data = &grsec_socket_server_gid,
49803 + .maxlen = sizeof(int),
49805 + .proc_handler = &proc_dointvec,
49808 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49810 + .procname = "audit_group",
49811 + .data = &grsec_enable_group,
49812 + .maxlen = sizeof(int),
49814 + .proc_handler = &proc_dointvec,
49817 + .procname = "audit_gid",
49818 + .data = &grsec_audit_gid,
49819 + .maxlen = sizeof(int),
49821 + .proc_handler = &proc_dointvec,
49824 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49826 + .procname = "audit_chdir",
49827 + .data = &grsec_enable_chdir,
49828 + .maxlen = sizeof(int),
49830 + .proc_handler = &proc_dointvec,
49833 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49835 + .procname = "audit_mount",
49836 + .data = &grsec_enable_mount,
49837 + .maxlen = sizeof(int),
49839 + .proc_handler = &proc_dointvec,
49842 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49844 + .procname = "audit_textrel",
49845 + .data = &grsec_enable_audit_textrel,
49846 + .maxlen = sizeof(int),
49848 + .proc_handler = &proc_dointvec,
49851 +#ifdef CONFIG_GRKERNSEC_DMESG
49853 + .procname = "dmesg",
49854 + .data = &grsec_enable_dmesg,
49855 + .maxlen = sizeof(int),
49857 + .proc_handler = &proc_dointvec,
49860 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49862 + .procname = "chroot_findtask",
49863 + .data = &grsec_enable_chroot_findtask,
49864 + .maxlen = sizeof(int),
49866 + .proc_handler = &proc_dointvec,
49869 +#ifdef CONFIG_GRKERNSEC_RESLOG
49871 + .procname = "resource_logging",
49872 + .data = &grsec_resource_logging,
49873 + .maxlen = sizeof(int),
49875 + .proc_handler = &proc_dointvec,
49878 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49880 + .procname = "audit_ptrace",
49881 + .data = &grsec_enable_audit_ptrace,
49882 + .maxlen = sizeof(int),
49884 + .proc_handler = &proc_dointvec,
49887 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49889 + .procname = "harden_ptrace",
49890 + .data = &grsec_enable_harden_ptrace,
49891 + .maxlen = sizeof(int),
49893 + .proc_handler = &proc_dointvec,
49897 + .procname = "grsec_lock",
49898 + .data = &grsec_lock,
49899 + .maxlen = sizeof(int),
49901 + .proc_handler = &proc_dointvec,
49904 +#ifdef CONFIG_GRKERNSEC_ROFS
49906 + .procname = "romount_protect",
49907 + .data = &grsec_enable_rofs,
49908 + .maxlen = sizeof(int),
49910 + .proc_handler = &proc_dointvec_minmax,
49918 diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49919 --- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49920 +++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49922 +#include <linux/kernel.h>
49923 +#include <linux/sched.h>
49924 +#include <linux/grinternal.h>
49925 +#include <linux/module.h>
49928 +gr_log_timechange(void)
49930 +#ifdef CONFIG_GRKERNSEC_TIME
49931 + if (grsec_enable_time)
49932 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49937 +EXPORT_SYMBOL(gr_log_timechange);
49938 diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49939 --- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49940 +++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49942 +#include <linux/kernel.h>
49943 +#include <linux/sched.h>
49944 +#include <linux/file.h>
49945 +#include <linux/fs.h>
49946 +#include <linux/grinternal.h>
49948 +extern int gr_acl_tpe_check(void);
49951 +gr_tpe_allow(const struct file *file)
49953 +#ifdef CONFIG_GRKERNSEC
49954 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49955 + const struct cred *cred = current_cred();
49957 + if (cred->uid && ((grsec_enable_tpe &&
49958 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49959 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49960 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49962 + in_group_p(grsec_tpe_gid)
49964 + ) || gr_acl_tpe_check()) &&
49965 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49966 + (inode->i_mode & S_IWOTH))))) {
49967 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49970 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49971 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49972 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49973 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49974 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49981 diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
49982 --- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49983 +++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
49985 +#include <linux/err.h>
49986 +#include <linux/kernel.h>
49987 +#include <linux/sched.h>
49988 +#include <linux/mm.h>
49989 +#include <linux/scatterlist.h>
49990 +#include <linux/crypto.h>
49991 +#include <linux/gracl.h>
49994 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49995 +#error "crypto and sha256 must be built into the kernel"
49999 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
50002 + struct crypto_hash *tfm;
50003 + struct hash_desc desc;
50004 + struct scatterlist sg;
50005 + unsigned char temp_sum[GR_SHA_LEN];
50006 + volatile int retval = 0;
50007 + volatile int dummy = 0;
50010 + sg_init_table(&sg, 1);
50012 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
50013 + if (IS_ERR(tfm)) {
50014 + /* should never happen, since sha256 should be built in */
50021 + crypto_hash_init(&desc);
50024 + sg_set_buf(&sg, p, GR_SALT_LEN);
50025 + crypto_hash_update(&desc, &sg, sg.length);
50028 + sg_set_buf(&sg, p, strlen(p));
50030 + crypto_hash_update(&desc, &sg, sg.length);
50032 + crypto_hash_final(&desc, temp_sum);
50034 + memset(entry->pw, 0, GR_PW_LEN);
50036 + for (i = 0; i < GR_SHA_LEN; i++)
50037 + if (sum[i] != temp_sum[i])
50040 + dummy = 1; // waste a cycle
50042 + crypto_free_hash(tfm);
50046 diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
50047 --- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
50048 +++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-05 19:44:37.000000000 -0400
50051 +# grecurity configuration
50057 + bool "Grsecurity"
50059 + select CRYPTO_SHA256
50061 + If you say Y here, you will be able to configure many features
50062 + that will enhance the security of your system. It is highly
50063 + recommended that you say Y here and read through the help
50064 + for each option so that you fully understand the features and
50065 + can evaluate their usefulness for your machine.
50068 + prompt "Security Level"
50069 + depends on GRKERNSEC
50070 + default GRKERNSEC_CUSTOM
50072 +config GRKERNSEC_LOW
50074 + select GRKERNSEC_LINK
50075 + select GRKERNSEC_FIFO
50076 + select GRKERNSEC_EXECVE
50077 + select GRKERNSEC_RANDNET
50078 + select GRKERNSEC_DMESG
50079 + select GRKERNSEC_CHROOT
50080 + select GRKERNSEC_CHROOT_CHDIR
50083 + If you choose this option, several of the grsecurity options will
50084 + be enabled that will give you greater protection against a number
50085 + of attacks, while assuring that none of your software will have any
50086 + conflicts with the additional security measures. If you run a lot
50087 + of unusual software, or you are having problems with the higher
50088 + security levels, you should say Y here. With this option, the
50089 + following features are enabled:
50091 + - Linking restrictions
50092 + - FIFO restrictions
50093 + - Enforcing RLIMIT_NPROC on execve
50094 + - Restricted dmesg
50095 + - Enforced chdir("/") on chroot
50096 + - Runtime module disabling
50098 +config GRKERNSEC_MEDIUM
50101 + select PAX_EI_PAX
50102 + select PAX_PT_PAX_FLAGS
50103 + select PAX_HAVE_ACL_FLAGS
50104 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50105 + select GRKERNSEC_CHROOT
50106 + select GRKERNSEC_CHROOT_SYSCTL
50107 + select GRKERNSEC_LINK
50108 + select GRKERNSEC_FIFO
50109 + select GRKERNSEC_EXECVE
50110 + select GRKERNSEC_DMESG
50111 + select GRKERNSEC_RANDNET
50112 + select GRKERNSEC_FORKFAIL
50113 + select GRKERNSEC_TIME
50114 + select GRKERNSEC_SIGNAL
50115 + select GRKERNSEC_CHROOT
50116 + select GRKERNSEC_CHROOT_UNIX
50117 + select GRKERNSEC_CHROOT_MOUNT
50118 + select GRKERNSEC_CHROOT_PIVOT
50119 + select GRKERNSEC_CHROOT_DOUBLE
50120 + select GRKERNSEC_CHROOT_CHDIR
50121 + select GRKERNSEC_CHROOT_MKNOD
50122 + select GRKERNSEC_PROC
50123 + select GRKERNSEC_PROC_USERGROUP
50124 + select PAX_RANDUSTACK
50126 + select PAX_RANDMMAP
50127 + select PAX_REFCOUNT if (X86 || SPARC64)
50128 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50131 + If you say Y here, several features in addition to those included
50132 + in the low additional security level will be enabled. These
50133 + features provide even more security to your system, though in rare
50134 + cases they may be incompatible with very old or poorly written
50135 + software. If you enable this option, make sure that your auth
50136 + service (identd) is running as gid 1001. With this option,
50137 + the following features (in addition to those provided in the
50138 + low additional security level) will be enabled:
50140 + - Failed fork logging
50141 + - Time change logging
50143 + - Deny mounts in chroot
50144 + - Deny double chrooting
50145 + - Deny sysctl writes in chroot
50146 + - Deny mknod in chroot
50147 + - Deny access to abstract AF_UNIX sockets out of chroot
50148 + - Deny pivot_root in chroot
50149 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50150 + - /proc restrictions with special GID set to 10 (usually wheel)
50151 + - Address Space Layout Randomization (ASLR)
50152 + - Prevent exploitation of most refcount overflows
50153 + - Bounds checking of copying between the kernel and userland
50155 +config GRKERNSEC_HIGH
50157 + select GRKERNSEC_LINK
50158 + select GRKERNSEC_FIFO
50159 + select GRKERNSEC_EXECVE
50160 + select GRKERNSEC_DMESG
50161 + select GRKERNSEC_FORKFAIL
50162 + select GRKERNSEC_TIME
50163 + select GRKERNSEC_SIGNAL
50164 + select GRKERNSEC_CHROOT
50165 + select GRKERNSEC_CHROOT_SHMAT
50166 + select GRKERNSEC_CHROOT_UNIX
50167 + select GRKERNSEC_CHROOT_MOUNT
50168 + select GRKERNSEC_CHROOT_FCHDIR
50169 + select GRKERNSEC_CHROOT_PIVOT
50170 + select GRKERNSEC_CHROOT_DOUBLE
50171 + select GRKERNSEC_CHROOT_CHDIR
50172 + select GRKERNSEC_CHROOT_MKNOD
50173 + select GRKERNSEC_CHROOT_CAPS
50174 + select GRKERNSEC_CHROOT_SYSCTL
50175 + select GRKERNSEC_CHROOT_FINDTASK
50176 + select GRKERNSEC_SYSFS_RESTRICT
50177 + select GRKERNSEC_PROC
50178 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50179 + select GRKERNSEC_HIDESYM
50180 + select GRKERNSEC_BRUTE
50181 + select GRKERNSEC_PROC_USERGROUP
50182 + select GRKERNSEC_KMEM
50183 + select GRKERNSEC_RESLOG
50184 + select GRKERNSEC_RANDNET
50185 + select GRKERNSEC_PROC_ADD
50186 + select GRKERNSEC_CHROOT_CHMOD
50187 + select GRKERNSEC_CHROOT_NICE
50188 + select GRKERNSEC_AUDIT_MOUNT
50189 + select GRKERNSEC_MODHARDEN if (MODULES)
50190 + select GRKERNSEC_HARDEN_PTRACE
50191 + select GRKERNSEC_VM86 if (X86_32)
50192 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50194 + select PAX_RANDUSTACK
50196 + select PAX_RANDMMAP
50197 + select PAX_NOEXEC
50198 + select PAX_MPROTECT
50199 + select PAX_EI_PAX
50200 + select PAX_PT_PAX_FLAGS
50201 + select PAX_HAVE_ACL_FLAGS
50202 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50203 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50204 + select PAX_RANDKSTACK if (X86_TSC && X86)
50205 + select PAX_SEGMEXEC if (X86_32)
50206 + select PAX_PAGEEXEC
50207 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50208 + select PAX_EMUTRAMP if (PARISC)
50209 + select PAX_EMUSIGRT if (PARISC)
50210 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50211 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50212 + select PAX_REFCOUNT if (X86 || SPARC64)
50213 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50215 + If you say Y here, many of the features of grsecurity will be
50216 + enabled, which will protect you against many kinds of attacks
50217 + against your system. The heightened security comes at a cost
50218 + of an increased chance of incompatibilities with rare software
50219 + on your machine. Since this security level enables PaX, you should
50220 + view <http://pax.grsecurity.net> and read about the PaX
50221 + project. While you are there, download chpax and run it on
50222 + binaries that cause problems with PaX. Also remember that
50223 + since the /proc restrictions are enabled, you must run your
50224 + identd as gid 1001. This security level enables the following
50225 + features in addition to those listed in the low and medium
50228 + - Additional /proc restrictions
50229 + - Chmod restrictions in chroot
50230 + - No signals, ptrace, or viewing of processes outside of chroot
50231 + - Capability restrictions in chroot
50232 + - Deny fchdir out of chroot
50233 + - Priority restrictions in chroot
50234 + - Segmentation-based implementation of PaX
50235 + - Mprotect restrictions
50236 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50237 + - Kernel stack randomization
50238 + - Mount/unmount/remount logging
50239 + - Kernel symbol hiding
50240 + - Prevention of memory exhaustion-based exploits
50241 + - Hardening of module auto-loading
50242 + - Ptrace restrictions
50243 + - Restricted vm86 mode
50244 + - Restricted sysfs/debugfs
50245 + - Active kernel exploit response
50247 +config GRKERNSEC_CUSTOM
50250 + If you say Y here, you will be able to configure every grsecurity
50251 + option, which allows you to enable many more features that aren't
50252 + covered in the basic security levels. These additional features
50253 + include TPE, socket restrictions, and the sysctl system for
50254 + grsecurity. It is advised that you read through the help for
50255 + each option to determine its usefulness in your situation.
50259 +menu "Address Space Protection"
50260 +depends on GRKERNSEC
50262 +config GRKERNSEC_KMEM
50263 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50264 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50266 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50267 + be written to via mmap or otherwise to modify the running kernel.
50268 + /dev/port will also not be allowed to be opened. If you have module
50269 + support disabled, enabling this will close up four ways that are
50270 + currently used to insert malicious code into the running kernel.
50271 + Even with all these features enabled, we still highly recommend that
50272 + you use the RBAC system, as it is still possible for an attacker to
50273 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50274 + If you are not using XFree86, you may be able to stop this additional
50275 + case by enabling the 'Disable privileged I/O' option. Though nothing
50276 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50277 + but only to video memory, which is the only writing we allow in this
50278 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50279 + not be allowed to mprotect it with PROT_WRITE later.
50280 + It is highly recommended that you say Y here if you meet all the
50281 + conditions above.
50283 +config GRKERNSEC_VM86
50284 + bool "Restrict VM86 mode"
50285 + depends on X86_32
50288 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50289 + make use of a special execution mode on 32bit x86 processors called
50290 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50291 + video cards and will still work with this option enabled. The purpose
50292 + of the option is to prevent exploitation of emulation errors in
50293 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50294 + Nearly all users should be able to enable this option.
50296 +config GRKERNSEC_IO
50297 + bool "Disable privileged I/O"
50300 + select RTC_INTF_DEV
50301 + select RTC_DRV_CMOS
50304 + If you say Y here, all ioperm and iopl calls will return an error.
50305 + Ioperm and iopl can be used to modify the running kernel.
50306 + Unfortunately, some programs need this access to operate properly,
50307 + the most notable of which are XFree86 and hwclock. hwclock can be
50308 + remedied by having RTC support in the kernel, so real-time
50309 + clock support is enabled if this option is enabled, to ensure
50310 + that hwclock operates correctly. XFree86 still will not
50311 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50312 + IF YOU USE XFree86. If you use XFree86 and you still want to
50313 + protect your kernel against modification, use the RBAC system.
50315 +config GRKERNSEC_PROC_MEMMAP
50316 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50317 + default y if (PAX_NOEXEC || PAX_ASLR)
50318 + depends on PAX_NOEXEC || PAX_ASLR
50320 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50321 + give no information about the addresses of its mappings if
50322 + PaX features that rely on random addresses are enabled on the task.
50323 + If you use PaX it is greatly recommended that you say Y here as it
50324 + closes up a hole that makes the full ASLR useless for suid
50327 +config GRKERNSEC_BRUTE
50328 + bool "Deter exploit bruteforcing"
50330 + If you say Y here, attempts to bruteforce exploits against forking
50331 + daemons such as apache or sshd, as well as against suid/sgid binaries
50332 + will be deterred. When a child of a forking daemon is killed by PaX
50333 + or crashes due to an illegal instruction or other suspicious signal,
50334 + the parent process will be delayed 30 seconds upon every subsequent
50335 + fork until the administrator is able to assess the situation and
50336 + restart the daemon.
50337 + In the suid/sgid case, the attempt is logged, the user has all their
50338 + processes terminated, and they are prevented from executing any further
50339 + processes for 15 minutes.
50340 + It is recommended that you also enable signal logging in the auditing
50341 + section so that logs are generated when a process triggers a suspicious
50343 + If the sysctl option is enabled, a sysctl option with name
50344 + "deter_bruteforce" is created.
50347 +config GRKERNSEC_MODHARDEN
50348 + bool "Harden module auto-loading"
50349 + depends on MODULES
50351 + If you say Y here, module auto-loading in response to use of some
50352 + feature implemented by an unloaded module will be restricted to
50353 + root users. Enabling this option helps defend against attacks
50354 + by unprivileged users who abuse the auto-loading behavior to
50355 + cause a vulnerable module to load that is then exploited.
50357 + If this option prevents a legitimate use of auto-loading for a
50358 + non-root user, the administrator can execute modprobe manually
50359 + with the exact name of the module mentioned in the alert log.
50360 + Alternatively, the administrator can add the module to the list
50361 + of modules loaded at boot by modifying init scripts.
50363 + Modification of init scripts will most likely be needed on
50364 + Ubuntu servers with encrypted home directory support enabled,
50365 + as the first non-root user logging in will cause the ecb(aes),
50366 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50368 +config GRKERNSEC_HIDESYM
50369 + bool "Hide kernel symbols"
50371 + If you say Y here, getting information on loaded modules, and
50372 + displaying all kernel symbols through a syscall will be restricted
50373 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50374 + /proc/kallsyms will be restricted to the root user. The RBAC
50375 + system can hide that entry even from root.
50377 + This option also prevents leaking of kernel addresses through
50378 + several /proc entries.
50380 + Note that this option is only effective provided the following
50381 + conditions are met:
50382 + 1) The kernel using grsecurity is not precompiled by some distribution
50383 + 2) You have also enabled GRKERNSEC_DMESG
50384 + 3) You are using the RBAC system and hiding other files such as your
50385 + kernel image and System.map. Alternatively, enabling this option
50386 + causes the permissions on /boot, /lib/modules, and the kernel
50387 + source directory to change at compile time to prevent
50388 + reading by non-root users.
50389 + If the above conditions are met, this option will aid in providing a
50390 + useful protection against local kernel exploitation of overflows
50391 + and arbitrary read/write vulnerabilities.
50393 +config GRKERNSEC_KERN_LOCKOUT
50394 + bool "Active kernel exploit response"
50395 + depends on X86 || ARM || PPC || SPARC
50397 + If you say Y here, when a PaX alert is triggered due to suspicious
50398 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50399 + or an OOPs occurs due to bad memory accesses, instead of just
50400 + terminating the offending process (and potentially allowing
50401 + a subsequent exploit from the same user), we will take one of two
50403 + If the user was root, we will panic the system
50404 + If the user was non-root, we will log the attempt, terminate
50405 + all processes owned by the user, then prevent them from creating
50406 + any new processes until the system is restarted
50407 + This deters repeated kernel exploitation/bruteforcing attempts
50408 + and is useful for later forensics.
50411 +menu "Role Based Access Control Options"
50412 +depends on GRKERNSEC
50414 +config GRKERNSEC_RBAC_DEBUG
50417 +config GRKERNSEC_NO_RBAC
50418 + bool "Disable RBAC system"
50420 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50421 + preventing the RBAC system from being enabled. You should only say Y
50422 + here if you have no intention of using the RBAC system, so as to prevent
50423 + an attacker with root access from misusing the RBAC system to hide files
50424 + and processes when loadable module support and /dev/[k]mem have been
50427 +config GRKERNSEC_ACL_HIDEKERN
50428 + bool "Hide kernel processes"
50430 + If you say Y here, all kernel threads will be hidden to all
50431 + processes but those whose subject has the "view hidden processes"
50434 +config GRKERNSEC_ACL_MAXTRIES
50435 + int "Maximum tries before password lockout"
50438 + This option enforces the maximum number of times a user can attempt
50439 + to authorize themselves with the grsecurity RBAC system before being
50440 + denied the ability to attempt authorization again for a specified time.
50441 + The lower the number, the harder it will be to brute-force a password.
50443 +config GRKERNSEC_ACL_TIMEOUT
50444 + int "Time to wait after max password tries, in seconds"
50447 + This option specifies the time the user must wait after attempting to
50448 + authorize to the RBAC system with the maximum number of invalid
50449 + passwords. The higher the number, the harder it will be to brute-force
50453 +menu "Filesystem Protections"
50454 +depends on GRKERNSEC
50456 +config GRKERNSEC_PROC
50457 + bool "Proc restrictions"
50459 + If you say Y here, the permissions of the /proc filesystem
50460 + will be altered to enhance system security and privacy. You MUST
50461 + choose either a user only restriction or a user and group restriction.
50462 + Depending upon the option you choose, you can either restrict users to
50463 + see only the processes they themselves run, or choose a group that can
50464 + view all processes and files normally restricted to root if you choose
50465 + the "restrict to user only" option. NOTE: If you're running identd as
50466 + a non-root user, you will have to run it as the group you specify here.
50468 +config GRKERNSEC_PROC_USER
50469 + bool "Restrict /proc to user only"
50470 + depends on GRKERNSEC_PROC
50472 + If you say Y here, non-root users will only be able to view their own
50473 + processes, and restricts them from viewing network-related information,
50474 + and viewing kernel symbol and module information.
50476 +config GRKERNSEC_PROC_USERGROUP
50477 + bool "Allow special group"
50478 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50480 + If you say Y here, you will be able to select a group that will be
50481 + able to view all processes and network-related information. If you've
50482 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50483 + remain hidden. This option is useful if you want to run identd as
50486 +config GRKERNSEC_PROC_GID
50487 + int "GID for special group"
50488 + depends on GRKERNSEC_PROC_USERGROUP
50491 +config GRKERNSEC_PROC_ADD
50492 + bool "Additional restrictions"
50493 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50495 + If you say Y here, additional restrictions will be placed on
50496 + /proc that keep normal users from viewing device information and
50497 + slabinfo information that could be useful for exploits.
50499 +config GRKERNSEC_LINK
50500 + bool "Linking restrictions"
50502 + If you say Y here, /tmp race exploits will be prevented, since users
50503 + will no longer be able to follow symlinks owned by other users in
50504 + world-writable +t directories (e.g. /tmp), unless the owner of the
50505 + symlink is the owner of the directory. Users will also not be
50506 + able to hardlink to files they do not own. If the sysctl option is
50507 + enabled, a sysctl option with name "linking_restrictions" is created.
50509 +config GRKERNSEC_FIFO
50510 + bool "FIFO restrictions"
50512 + If you say Y here, users will not be able to write to FIFOs they don't
50513 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50514 + the FIFO is the same owner of the directory it's held in. If the sysctl
50515 + option is enabled, a sysctl option with name "fifo_restrictions" is
50518 +config GRKERNSEC_SYSFS_RESTRICT
50519 + bool "Sysfs/debugfs restriction"
50522 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50523 + any filesystem normally mounted under it (e.g. debugfs) will only
50524 + be accessible by root. These filesystems generally provide access
50525 + to hardware and debug information that isn't appropriate for unprivileged
50526 + users of the system. Sysfs and debugfs have also become a large source
50527 + of new vulnerabilities, ranging from infoleaks to local compromise.
50528 + There has been very little oversight with an eye toward security involved
50529 + in adding new exporters of information to these filesystems, so their
50530 + use is discouraged.
50531 + This option is equivalent to a chmod 0700 of the mount paths.
50533 +config GRKERNSEC_ROFS
50534 + bool "Runtime read-only mount protection"
50536 + If you say Y here, a sysctl option with name "romount_protect" will
50537 + be created. By setting this option to 1 at runtime, filesystems
50538 + will be protected in the following ways:
50539 + * No new writable mounts will be allowed
50540 + * Existing read-only mounts won't be able to be remounted read/write
50541 + * Write operations will be denied on all block devices
50542 + This option acts independently of grsec_lock: once it is set to 1,
50543 + it cannot be turned off. Therefore, please be mindful of the resulting
50544 + behavior if this option is enabled in an init script on a read-only
50545 + filesystem. This feature is mainly intended for secure embedded systems.
50547 +config GRKERNSEC_CHROOT
50548 + bool "Chroot jail restrictions"
50550 + If you say Y here, you will be able to choose several options that will
50551 + make breaking out of a chrooted jail much more difficult. If you
50552 + encounter no software incompatibilities with the following options, it
50553 + is recommended that you enable each one.
50555 +config GRKERNSEC_CHROOT_MOUNT
50556 + bool "Deny mounts"
50557 + depends on GRKERNSEC_CHROOT
50559 + If you say Y here, processes inside a chroot will not be able to
50560 + mount or remount filesystems. If the sysctl option is enabled, a
50561 + sysctl option with name "chroot_deny_mount" is created.
50563 +config GRKERNSEC_CHROOT_DOUBLE
50564 + bool "Deny double-chroots"
50565 + depends on GRKERNSEC_CHROOT
50567 + If you say Y here, processes inside a chroot will not be able to chroot
50568 + again outside the chroot. This is a widely used method of breaking
50569 + out of a chroot jail and should not be allowed. If the sysctl
50570 + option is enabled, a sysctl option with name
50571 + "chroot_deny_chroot" is created.
50573 +config GRKERNSEC_CHROOT_PIVOT
50574 + bool "Deny pivot_root in chroot"
50575 + depends on GRKERNSEC_CHROOT
50577 + If you say Y here, processes inside a chroot will not be able to use
50578 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50579 + works similar to chroot in that it changes the root filesystem. This
50580 + function could be misused in a chrooted process to attempt to break out
50581 + of the chroot, and therefore should not be allowed. If the sysctl
50582 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50585 +config GRKERNSEC_CHROOT_CHDIR
50586 + bool "Enforce chdir(\"/\") on all chroots"
50587 + depends on GRKERNSEC_CHROOT
50589 + If you say Y here, the current working directory of all newly-chrooted
50590 + applications will be set to the root directory of the chroot.
50591 + The man page on chroot(2) states:
50592 + Note that this call does not change the current working
50593 + directory, so that `.' can be outside the tree rooted at
50594 + `/'. In particular, the super-user can escape from a
50595 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50597 + It is recommended that you say Y here, since it's not known to break
50598 + any software. If the sysctl option is enabled, a sysctl option with
50599 + name "chroot_enforce_chdir" is created.
50601 +config GRKERNSEC_CHROOT_CHMOD
50602 + bool "Deny (f)chmod +s"
50603 + depends on GRKERNSEC_CHROOT
50605 + If you say Y here, processes inside a chroot will not be able to chmod
50606 + or fchmod files to make them have suid or sgid bits. This protects
50607 + against another published method of breaking a chroot. If the sysctl
50608 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50611 +config GRKERNSEC_CHROOT_FCHDIR
50612 + bool "Deny fchdir out of chroot"
50613 + depends on GRKERNSEC_CHROOT
50615 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50616 + to a file descriptor of the chrooting process that points to a directory
50617 + outside the filesystem will be stopped. If the sysctl option
50618 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50620 +config GRKERNSEC_CHROOT_MKNOD
50621 + bool "Deny mknod"
50622 + depends on GRKERNSEC_CHROOT
50624 + If you say Y here, processes inside a chroot will not be allowed to
50625 + mknod. The problem with using mknod inside a chroot is that it
50626 + would allow an attacker to create a device entry that is the same
50627 + as one on the physical root of your system, which could range from
50628 + anything from the console device to a device for your harddrive (which
50629 + they could then use to wipe the drive or steal data). It is recommended
50630 + that you say Y here, unless you run into software incompatibilities.
50631 + If the sysctl option is enabled, a sysctl option with name
50632 + "chroot_deny_mknod" is created.
50634 +config GRKERNSEC_CHROOT_SHMAT
50635 + bool "Deny shmat() out of chroot"
50636 + depends on GRKERNSEC_CHROOT
50638 + If you say Y here, processes inside a chroot will not be able to attach
50639 + to shared memory segments that were created outside of the chroot jail.
50640 + It is recommended that you say Y here. If the sysctl option is enabled,
50641 + a sysctl option with name "chroot_deny_shmat" is created.
50643 +config GRKERNSEC_CHROOT_UNIX
50644 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50645 + depends on GRKERNSEC_CHROOT
50647 + If you say Y here, processes inside a chroot will not be able to
50648 + connect to abstract (meaning not belonging to a filesystem) Unix
50649 + domain sockets that were bound outside of a chroot. It is recommended
50650 + that you say Y here. If the sysctl option is enabled, a sysctl option
50651 + with name "chroot_deny_unix" is created.
50653 +config GRKERNSEC_CHROOT_FINDTASK
50654 + bool "Protect outside processes"
50655 + depends on GRKERNSEC_CHROOT
50657 + If you say Y here, processes inside a chroot will not be able to
50658 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50659 + getsid, or view any process outside of the chroot. If the sysctl
50660 + option is enabled, a sysctl option with name "chroot_findtask" is
50663 +config GRKERNSEC_CHROOT_NICE
50664 + bool "Restrict priority changes"
50665 + depends on GRKERNSEC_CHROOT
50667 + If you say Y here, processes inside a chroot will not be able to raise
50668 + the priority of processes in the chroot, or alter the priority of
50669 + processes outside the chroot. This provides more security than simply
50670 + removing CAP_SYS_NICE from the process' capability set. If the
50671 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50674 +config GRKERNSEC_CHROOT_SYSCTL
50675 + bool "Deny sysctl writes"
50676 + depends on GRKERNSEC_CHROOT
50678 + If you say Y here, an attacker in a chroot will not be able to
50679 + write to sysctl entries, either by sysctl(2) or through a /proc
50680 + interface. It is strongly recommended that you say Y here. If the
50681 + sysctl option is enabled, a sysctl option with name
50682 + "chroot_deny_sysctl" is created.
50684 +config GRKERNSEC_CHROOT_CAPS
50685 + bool "Capability restrictions"
50686 + depends on GRKERNSEC_CHROOT
50688 + If you say Y here, the capabilities on all root processes within a
50689 + chroot jail will be lowered to stop module insertion, raw i/o,
50690 + system and net admin tasks, rebooting the system, modifying immutable
50691 + files, modifying IPC owned by another, and changing the system time.
50692 + This is left an option because it can break some apps. Disable this
50693 + if your chrooted apps are having problems performing those kinds of
50694 + tasks. If the sysctl option is enabled, a sysctl option with
50695 + name "chroot_caps" is created.
50698 +menu "Kernel Auditing"
50699 +depends on GRKERNSEC
50701 +config GRKERNSEC_AUDIT_GROUP
50702 + bool "Single group for auditing"
50704 + If you say Y here, the exec, chdir, and (un)mount logging features
50705 + will only operate on a group you specify. This option is recommended
50706 + if you only want to watch certain users instead of having a large
50707 + amount of logs from the entire system. If the sysctl option is enabled,
50708 + a sysctl option with name "audit_group" is created.
50710 +config GRKERNSEC_AUDIT_GID
50711 + int "GID for auditing"
50712 + depends on GRKERNSEC_AUDIT_GROUP
50715 +config GRKERNSEC_EXECLOG
50716 + bool "Exec logging"
50718 + If you say Y here, all execve() calls will be logged (since the
50719 + other exec*() calls are frontends to execve(), all execution
50720 + will be logged). Useful for shell-servers that like to keep track
50721 + of their users. If the sysctl option is enabled, a sysctl option with
50722 + name "exec_logging" is created.
50723 + WARNING: This option when enabled will produce a LOT of logs, especially
50724 + on an active system.
50726 +config GRKERNSEC_RESLOG
50727 + bool "Resource logging"
50729 + If you say Y here, all attempts to overstep resource limits will
50730 + be logged with the resource name, the requested size, and the current
50731 + limit. It is highly recommended that you say Y here. If the sysctl
50732 + option is enabled, a sysctl option with name "resource_logging" is
50733 + created. If the RBAC system is enabled, the sysctl value is ignored.
50735 +config GRKERNSEC_CHROOT_EXECLOG
50736 + bool "Log execs within chroot"
50738 + If you say Y here, all executions inside a chroot jail will be logged
50739 + to syslog. This can cause a large amount of logs if certain
50740 + applications (eg. djb's daemontools) are installed on the system, and
50741 + is therefore left as an option. If the sysctl option is enabled, a
50742 + sysctl option with name "chroot_execlog" is created.
50744 +config GRKERNSEC_AUDIT_PTRACE
50745 + bool "Ptrace logging"
50747 + If you say Y here, all attempts to attach to a process via ptrace
50748 + will be logged. If the sysctl option is enabled, a sysctl option
50749 + with name "audit_ptrace" is created.
50751 +config GRKERNSEC_AUDIT_CHDIR
50752 + bool "Chdir logging"
50754 + If you say Y here, all chdir() calls will be logged. If the sysctl
50755 + option is enabled, a sysctl option with name "audit_chdir" is created.
50757 +config GRKERNSEC_AUDIT_MOUNT
50758 + bool "(Un)Mount logging"
50760 + If you say Y here, all mounts and unmounts will be logged. If the
50761 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50764 +config GRKERNSEC_SIGNAL
50765 + bool "Signal logging"
50767 + If you say Y here, certain important signals will be logged, such as
50768 + SIGSEGV, which will as a result inform you when an error in a program
50769 + occurred, which in some cases could mean a possible exploit attempt.
50770 + If the sysctl option is enabled, a sysctl option with name
50771 + "signal_logging" is created.
50773 +config GRKERNSEC_FORKFAIL
50774 + bool "Fork failure logging"
50776 + If you say Y here, all failed fork() attempts will be logged.
50777 + This could suggest a fork bomb, or someone attempting to overstep
50778 + their process limit. If the sysctl option is enabled, a sysctl option
50779 + with name "forkfail_logging" is created.
50781 +config GRKERNSEC_TIME
50782 + bool "Time change logging"
50784 + If you say Y here, any changes of the system clock will be logged.
50785 + If the sysctl option is enabled, a sysctl option with name
50786 + "timechange_logging" is created.
50788 +config GRKERNSEC_PROC_IPADDR
50789 + bool "/proc/<pid>/ipaddr support"
50791 + If you say Y here, a new entry will be added to each /proc/<pid>
50792 + directory that contains the IP address of the person using the task.
50793 + The IP is carried across local TCP and AF_UNIX stream sockets.
50794 + This information can be useful for IDS/IPSes to perform remote response
50795 + to a local attack. The entry is readable by only the owner of the
50796 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50797 + the RBAC system), and thus does not create privacy concerns.
50799 +config GRKERNSEC_RWXMAP_LOG
50800 + bool 'Denied RWX mmap/mprotect logging'
50801 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50803 + If you say Y here, calls to mmap() and mprotect() with explicit
50804 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50805 + denied by the PAX_MPROTECT feature. If the sysctl option is
50806 + enabled, a sysctl option with name "rwxmap_logging" is created.
50808 +config GRKERNSEC_AUDIT_TEXTREL
50809 + bool 'ELF text relocations logging (READ HELP)'
50810 + depends on PAX_MPROTECT
50812 + If you say Y here, text relocations will be logged with the filename
50813 + of the offending library or binary. The purpose of the feature is
50814 + to help Linux distribution developers get rid of libraries and
50815 + binaries that need text relocations which hinder the future progress
50816 + of PaX. Only Linux distribution developers should say Y here, and
50817 + never on a production machine, as this option creates an information
50818 + leak that could aid an attacker in defeating the randomization of
50819 + a single memory region. If the sysctl option is enabled, a sysctl
50820 + option with name "audit_textrel" is created.
50824 +menu "Executable Protections"
50825 +depends on GRKERNSEC
50827 +config GRKERNSEC_EXECVE
50828 + bool "Enforce RLIMIT_NPROC on execs"
50830 + If you say Y here, users with a resource limit on processes will
50831 + have the value checked during execve() calls. The current system
50832 + only checks the system limit during fork() calls. If the sysctl option
50833 + is enabled, a sysctl option with name "execve_limiting" is created.
50835 +config GRKERNSEC_DMESG
50836 + bool "Dmesg(8) restriction"
50838 + If you say Y here, non-root users will not be able to use dmesg(8)
50839 + to view up to the last 4kb of messages in the kernel's log buffer.
50840 + The kernel's log buffer often contains kernel addresses and other
50841 + identifying information useful to an attacker in fingerprinting a
50842 + system for a targeted exploit.
50843 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50846 +config GRKERNSEC_HARDEN_PTRACE
50847 + bool "Deter ptrace-based process snooping"
50849 + If you say Y here, TTY sniffers and other malicious monitoring
50850 + programs implemented through ptrace will be defeated. If you
50851 + have been using the RBAC system, this option has already been
50852 + enabled for several years for all users, with the ability to make
50853 + fine-grained exceptions.
50855 + This option only affects the ability of non-root users to ptrace
50856 + processes that are not a descendent of the ptracing process.
50857 + This means that strace ./binary and gdb ./binary will still work,
50858 + but attaching to arbitrary processes will not. If the sysctl
50859 + option is enabled, a sysctl option with name "harden_ptrace" is
50862 +config GRKERNSEC_TPE
50863 + bool "Trusted Path Execution (TPE)"
50865 + If you say Y here, you will be able to choose a gid to add to the
50866 + supplementary groups of users you want to mark as "untrusted."
50867 + These users will not be able to execute any files that are not in
50868 + root-owned directories writable only by root. If the sysctl option
50869 + is enabled, a sysctl option with name "tpe" is created.
50871 +config GRKERNSEC_TPE_ALL
50872 + bool "Partially restrict all non-root users"
50873 + depends on GRKERNSEC_TPE
50875 + If you say Y here, all non-root users will be covered under
50876 + a weaker TPE restriction. This is separate from, and in addition to,
50877 + the main TPE options that you have selected elsewhere. Thus, if a
50878 + "trusted" GID is chosen, this restriction applies to even that GID.
50879 + Under this restriction, all non-root users will only be allowed to
50880 + execute files in directories they own that are not group or
50881 + world-writable, or in directories owned by root and writable only by
50882 + root. If the sysctl option is enabled, a sysctl option with name
50883 + "tpe_restrict_all" is created.
50885 +config GRKERNSEC_TPE_INVERT
50886 + bool "Invert GID option"
50887 + depends on GRKERNSEC_TPE
50889 + If you say Y here, the group you specify in the TPE configuration will
50890 + decide what group TPE restrictions will be *disabled* for. This
50891 + option is useful if you want TPE restrictions to be applied to most
50892 + users on the system. If the sysctl option is enabled, a sysctl option
50893 + with name "tpe_invert" is created. Unlike other sysctl options, this
50894 + entry will default to on for backward-compatibility.
50896 +config GRKERNSEC_TPE_GID
50897 + int "GID for untrusted users"
50898 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50901 + Setting this GID determines what group TPE restrictions will be
50902 + *enabled* for. If the sysctl option is enabled, a sysctl option
50903 + with name "tpe_gid" is created.
50905 +config GRKERNSEC_TPE_GID
50906 + int "GID for trusted users"
50907 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50910 + Setting this GID determines what group TPE restrictions will be
50911 + *disabled* for. If the sysctl option is enabled, a sysctl option
50912 + with name "tpe_gid" is created.
50915 +menu "Network Protections"
50916 +depends on GRKERNSEC
50918 +config GRKERNSEC_RANDNET
50919 + bool "Larger entropy pools"
50921 + If you say Y here, the entropy pools used for many features of Linux
50922 + and grsecurity will be doubled in size. Since several grsecurity
50923 + features use additional randomness, it is recommended that you say Y
50924 + here. Saying Y here has a similar effect as modifying
50925 + /proc/sys/kernel/random/poolsize.
50927 +config GRKERNSEC_BLACKHOLE
50928 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50930 + If you say Y here, neither TCP resets nor ICMP
50931 + destination-unreachable packets will be sent in response to packets
50932 + sent to ports for which no associated listening process exists.
50933 + This feature supports both IPV4 and IPV6 and exempts the
50934 + loopback interface from blackholing. Enabling this feature
50935 + makes a host more resilient to DoS attacks and reduces network
50936 + visibility against scanners.
50938 + The blackhole feature as-implemented is equivalent to the FreeBSD
50939 + blackhole feature, as it prevents RST responses to all packets, not
50940 + just SYNs. Under most application behavior this causes no
50941 + problems, but applications (like haproxy) may not close certain
50942 + connections in a way that cleanly terminates them on the remote
50943 + end, leaving the remote host in LAST_ACK state. Because of this
50944 + side-effect and to prevent intentional LAST_ACK DoSes, this
50945 + feature also adds automatic mitigation against such attacks.
50946 + The mitigation drastically reduces the amount of time a socket
50947 + can spend in LAST_ACK state. If you're using haproxy and not
50948 + all servers it connects to have this option enabled, consider
50949 + disabling this feature on the haproxy host.
50951 + If the sysctl option is enabled, two sysctl options with names
50952 + "ip_blackhole" and "lastack_retries" will be created.
50953 + While "ip_blackhole" takes the standard zero/non-zero on/off
50954 + toggle, "lastack_retries" uses the same kinds of values as
50955 + "tcp_retries1" and "tcp_retries2". The default value of 4
50956 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50959 +config GRKERNSEC_SOCKET
50960 + bool "Socket restrictions"
50962 + If you say Y here, you will be able to choose from several options.
50963 + If you assign a GID on your system and add it to the supplementary
50964 + groups of users you want to restrict socket access to, this patch
50965 + will perform up to three things, based on the option(s) you choose.
50967 +config GRKERNSEC_SOCKET_ALL
50968 + bool "Deny any sockets to group"
50969 + depends on GRKERNSEC_SOCKET
50971 + If you say Y here, you will be able to choose a GID whose users will
50972 + be unable to connect to other hosts from your machine or run server
50973 + applications from your machine. If the sysctl option is enabled, a
50974 + sysctl option with name "socket_all" is created.
50976 +config GRKERNSEC_SOCKET_ALL_GID
50977 + int "GID to deny all sockets for"
50978 + depends on GRKERNSEC_SOCKET_ALL
50981 + Here you can choose the GID to disable socket access for. Remember to
50982 + add the users you want socket access disabled for to the GID
50983 + specified here. If the sysctl option is enabled, a sysctl option
50984 + with name "socket_all_gid" is created.
50986 +config GRKERNSEC_SOCKET_CLIENT
50987 + bool "Deny client sockets to group"
50988 + depends on GRKERNSEC_SOCKET
50990 + If you say Y here, you will be able to choose a GID whose users will
50991 + be unable to connect to other hosts from your machine, but will be
50992 + able to run servers. If this option is enabled, all users in the group
50993 + you specify will have to use passive mode when initiating ftp transfers
50994 + from the shell on your machine. If the sysctl option is enabled, a
50995 + sysctl option with name "socket_client" is created.
50997 +config GRKERNSEC_SOCKET_CLIENT_GID
50998 + int "GID to deny client sockets for"
50999 + depends on GRKERNSEC_SOCKET_CLIENT
51002 + Here you can choose the GID to disable client socket access for.
51003 + Remember to add the users you want client socket access disabled for to
51004 + the GID specified here. If the sysctl option is enabled, a sysctl
51005 + option with name "socket_client_gid" is created.
51007 +config GRKERNSEC_SOCKET_SERVER
51008 + bool "Deny server sockets to group"
51009 + depends on GRKERNSEC_SOCKET
51011 + If you say Y here, you will be able to choose a GID whose users will
51012 + be unable to run server applications from your machine. If the sysctl
51013 + option is enabled, a sysctl option with name "socket_server" is created.
51015 +config GRKERNSEC_SOCKET_SERVER_GID
51016 + int "GID to deny server sockets for"
51017 + depends on GRKERNSEC_SOCKET_SERVER
51020 + Here you can choose the GID to disable server socket access for.
51021 + Remember to add the users you want server socket access disabled for to
51022 + the GID specified here. If the sysctl option is enabled, a sysctl
51023 + option with name "socket_server_gid" is created.
51026 +menu "Sysctl support"
51027 +depends on GRKERNSEC && SYSCTL
51029 +config GRKERNSEC_SYSCTL
51030 + bool "Sysctl support"
51032 + If you say Y here, you will be able to change the options that
51033 + grsecurity runs with at bootup, without having to recompile your
51034 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51035 + to enable (1) or disable (0) various features. All the sysctl entries
51036 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51037 + All features enabled in the kernel configuration are disabled at boot
51038 + if you do not say Y to the "Turn on features by default" option.
51039 + All options should be set at startup, and the grsec_lock entry should
51040 + be set to a non-zero value after all the options are set.
51041 + *THIS IS EXTREMELY IMPORTANT*
51043 +config GRKERNSEC_SYSCTL_DISTRO
51044 + bool "Extra sysctl support for distro makers (READ HELP)"
51045 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51047 + If you say Y here, additional sysctl options will be created
51048 + for features that affect processes running as root. Therefore,
51049 + it is critical when using this option that the grsec_lock entry be
51050 + enabled after boot. Only distros with prebuilt kernel packages
51051 + with this option enabled that can ensure grsec_lock is enabled
51052 + after boot should use this option.
51053 + *Failure to set grsec_lock after boot makes all grsec features
51054 + this option covers useless*
51056 + Currently this option creates the following sysctl entries:
51057 + "Disable Privileged I/O": "disable_priv_io"
51059 +config GRKERNSEC_SYSCTL_ON
51060 + bool "Turn on features by default"
51061 + depends on GRKERNSEC_SYSCTL
51063 + If you say Y here, instead of having all features enabled in the
51064 + kernel configuration disabled at boot time, the features will be
51065 + enabled at boot time. It is recommended you say Y here unless
51066 + there is some reason you would want all sysctl-tunable features to
51067 + be disabled by default. As mentioned elsewhere, it is important
51068 + to enable the grsec_lock entry once you have finished modifying
51069 + the sysctl entries.
51072 +menu "Logging Options"
51073 +depends on GRKERNSEC
51075 +config GRKERNSEC_FLOODTIME
51076 + int "Seconds in between log messages (minimum)"
51079 + This option allows you to enforce the number of seconds between
51080 + grsecurity log messages. The default should be suitable for most
51081 + people, however, if you choose to change it, choose a value small enough
51082 + to allow informative logs to be produced, but large enough to
51083 + prevent flooding.
51085 +config GRKERNSEC_FLOODBURST
51086 + int "Number of messages in a burst (maximum)"
51089 + This option allows you to choose the maximum number of messages allowed
51090 + within the flood time interval you chose in a separate option. The
51091 + default should be suitable for most people, however if you find that
51092 + many of your logs are being interpreted as flooding, you may want to
51093 + raise this value.
51098 diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51099 --- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51100 +++ linux-2.6.39.4/grsecurity/Makefile 2011-08-05 19:44:37.000000000 -0400
51102 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51103 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51104 +# into an RBAC system
51106 +# All code in this directory and various hooks inserted throughout the kernel
51107 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51108 +# under the GPL v2 or higher
51110 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51111 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
51112 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51114 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51115 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51116 + gracl_learn.o grsec_log.o
51117 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51120 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51123 +ifndef CONFIG_GRKERNSEC
51124 +obj-y += grsec_disabled.o
51127 +ifdef CONFIG_GRKERNSEC_HIDESYM
51128 +extra-y := grsec_hidesym.o
51129 +$(obj)/grsec_hidesym.o:
51130 + @-chmod -f 500 /boot
51131 + @-chmod -f 500 /lib/modules
51133 + @echo ' grsec: protected kernel image paths'
51135 diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51136 --- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51137 +++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51138 @@ -107,7 +107,7 @@ struct acpi_device_ops {
51140 acpi_op_unbind unbind;
51141 acpi_op_notify notify;
51145 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51147 diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51148 --- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51149 +++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51152 typedef atomic64_t atomic_long_t;
51154 +#ifdef CONFIG_PAX_REFCOUNT
51155 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
51157 +typedef atomic64_t atomic_long_unchecked_t;
51160 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51162 static inline long atomic_long_read(atomic_long_t *l)
51163 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51164 return (long)atomic64_read(v);
51167 +#ifdef CONFIG_PAX_REFCOUNT
51168 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51170 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51172 + return (long)atomic64_read_unchecked(v);
51176 static inline void atomic_long_set(atomic_long_t *l, long i)
51178 atomic64_t *v = (atomic64_t *)l;
51179 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51180 atomic64_set(v, i);
51183 +#ifdef CONFIG_PAX_REFCOUNT
51184 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51186 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51188 + atomic64_set_unchecked(v, i);
51192 static inline void atomic_long_inc(atomic_long_t *l)
51194 atomic64_t *v = (atomic64_t *)l;
51195 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51199 +#ifdef CONFIG_PAX_REFCOUNT
51200 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51202 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51204 + atomic64_inc_unchecked(v);
51208 static inline void atomic_long_dec(atomic_long_t *l)
51210 atomic64_t *v = (atomic64_t *)l;
51211 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51215 +#ifdef CONFIG_PAX_REFCOUNT
51216 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51218 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51220 + atomic64_dec_unchecked(v);
51224 static inline void atomic_long_add(long i, atomic_long_t *l)
51226 atomic64_t *v = (atomic64_t *)l;
51227 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51228 atomic64_add(i, v);
51231 +#ifdef CONFIG_PAX_REFCOUNT
51232 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51234 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51236 + atomic64_add_unchecked(i, v);
51240 static inline void atomic_long_sub(long i, atomic_long_t *l)
51242 atomic64_t *v = (atomic64_t *)l;
51243 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51244 atomic64_sub(i, v);
51247 +#ifdef CONFIG_PAX_REFCOUNT
51248 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51250 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51252 + atomic64_sub_unchecked(i, v);
51256 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51258 atomic64_t *v = (atomic64_t *)l;
51259 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51260 return (long)atomic64_inc_return(v);
51263 +#ifdef CONFIG_PAX_REFCOUNT
51264 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51266 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51268 + return (long)atomic64_inc_return_unchecked(v);
51272 static inline long atomic_long_dec_return(atomic_long_t *l)
51274 atomic64_t *v = (atomic64_t *)l;
51275 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51277 typedef atomic_t atomic_long_t;
51279 +#ifdef CONFIG_PAX_REFCOUNT
51280 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51282 +typedef atomic_t atomic_long_unchecked_t;
51285 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51286 static inline long atomic_long_read(atomic_long_t *l)
51288 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51289 return (long)atomic_read(v);
51292 +#ifdef CONFIG_PAX_REFCOUNT
51293 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51295 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51297 + return (long)atomic_read_unchecked(v);
51301 static inline void atomic_long_set(atomic_long_t *l, long i)
51303 atomic_t *v = (atomic_t *)l;
51304 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51308 +#ifdef CONFIG_PAX_REFCOUNT
51309 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51311 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51313 + atomic_set_unchecked(v, i);
51317 static inline void atomic_long_inc(atomic_long_t *l)
51319 atomic_t *v = (atomic_t *)l;
51320 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51324 +#ifdef CONFIG_PAX_REFCOUNT
51325 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51327 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51329 + atomic_inc_unchecked(v);
51333 static inline void atomic_long_dec(atomic_long_t *l)
51335 atomic_t *v = (atomic_t *)l;
51336 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51340 +#ifdef CONFIG_PAX_REFCOUNT
51341 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51343 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51345 + atomic_dec_unchecked(v);
51349 static inline void atomic_long_add(long i, atomic_long_t *l)
51351 atomic_t *v = (atomic_t *)l;
51352 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51356 +#ifdef CONFIG_PAX_REFCOUNT
51357 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51359 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51361 + atomic_add_unchecked(i, v);
51365 static inline void atomic_long_sub(long i, atomic_long_t *l)
51367 atomic_t *v = (atomic_t *)l;
51368 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51372 +#ifdef CONFIG_PAX_REFCOUNT
51373 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51375 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51377 + atomic_sub_unchecked(i, v);
51381 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51383 atomic_t *v = (atomic_t *)l;
51384 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51385 return (long)atomic_inc_return(v);
51388 +#ifdef CONFIG_PAX_REFCOUNT
51389 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51391 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51393 + return (long)atomic_inc_return_unchecked(v);
51397 static inline long atomic_long_dec_return(atomic_long_t *l)
51399 atomic_t *v = (atomic_t *)l;
51400 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51402 #endif /* BITS_PER_LONG == 64 */
51404 +#ifdef CONFIG_PAX_REFCOUNT
51405 +static inline void pax_refcount_needs_these_functions(void)
51407 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51408 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51409 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51410 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51411 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51412 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51413 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51414 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51415 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51416 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51417 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51419 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51420 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51421 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51422 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51423 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51424 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51425 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51428 +#define atomic_read_unchecked(v) atomic_read(v)
51429 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51430 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51431 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51432 +#define atomic_inc_unchecked(v) atomic_inc(v)
51433 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51434 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51435 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51436 +#define atomic_dec_unchecked(v) atomic_dec(v)
51437 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51438 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51440 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51441 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51442 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51443 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51444 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51445 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51446 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51449 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51450 diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51451 --- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51452 +++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51454 * cache lines need to provide their own cache.h.
51457 -#define L1_CACHE_SHIFT 5
51458 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51459 +#define L1_CACHE_SHIFT 5UL
51460 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51462 #endif /* __ASM_GENERIC_CACHE_H */
51463 diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51464 --- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51465 +++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51466 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51467 typedef signed long s64;
51468 typedef unsigned long u64;
51470 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51473 #define U8_C(x) x ## U
51475 diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51476 --- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51477 +++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51478 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51479 typedef signed long long s64;
51480 typedef unsigned long long u64;
51482 +typedef unsigned long long intoverflow_t;
51485 #define U8_C(x) x ## U
51487 diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51488 --- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51489 +++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51490 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51492 KMAP_D(18) KM_NMI_PTE,
51494 +KMAP_D(20) KM_CLEARPAGE,
51496 * Remember to update debug_kmap_atomic() when adding new kmap types!
51498 -KMAP_D(20) KM_TYPE_NR
51499 +KMAP_D(21) KM_TYPE_NR
51503 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51504 --- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51505 +++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51506 @@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51507 #endif /* __HAVE_ARCH_PMD_WRITE */
51510 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51511 +static inline unsigned long pax_open_kernel(void) { return 0; }
51514 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51515 +static inline unsigned long pax_close_kernel(void) { return 0; }
51518 #endif /* !__ASSEMBLY__ */
51520 #endif /* _ASM_GENERIC_PGTABLE_H */
51521 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51522 --- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51523 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51525 #ifndef _PGTABLE_NOPMD_H
51526 #define _PGTABLE_NOPMD_H
51528 -#ifndef __ASSEMBLY__
51530 #include <asm-generic/pgtable-nopud.h>
51534 #define __PAGETABLE_PMD_FOLDED
51536 +#define PMD_SHIFT PUD_SHIFT
51537 +#define PTRS_PER_PMD 1
51538 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51539 +#define PMD_MASK (~(PMD_SIZE-1))
51541 +#ifndef __ASSEMBLY__
51546 * Having the pmd type consist of a pud gets the size right, and allows
51547 * us to conceptually access the pud entry that this pmd is folded into
51548 @@ -16,11 +21,6 @@ struct mm_struct;
51550 typedef struct { pud_t pud; } pmd_t;
51552 -#define PMD_SHIFT PUD_SHIFT
51553 -#define PTRS_PER_PMD 1
51554 -#define PMD_SIZE (1UL << PMD_SHIFT)
51555 -#define PMD_MASK (~(PMD_SIZE-1))
51558 * The "pud_xxx()" functions here are trivial for a folded two-level
51559 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51560 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51561 --- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51562 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51564 #ifndef _PGTABLE_NOPUD_H
51565 #define _PGTABLE_NOPUD_H
51567 -#ifndef __ASSEMBLY__
51569 #define __PAGETABLE_PUD_FOLDED
51571 +#define PUD_SHIFT PGDIR_SHIFT
51572 +#define PTRS_PER_PUD 1
51573 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51574 +#define PUD_MASK (~(PUD_SIZE-1))
51576 +#ifndef __ASSEMBLY__
51579 * Having the pud type consist of a pgd gets the size right, and allows
51580 * us to conceptually access the pgd entry that this pud is folded into
51583 typedef struct { pgd_t pgd; } pud_t;
51585 -#define PUD_SHIFT PGDIR_SHIFT
51586 -#define PTRS_PER_PUD 1
51587 -#define PUD_SIZE (1UL << PUD_SHIFT)
51588 -#define PUD_MASK (~(PUD_SIZE-1))
51591 * The "pgd_xxx()" functions here are trivial for a folded two-level
51592 * setup: the pud is never bad, and a pud always exists (as it's folded
51593 diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51594 --- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51595 +++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51596 @@ -213,6 +213,7 @@
51597 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51598 VMLINUX_SYMBOL(__start_rodata) = .; \
51599 *(.rodata) *(.rodata.*) \
51600 + *(.data..read_only) \
51601 *(__vermagic) /* Kernel version magic */ \
51603 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51604 @@ -707,14 +708,15 @@
51605 * section in the linker script will go there too. @phdr should have
51608 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51609 + * Note that this macros defines per_cpu_load as an absolute symbol.
51610 * If there is no need to put the percpu section at a predetermined
51611 * address, use PERCPU().
51613 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51614 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51615 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51616 + per_cpu_load = .; \
51617 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51619 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51620 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51621 *(.data..percpu..first) \
51622 . = ALIGN(PAGE_SIZE); \
51623 @@ -726,7 +728,7 @@
51624 *(.data..percpu..shared_aligned) \
51625 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51627 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51628 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51631 * PERCPU - define output section for percpu area, simple version
51632 diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51633 --- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51634 +++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51635 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51637 /* disable crtc when not in use - more explicit than dpms off */
51638 void (*disable)(struct drm_crtc *crtc);
51642 struct drm_encoder_helper_funcs {
51643 void (*dpms)(struct drm_encoder *encoder, int mode);
51644 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51645 struct drm_connector *connector);
51646 /* disable encoder when not in use - more explicit than dpms off */
51647 void (*disable)(struct drm_encoder *encoder);
51651 struct drm_connector_helper_funcs {
51652 int (*get_modes)(struct drm_connector *connector);
51653 diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51654 --- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51655 +++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51657 #include <linux/workqueue.h>
51658 #include <linux/poll.h>
51659 #include <asm/pgalloc.h>
51660 +#include <asm/local.h>
51663 #include <linux/idr.h>
51664 @@ -1023,7 +1024,7 @@ struct drm_device {
51666 /** \name Usage Counters */
51668 - int open_count; /**< Outstanding files open */
51669 + local_t open_count; /**< Outstanding files open */
51670 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51671 atomic_t vma_count; /**< Outstanding vma areas open */
51672 int buf_use; /**< Buffers in use -- cannot alloc */
51673 @@ -1034,7 +1035,7 @@ struct drm_device {
51675 unsigned long counters;
51676 enum drm_stat_type types[15];
51677 - atomic_t counts[15];
51678 + atomic_unchecked_t counts[15];
51681 struct list_head filelist;
51682 diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51683 --- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51684 +++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51687 struct ttm_mem_shrink {
51688 int (*do_shrink) (struct ttm_mem_shrink *);
51693 * struct ttm_mem_global - Global memory accounting structure.
51694 diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51695 --- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51696 +++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51697 @@ -39,6 +39,14 @@ enum machine_type {
51698 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51701 +/* Constants for the N_FLAGS field */
51702 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51703 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51704 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51705 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51706 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51707 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51709 #if !defined (N_MAGIC)
51710 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51712 diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51713 --- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51714 +++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51715 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51718 struct k_atm_aal_stats {
51719 -#define __HANDLE_ITEM(i) atomic_t i
51720 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51722 #undef __HANDLE_ITEM
51724 diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51725 --- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51726 +++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51727 @@ -92,6 +92,7 @@ struct linux_binfmt {
51728 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51729 int (*load_shlib)(struct file *);
51730 int (*core_dump)(struct coredump_params *cprm);
51731 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51732 unsigned long min_coredump; /* minimal dump size */
51735 diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51736 --- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51737 +++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51738 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51739 int (*getgeo)(struct block_device *, struct hd_geometry *);
51740 /* this callback is with swap_lock and sometimes page table lock held */
51741 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51742 - struct module *owner;
51743 + struct module * const owner;
51746 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51747 diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51748 --- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51749 +++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51750 @@ -161,7 +161,7 @@ struct blk_trace {
51751 struct dentry *dir;
51752 struct dentry *dropped_file;
51753 struct dentry *msg_file;
51754 - atomic_t dropped;
51755 + atomic_unchecked_t dropped;
51758 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51759 diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51760 --- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51761 +++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51762 @@ -42,51 +42,51 @@
51764 static inline __le64 __cpu_to_le64p(const __u64 *p)
51766 - return (__force __le64)*p;
51767 + return (__force const __le64)*p;
51769 static inline __u64 __le64_to_cpup(const __le64 *p)
51771 - return (__force __u64)*p;
51772 + return (__force const __u64)*p;
51774 static inline __le32 __cpu_to_le32p(const __u32 *p)
51776 - return (__force __le32)*p;
51777 + return (__force const __le32)*p;
51779 static inline __u32 __le32_to_cpup(const __le32 *p)
51781 - return (__force __u32)*p;
51782 + return (__force const __u32)*p;
51784 static inline __le16 __cpu_to_le16p(const __u16 *p)
51786 - return (__force __le16)*p;
51787 + return (__force const __le16)*p;
51789 static inline __u16 __le16_to_cpup(const __le16 *p)
51791 - return (__force __u16)*p;
51792 + return (__force const __u16)*p;
51794 static inline __be64 __cpu_to_be64p(const __u64 *p)
51796 - return (__force __be64)__swab64p(p);
51797 + return (__force const __be64)__swab64p(p);
51799 static inline __u64 __be64_to_cpup(const __be64 *p)
51801 - return __swab64p((__u64 *)p);
51802 + return __swab64p((const __u64 *)p);
51804 static inline __be32 __cpu_to_be32p(const __u32 *p)
51806 - return (__force __be32)__swab32p(p);
51807 + return (__force const __be32)__swab32p(p);
51809 static inline __u32 __be32_to_cpup(const __be32 *p)
51811 - return __swab32p((__u32 *)p);
51812 + return __swab32p((const __u32 *)p);
51814 static inline __be16 __cpu_to_be16p(const __u16 *p)
51816 - return (__force __be16)__swab16p(p);
51817 + return (__force const __be16)__swab16p(p);
51819 static inline __u16 __be16_to_cpup(const __be16 *p)
51821 - return __swab16p((__u16 *)p);
51822 + return __swab16p((const __u16 *)p);
51824 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51825 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51826 diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51827 --- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51828 +++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51830 #define __read_mostly
51833 +#ifndef __read_only
51834 +#define __read_only __read_mostly
51837 #ifndef ____cacheline_aligned
51838 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51840 diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51841 --- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51842 +++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51843 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51844 extern bool ns_capable(struct user_namespace *ns, int cap);
51845 extern bool task_ns_capable(struct task_struct *t, int cap);
51846 extern bool nsown_capable(int cap);
51847 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51848 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51849 +extern bool capable_nolog(int cap);
51851 /* audit system wants to get cap info from files as well */
51852 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51853 diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51854 --- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51855 +++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51859 #if __GNUC_MINOR__ >= 5
51861 +#define __no_const __attribute__((no_const))
51864 * Mark a position in code as unreachable. This can be used to
51865 * suppress control flow warnings after asm blocks that transfer
51867 #define __noclone __attribute__((__noclone__))
51871 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51872 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51873 +#define __bos0(ptr) __bos((ptr), 0)
51874 +#define __bos1(ptr) __bos((ptr), 1)
51877 #if __GNUC_MINOR__ > 0
51878 diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51879 --- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51880 +++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51881 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51882 # define __attribute_const__ /* unimplemented */
51885 +#ifndef __no_const
51886 +# define __no_const
51890 * Tell gcc if a function is cold. The compiler will assume any path
51891 * directly leading to the call is unlikely.
51892 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51896 +#ifndef __alloc_size
51897 +#define __alloc_size(...)
51901 +#define __bos(ptr, arg)
51905 +#define __bos0(ptr)
51909 +#define __bos1(ptr)
51912 /* Simple shorthand for a section definition */
51914 # define __section(S) __attribute__ ((__section__(#S)))
51915 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51916 * use is to mediate communication between process-level code and irq/NMI
51917 * handlers, all running on the same CPU.
51919 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51920 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51921 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51923 #endif /* __LINUX_COMPILER_H */
51924 diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51925 --- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51926 +++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51927 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51931 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51932 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51935 static inline void set_mems_allowed(nodemask_t nodemask)
51936 diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51937 --- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51938 +++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51939 @@ -361,7 +361,7 @@ struct cipher_tfm {
51940 const u8 *key, unsigned int keylen);
51941 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51942 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51947 int (*init)(struct hash_desc *desc);
51948 @@ -382,13 +382,13 @@ struct compress_tfm {
51949 int (*cot_decompress)(struct crypto_tfm *tfm,
51950 const u8 *src, unsigned int slen,
51951 u8 *dst, unsigned int *dlen);
51956 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51957 unsigned int dlen);
51958 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51962 #define crt_ablkcipher crt_u.ablkcipher
51963 #define crt_aead crt_u.aead
51964 diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51965 --- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51966 +++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51967 @@ -77,7 +77,7 @@ static void free(void *where)
51968 * warnings when not needed (indeed large_malloc / large_free are not
51969 * needed by inflate */
51971 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51972 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51973 #define free(a) kfree(a)
51975 #define large_malloc(a) vmalloc(a)
51976 diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51977 --- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
51978 +++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
51979 @@ -49,7 +49,7 @@ struct dma_map_ops {
51980 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51981 int (*dma_supported)(struct device *dev, u64 mask);
51982 int (*set_dma_mask)(struct device *dev, u64 mask);
51984 + const int is_phys;
51987 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51988 diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
51989 --- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
51990 +++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
51991 @@ -409,7 +409,7 @@ struct efivar_operations {
51992 efi_get_variable_t *get_variable;
51993 efi_get_next_variable_t *get_next_variable;
51994 efi_set_variable_t *set_variable;
52000 diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
52001 --- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
52002 +++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
52003 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
52004 #define PT_GNU_EH_FRAME 0x6474e550
52006 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
52007 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
52009 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
52011 +/* Constants for the e_flags field */
52012 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
52013 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
52014 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
52015 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
52016 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
52017 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
52020 * Extended Numbering
52021 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
52022 #define DT_DEBUG 21
52023 #define DT_TEXTREL 22
52024 #define DT_JMPREL 23
52025 +#define DT_FLAGS 30
52026 + #define DF_TEXTREL 0x00000004
52027 #define DT_ENCODING 32
52028 #define OLD_DT_LOOS 0x60000000
52029 #define DT_LOOS 0x6000000d
52030 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
52034 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
52035 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
52036 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
52037 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
52038 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
52039 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
52040 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
52041 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
52042 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
52043 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
52044 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
52045 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
52047 typedef struct elf32_phdr{
52049 Elf32_Off p_offset;
52050 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
52056 #define ELFMAG0 0x7f /* EI_MAG */
52057 #define ELFMAG1 'E'
52058 #define ELFMAG2 'L'
52059 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
52060 #define elf_note elf32_note
52061 #define elf_addr_t Elf32_Off
52062 #define Elf_Half Elf32_Half
52063 +#define elf_dyn Elf32_Dyn
52067 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52068 #define elf_note elf64_note
52069 #define elf_addr_t Elf64_Off
52070 #define Elf_Half Elf64_Half
52071 +#define elf_dyn Elf64_Dyn
52075 diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52076 --- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52077 +++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52078 @@ -429,7 +429,7 @@ struct fw_iso_context {
52080 fw_iso_callback_t sc;
52081 fw_iso_mc_callback_t mc;
52083 + } __no_const callback;
52084 void *callback_data;
52087 diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52088 --- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52089 +++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52090 @@ -113,7 +113,7 @@ struct fscache_operation {
52094 -extern atomic_t fscache_op_debug_id;
52095 +extern atomic_unchecked_t fscache_op_debug_id;
52096 extern void fscache_op_work_func(struct work_struct *work);
52098 extern void fscache_enqueue_operation(struct fscache_operation *);
52099 @@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52101 INIT_WORK(&op->work, fscache_op_work_func);
52102 atomic_set(&op->usage, 1);
52103 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52104 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52105 op->processor = processor;
52106 op->release = release;
52107 INIT_LIST_HEAD(&op->pend_link);
52108 diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52109 --- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52110 +++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52111 @@ -108,6 +108,11 @@ struct inodes_stat_t {
52112 /* File was opened by fanotify and shouldn't generate fanotify events */
52113 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52115 +/* Hack for grsec so as not to require read permission simply to execute
52118 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52121 * The below are the various read and write types that we support. Some of
52122 * them include behavioral modifiers that send information down to the
52123 @@ -1535,7 +1540,7 @@ struct block_device_operations;
52124 * the big kernel lock held in all filesystems.
52126 struct file_operations {
52127 - struct module *owner;
52128 + struct module * const owner;
52129 loff_t (*llseek) (struct file *, loff_t, int);
52130 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52131 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52132 @@ -1563,6 +1568,7 @@ struct file_operations {
52133 long (*fallocate)(struct file *file, int mode, loff_t offset,
52136 +typedef struct file_operations __no_const file_operations_no_const;
52138 #define IPERM_FLAG_RCU 0x0001
52140 diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52141 --- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52142 +++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52144 #include <linux/seqlock.h>
52152 diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52153 --- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52154 +++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52155 @@ -84,7 +84,7 @@ struct trace_event_functions {
52156 trace_print_func raw;
52157 trace_print_func hex;
52158 trace_print_func binary;
52162 struct trace_event {
52163 struct hlist_node node;
52164 @@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52165 extern int trace_add_event_call(struct ftrace_event_call *call);
52166 extern void trace_remove_event_call(struct ftrace_event_call *call);
52168 -#define is_signed_type(type) (((type)(-1)) < 0)
52169 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52171 int trace_set_clr_event(const char *system, const char *event, int set);
52173 diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52174 --- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52175 +++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52176 @@ -184,7 +184,7 @@ struct gendisk {
52177 struct kobject *slave_dir;
52179 struct timer_rand_state *random;
52180 - atomic_t sync_io; /* RAID */
52181 + atomic_unchecked_t sync_io; /* RAID */
52182 struct disk_events *ev;
52183 #ifdef CONFIG_BLK_DEV_INTEGRITY
52184 struct blk_integrity *integrity;
52185 diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52186 --- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52187 +++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52192 +#include <linux/grdefs.h>
52193 +#include <linux/resource.h>
52194 +#include <linux/capability.h>
52195 +#include <linux/dcache.h>
52196 +#include <asm/resource.h>
52198 +/* Major status information */
52200 +#define GR_VERSION "grsecurity 2.2.2"
52201 +#define GRSECURITY_VERSION 0x2202
52212 + GR_SPROLEPAM = 8,
52215 +/* Password setup definitions
52216 + * kernel/grhash.c */
52219 + GR_SALT_LEN = 16,
52224 + GR_SPROLE_LEN = 64,
52233 +#define GR_NLIMITS 32
52235 +/* Begin Data Structures */
52237 +struct sprole_pw {
52238 + unsigned char *rolename;
52239 + unsigned char salt[GR_SALT_LEN];
52240 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52243 +struct name_entry {
52250 + struct name_entry *prev;
52251 + struct name_entry *next;
52254 +struct inodev_entry {
52255 + struct name_entry *nentry;
52256 + struct inodev_entry *prev;
52257 + struct inodev_entry *next;
52260 +struct acl_role_db {
52261 + struct acl_role_label **r_hash;
52265 +struct inodev_db {
52266 + struct inodev_entry **i_hash;
52271 + struct name_entry **n_hash;
52275 +struct crash_uid {
52277 + unsigned long expires;
52280 +struct gr_hash_struct {
52282 + void **nametable;
52284 + __u32 table_size;
52289 +/* Userspace Grsecurity ACL data structures */
52291 +struct acl_subject_label {
52296 + kernel_cap_t cap_mask;
52297 + kernel_cap_t cap_lower;
52298 + kernel_cap_t cap_invert_audit;
52300 + struct rlimit res[GR_NLIMITS];
52303 + __u8 user_trans_type;
52304 + __u8 group_trans_type;
52305 + uid_t *user_transitions;
52306 + gid_t *group_transitions;
52307 + __u16 user_trans_num;
52308 + __u16 group_trans_num;
52310 + __u32 sock_families[2];
52311 + __u32 ip_proto[8];
52313 + struct acl_ip_label **ips;
52315 + __u32 inaddr_any_override;
52318 + unsigned long expires;
52320 + struct acl_subject_label *parent_subject;
52321 + struct gr_hash_struct *hash;
52322 + struct acl_subject_label *prev;
52323 + struct acl_subject_label *next;
52325 + struct acl_object_label **obj_hash;
52326 + __u32 obj_hash_size;
52330 +struct role_allowed_ip {
52334 + struct role_allowed_ip *prev;
52335 + struct role_allowed_ip *next;
52338 +struct role_transition {
52341 + struct role_transition *prev;
52342 + struct role_transition *next;
52345 +struct acl_role_label {
52350 + __u16 auth_attempts;
52351 + unsigned long expires;
52353 + struct acl_subject_label *root_label;
52354 + struct gr_hash_struct *hash;
52356 + struct acl_role_label *prev;
52357 + struct acl_role_label *next;
52359 + struct role_transition *transitions;
52360 + struct role_allowed_ip *allowed_ips;
52361 + uid_t *domain_children;
52362 + __u16 domain_child_num;
52364 + struct acl_subject_label **subj_hash;
52365 + __u32 subj_hash_size;
52368 +struct user_acl_role_db {
52369 + struct acl_role_label **r_table;
52370 + __u32 num_pointers; /* Number of allocations to track */
52371 + __u32 num_roles; /* Number of roles */
52372 + __u32 num_domain_children; /* Number of domain children */
52373 + __u32 num_subjects; /* Number of subjects */
52374 + __u32 num_objects; /* Number of objects */
52377 +struct acl_object_label {
52383 + struct acl_subject_label *nested;
52384 + struct acl_object_label *globbed;
52386 + /* next two structures not used */
52388 + struct acl_object_label *prev;
52389 + struct acl_object_label *next;
52392 +struct acl_ip_label {
52401 + /* next two structures not used */
52403 + struct acl_ip_label *prev;
52404 + struct acl_ip_label *next;
52408 + struct user_acl_role_db role_db;
52409 + unsigned char pw[GR_PW_LEN];
52410 + unsigned char salt[GR_SALT_LEN];
52411 + unsigned char sum[GR_SHA_LEN];
52412 + unsigned char sp_role[GR_SPROLE_LEN];
52413 + struct sprole_pw *sprole_pws;
52414 + dev_t segv_device;
52415 + ino_t segv_inode;
52417 + __u16 num_sprole_pws;
52421 +struct gr_arg_wrapper {
52422 + struct gr_arg *arg;
52427 +struct subject_map {
52428 + struct acl_subject_label *user;
52429 + struct acl_subject_label *kernel;
52430 + struct subject_map *prev;
52431 + struct subject_map *next;
52434 +struct acl_subj_map_db {
52435 + struct subject_map **s_hash;
52439 +/* End Data Structures Section */
52441 +/* Hash functions generated by empirical testing by Brad Spengler
52442 + Makes good use of the low bits of the inode. Generally 0-1 times
52443 + in loop for successful match. 0-3 for unsuccessful match.
52444 + Shift/add algorithm with modulus of table size and an XOR*/
52446 +static __inline__ unsigned int
52447 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52449 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52452 + static __inline__ unsigned int
52453 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52455 + return ((const unsigned long)userp % sz);
52458 +static __inline__ unsigned int
52459 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52461 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52464 +static __inline__ unsigned int
52465 +nhash(const char *name, const __u16 len, const unsigned int sz)
52467 + return full_name_hash((const unsigned char *)name, len) % sz;
52470 +#define FOR_EACH_ROLE_START(role) \
52471 + role = role_list; \
52474 +#define FOR_EACH_ROLE_END(role) \
52475 + role = role->prev; \
52478 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52481 + while (iter < role->subj_hash_size) { \
52482 + if (subj == NULL) \
52483 + subj = role->subj_hash[iter]; \
52484 + if (subj == NULL) { \
52489 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52490 + subj = subj->next; \
52491 + if (subj == NULL) \
52496 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52497 + subj = role->hash->first; \
52498 + while (subj != NULL) {
52500 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52501 + subj = subj->next; \
52506 diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52507 --- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52508 +++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52510 +#ifndef __GRALLOC_H
52511 +#define __GRALLOC_H
52513 +void acl_free_all(void);
52514 +int acl_alloc_stack_init(unsigned long size);
52515 +void *acl_alloc(unsigned long len);
52516 +void *acl_alloc_num(unsigned long num, unsigned long len);
52519 diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52520 --- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52521 +++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52526 +/* Begin grsecurity status declarations */
52530 + GR_STATUS_INIT = 0x00 // disabled state
52533 +/* Begin ACL declarations */
52538 + GR_ROLE_USER = 0x0001,
52539 + GR_ROLE_GROUP = 0x0002,
52540 + GR_ROLE_DEFAULT = 0x0004,
52541 + GR_ROLE_SPECIAL = 0x0008,
52542 + GR_ROLE_AUTH = 0x0010,
52543 + GR_ROLE_NOPW = 0x0020,
52544 + GR_ROLE_GOD = 0x0040,
52545 + GR_ROLE_LEARN = 0x0080,
52546 + GR_ROLE_TPE = 0x0100,
52547 + GR_ROLE_DOMAIN = 0x0200,
52548 + GR_ROLE_PAM = 0x0400,
52549 + GR_ROLE_PERSIST = 0x0800
52552 +/* ACL Subject and Object mode flags */
52554 + GR_DELETED = 0x80000000
52557 +/* ACL Object-only mode flags */
52559 + GR_READ = 0x00000001,
52560 + GR_APPEND = 0x00000002,
52561 + GR_WRITE = 0x00000004,
52562 + GR_EXEC = 0x00000008,
52563 + GR_FIND = 0x00000010,
52564 + GR_INHERIT = 0x00000020,
52565 + GR_SETID = 0x00000040,
52566 + GR_CREATE = 0x00000080,
52567 + GR_DELETE = 0x00000100,
52568 + GR_LINK = 0x00000200,
52569 + GR_AUDIT_READ = 0x00000400,
52570 + GR_AUDIT_APPEND = 0x00000800,
52571 + GR_AUDIT_WRITE = 0x00001000,
52572 + GR_AUDIT_EXEC = 0x00002000,
52573 + GR_AUDIT_FIND = 0x00004000,
52574 + GR_AUDIT_INHERIT= 0x00008000,
52575 + GR_AUDIT_SETID = 0x00010000,
52576 + GR_AUDIT_CREATE = 0x00020000,
52577 + GR_AUDIT_DELETE = 0x00040000,
52578 + GR_AUDIT_LINK = 0x00080000,
52579 + GR_PTRACERD = 0x00100000,
52580 + GR_NOPTRACE = 0x00200000,
52581 + GR_SUPPRESS = 0x00400000,
52582 + GR_NOLEARN = 0x00800000,
52583 + GR_INIT_TRANSFER= 0x01000000
52586 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52587 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52588 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52590 +/* ACL subject-only mode flags */
52592 + GR_KILL = 0x00000001,
52593 + GR_VIEW = 0x00000002,
52594 + GR_PROTECTED = 0x00000004,
52595 + GR_LEARN = 0x00000008,
52596 + GR_OVERRIDE = 0x00000010,
52597 + /* just a placeholder, this mode is only used in userspace */
52598 + GR_DUMMY = 0x00000020,
52599 + GR_PROTSHM = 0x00000040,
52600 + GR_KILLPROC = 0x00000080,
52601 + GR_KILLIPPROC = 0x00000100,
52602 + /* just a placeholder, this mode is only used in userspace */
52603 + GR_NOTROJAN = 0x00000200,
52604 + GR_PROTPROCFD = 0x00000400,
52605 + GR_PROCACCT = 0x00000800,
52606 + GR_RELAXPTRACE = 0x00001000,
52607 + GR_NESTED = 0x00002000,
52608 + GR_INHERITLEARN = 0x00004000,
52609 + GR_PROCFIND = 0x00008000,
52610 + GR_POVERRIDE = 0x00010000,
52611 + GR_KERNELAUTH = 0x00020000,
52612 + GR_ATSECURE = 0x00040000,
52613 + GR_SHMEXEC = 0x00080000
52617 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52618 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52619 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52620 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52621 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52622 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52623 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52624 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52625 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52626 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52630 + GR_ID_USER = 0x01,
52631 + GR_ID_GROUP = 0x02,
52635 + GR_ID_ALLOW = 0x01,
52636 + GR_ID_DENY = 0x02,
52639 +#define GR_CRASH_RES 31
52640 +#define GR_UIDTABLE_MAX 500
52642 +/* begin resource learning section */
52644 + GR_RLIM_CPU_BUMP = 60,
52645 + GR_RLIM_FSIZE_BUMP = 50000,
52646 + GR_RLIM_DATA_BUMP = 10000,
52647 + GR_RLIM_STACK_BUMP = 1000,
52648 + GR_RLIM_CORE_BUMP = 10000,
52649 + GR_RLIM_RSS_BUMP = 500000,
52650 + GR_RLIM_NPROC_BUMP = 1,
52651 + GR_RLIM_NOFILE_BUMP = 5,
52652 + GR_RLIM_MEMLOCK_BUMP = 50000,
52653 + GR_RLIM_AS_BUMP = 500000,
52654 + GR_RLIM_LOCKS_BUMP = 2,
52655 + GR_RLIM_SIGPENDING_BUMP = 5,
52656 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52657 + GR_RLIM_NICE_BUMP = 1,
52658 + GR_RLIM_RTPRIO_BUMP = 1,
52659 + GR_RLIM_RTTIME_BUMP = 1000000
52663 diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52664 --- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52665 +++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52667 +#ifndef __GRINTERNAL_H
52668 +#define __GRINTERNAL_H
52670 +#ifdef CONFIG_GRKERNSEC
52672 +#include <linux/fs.h>
52673 +#include <linux/mnt_namespace.h>
52674 +#include <linux/nsproxy.h>
52675 +#include <linux/gracl.h>
52676 +#include <linux/grdefs.h>
52677 +#include <linux/grmsg.h>
52679 +void gr_add_learn_entry(const char *fmt, ...)
52680 + __attribute__ ((format (printf, 1, 2)));
52681 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52682 + const struct vfsmount *mnt);
52683 +__u32 gr_check_create(const struct dentry *new_dentry,
52684 + const struct dentry *parent,
52685 + const struct vfsmount *mnt, const __u32 mode);
52686 +int gr_check_protected_task(const struct task_struct *task);
52687 +__u32 to_gr_audit(const __u32 reqmode);
52688 +int gr_set_acls(const int type);
52689 +int gr_apply_subject_to_task(struct task_struct *task);
52690 +int gr_acl_is_enabled(void);
52691 +char gr_roletype_to_char(void);
52693 +void gr_handle_alertkill(struct task_struct *task);
52694 +char *gr_to_filename(const struct dentry *dentry,
52695 + const struct vfsmount *mnt);
52696 +char *gr_to_filename1(const struct dentry *dentry,
52697 + const struct vfsmount *mnt);
52698 +char *gr_to_filename2(const struct dentry *dentry,
52699 + const struct vfsmount *mnt);
52700 +char *gr_to_filename3(const struct dentry *dentry,
52701 + const struct vfsmount *mnt);
52703 +extern int grsec_enable_harden_ptrace;
52704 +extern int grsec_enable_link;
52705 +extern int grsec_enable_fifo;
52706 +extern int grsec_enable_execve;
52707 +extern int grsec_enable_shm;
52708 +extern int grsec_enable_execlog;
52709 +extern int grsec_enable_signal;
52710 +extern int grsec_enable_audit_ptrace;
52711 +extern int grsec_enable_forkfail;
52712 +extern int grsec_enable_time;
52713 +extern int grsec_enable_rofs;
52714 +extern int grsec_enable_chroot_shmat;
52715 +extern int grsec_enable_chroot_mount;
52716 +extern int grsec_enable_chroot_double;
52717 +extern int grsec_enable_chroot_pivot;
52718 +extern int grsec_enable_chroot_chdir;
52719 +extern int grsec_enable_chroot_chmod;
52720 +extern int grsec_enable_chroot_mknod;
52721 +extern int grsec_enable_chroot_fchdir;
52722 +extern int grsec_enable_chroot_nice;
52723 +extern int grsec_enable_chroot_execlog;
52724 +extern int grsec_enable_chroot_caps;
52725 +extern int grsec_enable_chroot_sysctl;
52726 +extern int grsec_enable_chroot_unix;
52727 +extern int grsec_enable_tpe;
52728 +extern int grsec_tpe_gid;
52729 +extern int grsec_enable_tpe_all;
52730 +extern int grsec_enable_tpe_invert;
52731 +extern int grsec_enable_socket_all;
52732 +extern int grsec_socket_all_gid;
52733 +extern int grsec_enable_socket_client;
52734 +extern int grsec_socket_client_gid;
52735 +extern int grsec_enable_socket_server;
52736 +extern int grsec_socket_server_gid;
52737 +extern int grsec_audit_gid;
52738 +extern int grsec_enable_group;
52739 +extern int grsec_enable_audit_textrel;
52740 +extern int grsec_enable_log_rwxmaps;
52741 +extern int grsec_enable_mount;
52742 +extern int grsec_enable_chdir;
52743 +extern int grsec_resource_logging;
52744 +extern int grsec_enable_blackhole;
52745 +extern int grsec_lastack_retries;
52746 +extern int grsec_enable_brute;
52747 +extern int grsec_lock;
52749 +extern spinlock_t grsec_alert_lock;
52750 +extern unsigned long grsec_alert_wtime;
52751 +extern unsigned long grsec_alert_fyet;
52753 +extern spinlock_t grsec_audit_lock;
52755 +extern rwlock_t grsec_exec_file_lock;
52757 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52758 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52759 + (tsk)->exec_file->f_vfsmnt) : "/")
52761 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52762 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52763 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52765 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52766 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52767 + (tsk)->exec_file->f_vfsmnt) : "/")
52769 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52770 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52771 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52773 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52775 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52777 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52778 + (task)->pid, (cred)->uid, \
52779 + (cred)->euid, (cred)->gid, (cred)->egid, \
52780 + gr_parent_task_fullpath(task), \
52781 + (task)->real_parent->comm, (task)->real_parent->pid, \
52782 + (pcred)->uid, (pcred)->euid, \
52783 + (pcred)->gid, (pcred)->egid
52785 +#define GR_CHROOT_CAPS {{ \
52786 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52787 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52788 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52789 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52790 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52791 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52793 +#define security_learn(normal_msg,args...) \
52795 + read_lock(&grsec_exec_file_lock); \
52796 + gr_add_learn_entry(normal_msg "\n", ## args); \
52797 + read_unlock(&grsec_exec_file_lock); \
52803 + /* used for non-audit messages that we shouldn't kill the task on */
52804 + GR_DONT_AUDIT_GOOD
52815 + GR_SYSCTL_HIDDEN,
52818 + GR_ONE_INT_TWO_STR,
52825 + GR_FIVE_INT_TWO_STR,
52831 + GR_FILENAME_TWO_INT,
52832 + GR_FILENAME_TWO_INT_STR,
52845 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52846 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52847 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52848 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52849 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52850 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52851 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52852 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52853 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52854 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52855 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52856 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52857 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52858 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52859 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52860 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52861 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52862 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52863 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52864 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52865 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52866 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52867 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52868 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52869 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52870 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52871 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52872 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52873 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52874 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52875 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52876 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52877 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52878 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52879 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52881 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52886 diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52887 --- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52888 +++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52890 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52891 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52892 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52893 +#define GR_STOPMOD_MSG "denied modification of module state by "
52894 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52895 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52896 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52897 +#define GR_IOPL_MSG "denied use of iopl() by "
52898 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52899 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52900 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52901 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52902 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52903 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52904 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52905 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52906 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52907 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52908 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52909 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52910 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52911 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52912 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52913 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52914 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52915 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52916 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52917 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52918 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52919 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52920 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52921 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52922 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52923 +#define GR_NPROC_MSG "denied overstep of process limit by "
52924 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52925 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52926 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52927 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52928 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52929 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52930 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52931 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52932 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52933 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52934 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52935 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52936 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52937 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52938 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52939 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52940 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52941 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52942 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52943 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52944 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52945 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52946 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52947 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52948 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52949 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52950 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52951 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52952 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52953 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52954 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52955 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52956 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52957 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52958 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52959 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52960 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52961 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52962 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52963 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52964 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52965 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52966 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52967 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52968 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52969 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52970 +#define GR_TIME_MSG "time set by "
52971 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52972 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52973 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52974 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52975 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52976 +#define GR_BIND_MSG "denied bind() by "
52977 +#define GR_CONNECT_MSG "denied connect() by "
52978 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52979 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52980 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52981 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52982 +#define GR_CAP_ACL_MSG "use of %s denied for "
52983 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52984 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52985 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52986 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52987 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52988 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52989 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52990 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52991 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52992 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52993 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52994 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52995 +#define GR_VM86_MSG "denied use of vm86 by "
52996 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52997 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52998 diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
52999 --- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
53000 +++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
53002 +#ifndef GR_SECURITY_H
53003 +#define GR_SECURITY_H
53004 +#include <linux/fs.h>
53005 +#include <linux/fs_struct.h>
53006 +#include <linux/binfmts.h>
53007 +#include <linux/gracl.h>
53008 +#include <linux/compat.h>
53010 +/* notify of brain-dead configs */
53011 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53012 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
53014 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
53015 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
53017 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53018 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53020 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53021 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53023 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
53024 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
53026 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
53027 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
53030 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
53031 +void gr_handle_brute_check(void);
53032 +void gr_handle_kernel_exploit(void);
53033 +int gr_process_user_ban(void);
53035 +char gr_roletype_to_char(void);
53037 +int gr_acl_enable_at_secure(void);
53039 +int gr_check_user_change(int real, int effective, int fs);
53040 +int gr_check_group_change(int real, int effective, int fs);
53042 +void gr_del_task_from_ip_table(struct task_struct *p);
53044 +int gr_pid_is_chrooted(struct task_struct *p);
53045 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
53046 +int gr_handle_chroot_nice(void);
53047 +int gr_handle_chroot_sysctl(const int op);
53048 +int gr_handle_chroot_setpriority(struct task_struct *p,
53049 + const int niceval);
53050 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
53051 +int gr_handle_chroot_chroot(const struct dentry *dentry,
53052 + const struct vfsmount *mnt);
53053 +int gr_handle_chroot_caps(struct path *path);
53054 +void gr_handle_chroot_chdir(struct path *path);
53055 +int gr_handle_chroot_chmod(const struct dentry *dentry,
53056 + const struct vfsmount *mnt, const int mode);
53057 +int gr_handle_chroot_mknod(const struct dentry *dentry,
53058 + const struct vfsmount *mnt, const int mode);
53059 +int gr_handle_chroot_mount(const struct dentry *dentry,
53060 + const struct vfsmount *mnt,
53061 + const char *dev_name);
53062 +int gr_handle_chroot_pivot(void);
53063 +int gr_handle_chroot_unix(const pid_t pid);
53065 +int gr_handle_rawio(const struct inode *inode);
53066 +int gr_handle_nproc(void);
53068 +void gr_handle_ioperm(void);
53069 +void gr_handle_iopl(void);
53071 +int gr_tpe_allow(const struct file *file);
53073 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53074 +void gr_clear_chroot_entries(struct task_struct *task);
53076 +void gr_log_forkfail(const int retval);
53077 +void gr_log_timechange(void);
53078 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53079 +void gr_log_chdir(const struct dentry *dentry,
53080 + const struct vfsmount *mnt);
53081 +void gr_log_chroot_exec(const struct dentry *dentry,
53082 + const struct vfsmount *mnt);
53083 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53084 +#ifdef CONFIG_COMPAT
53085 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53087 +void gr_log_remount(const char *devname, const int retval);
53088 +void gr_log_unmount(const char *devname, const int retval);
53089 +void gr_log_mount(const char *from, const char *to, const int retval);
53090 +void gr_log_textrel(struct vm_area_struct *vma);
53091 +void gr_log_rwxmmap(struct file *file);
53092 +void gr_log_rwxmprotect(struct file *file);
53094 +int gr_handle_follow_link(const struct inode *parent,
53095 + const struct inode *inode,
53096 + const struct dentry *dentry,
53097 + const struct vfsmount *mnt);
53098 +int gr_handle_fifo(const struct dentry *dentry,
53099 + const struct vfsmount *mnt,
53100 + const struct dentry *dir, const int flag,
53101 + const int acc_mode);
53102 +int gr_handle_hardlink(const struct dentry *dentry,
53103 + const struct vfsmount *mnt,
53104 + struct inode *inode,
53105 + const int mode, const char *to);
53107 +int gr_is_capable(const int cap);
53108 +int gr_is_capable_nolog(const int cap);
53109 +void gr_learn_resource(const struct task_struct *task, const int limit,
53110 + const unsigned long wanted, const int gt);
53111 +void gr_copy_label(struct task_struct *tsk);
53112 +void gr_handle_crash(struct task_struct *task, const int sig);
53113 +int gr_handle_signal(const struct task_struct *p, const int sig);
53114 +int gr_check_crash_uid(const uid_t uid);
53115 +int gr_check_protected_task(const struct task_struct *task);
53116 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53117 +int gr_acl_handle_mmap(const struct file *file,
53118 + const unsigned long prot);
53119 +int gr_acl_handle_mprotect(const struct file *file,
53120 + const unsigned long prot);
53121 +int gr_check_hidden_task(const struct task_struct *tsk);
53122 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53123 + const struct vfsmount *mnt);
53124 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
53125 + const struct vfsmount *mnt);
53126 +__u32 gr_acl_handle_access(const struct dentry *dentry,
53127 + const struct vfsmount *mnt, const int fmode);
53128 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53129 + const struct vfsmount *mnt, mode_t mode);
53130 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53131 + const struct vfsmount *mnt, mode_t mode);
53132 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
53133 + const struct vfsmount *mnt);
53134 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53135 + const struct vfsmount *mnt);
53136 +int gr_handle_ptrace(struct task_struct *task, const long request);
53137 +int gr_handle_proc_ptrace(struct task_struct *task);
53138 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
53139 + const struct vfsmount *mnt);
53140 +int gr_check_crash_exec(const struct file *filp);
53141 +int gr_acl_is_enabled(void);
53142 +void gr_set_kernel_label(struct task_struct *task);
53143 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
53144 + const gid_t gid);
53145 +int gr_set_proc_label(const struct dentry *dentry,
53146 + const struct vfsmount *mnt,
53147 + const int unsafe_share);
53148 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53149 + const struct vfsmount *mnt);
53150 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53151 + const struct vfsmount *mnt, const int fmode);
53152 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53153 + const struct dentry *p_dentry,
53154 + const struct vfsmount *p_mnt, const int fmode,
53155 + const int imode);
53156 +void gr_handle_create(const struct dentry *dentry,
53157 + const struct vfsmount *mnt);
53158 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53159 + const struct dentry *parent_dentry,
53160 + const struct vfsmount *parent_mnt,
53162 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53163 + const struct dentry *parent_dentry,
53164 + const struct vfsmount *parent_mnt);
53165 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53166 + const struct vfsmount *mnt);
53167 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53168 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53169 + const struct vfsmount *mnt);
53170 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53171 + const struct dentry *parent_dentry,
53172 + const struct vfsmount *parent_mnt,
53173 + const char *from);
53174 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53175 + const struct dentry *parent_dentry,
53176 + const struct vfsmount *parent_mnt,
53177 + const struct dentry *old_dentry,
53178 + const struct vfsmount *old_mnt, const char *to);
53179 +int gr_acl_handle_rename(struct dentry *new_dentry,
53180 + struct dentry *parent_dentry,
53181 + const struct vfsmount *parent_mnt,
53182 + struct dentry *old_dentry,
53183 + struct inode *old_parent_inode,
53184 + struct vfsmount *old_mnt, const char *newname);
53185 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53186 + struct dentry *old_dentry,
53187 + struct dentry *new_dentry,
53188 + struct vfsmount *mnt, const __u8 replace);
53189 +__u32 gr_check_link(const struct dentry *new_dentry,
53190 + const struct dentry *parent_dentry,
53191 + const struct vfsmount *parent_mnt,
53192 + const struct dentry *old_dentry,
53193 + const struct vfsmount *old_mnt);
53194 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53195 + const unsigned int namelen, const ino_t ino);
53197 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53198 + const struct vfsmount *mnt);
53199 +void gr_acl_handle_exit(void);
53200 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53201 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53202 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53203 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53204 +void gr_audit_ptrace(struct task_struct *task);
53205 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53207 +#ifdef CONFIG_GRKERNSEC
53208 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53209 +void gr_handle_vm86(void);
53210 +void gr_handle_mem_readwrite(u64 from, u64 to);
53212 +extern int grsec_enable_dmesg;
53213 +extern int grsec_disable_privio;
53214 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53215 +extern int grsec_enable_chroot_findtask;
53220 diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53221 --- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53222 +++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53224 +#ifndef __GRSOCK_H
53225 +#define __GRSOCK_H
53227 +extern void gr_attach_curr_ip(const struct sock *sk);
53228 +extern int gr_handle_sock_all(const int family, const int type,
53229 + const int protocol);
53230 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53231 +extern int gr_handle_sock_server_other(const struct sock *sck);
53232 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53233 +extern int gr_search_connect(struct socket * sock,
53234 + struct sockaddr_in * addr);
53235 +extern int gr_search_bind(struct socket * sock,
53236 + struct sockaddr_in * addr);
53237 +extern int gr_search_listen(struct socket * sock);
53238 +extern int gr_search_accept(struct socket * sock);
53239 +extern int gr_search_socket(const int domain, const int type,
53240 + const int protocol);
53243 diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53244 --- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53245 +++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53246 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53247 kunmap_atomic(kaddr, KM_USER0);
53250 +static inline void sanitize_highpage(struct page *page)
53251 +{
53252 +	void *kaddr;
53253 +	unsigned long flags;
53255 + local_irq_save(flags);
53256 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53257 + clear_page(kaddr);
53258 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53259 + local_irq_restore(flags);
53262 static inline void zero_user_segments(struct page *page,
53263 unsigned start1, unsigned end1,
53264 unsigned start2, unsigned end2)
53265 diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53266 --- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53267 +++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53268 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53269 /* To determine what the adapter supports */
53270 u32 (*functionality) (struct i2c_adapter *);
53272 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53275 * i2c_adapter is the structure used to identify a physical i2c bus along
53276 diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53277 --- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53278 +++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53279 @@ -564,7 +564,7 @@ struct i2o_controller {
53280 struct i2o_device *exec; /* Executive */
53281 #if BITS_PER_LONG == 64
53282 spinlock_t context_list_lock; /* lock for context_list */
53283 - atomic_t context_list_counter; /* needed for unique contexts */
53284 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53285 struct list_head context_list; /* list of context id's
53288 diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53289 --- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53290 +++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53291 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53293 /* Each module must use one module_init(). */
53294 #define module_init(initfn) \
53295 - static inline initcall_t __inittest(void) \
53296 + static inline __used initcall_t __inittest(void) \
53297 { return initfn; } \
53298 int init_module(void) __attribute__((alias(#initfn)));
53300 /* This is only required if you want to be unloadable. */
53301 #define module_exit(exitfn) \
53302 - static inline exitcall_t __exittest(void) \
53303 + static inline __used exitcall_t __exittest(void) \
53304 { return exitfn; } \
53305 void cleanup_module(void) __attribute__((alias(#exitfn)));
53307 diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53308 --- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53309 +++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53310 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
53315 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53317 +#define INIT_TASK_THREAD_INFO
53321 * Because of the reduced scope of CAP_SETPCAP when filesystem
53322 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53323 @@ -163,6 +169,7 @@ extern struct cred init_cred;
53324 RCU_INIT_POINTER(.cred, &init_cred), \
53325 .comm = "swapper", \
53326 .thread = INIT_THREAD, \
53327 + INIT_TASK_THREAD_INFO \
53329 .files = &init_files, \
53330 .signal = &init_signals, \
53331 diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53332 --- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53333 +++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53334 @@ -296,7 +296,7 @@ struct iommu_flush {
53336 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53337 unsigned int size_order, u64 type);
53343 diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53344 --- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53345 +++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53346 @@ -422,7 +422,7 @@ enum
53347 /* map softirq index to softirq name. update 'softirq_to_name' in
53348 * kernel/softirq.c when adding a new softirq.
53350 -extern char *softirq_to_name[NR_SOFTIRQS];
53351 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53353 /* softirq mask and active fields moved to irq_cpustat_t in
53354 * asm/hardirq.h to get better cache usage. KAO
53355 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53357 struct softirq_action
53359 - void (*action)(struct softirq_action *);
53360 + void (*action)(void);
53363 asmlinkage void do_softirq(void);
53364 asmlinkage void __do_softirq(void);
53365 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53366 +extern void open_softirq(int nr, void (*action)(void));
53367 extern void softirq_init(void);
53368 static inline void __raise_softirq_irqoff(unsigned int nr)
53370 diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53371 --- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53372 +++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53377 -#ifdef CONFIG_KALLSYMS
53378 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53379 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53380 /* Lookup the address for a symbol. Returns 0 if not found. */
53381 unsigned long kallsyms_lookup_name(const char *name);
53383 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53384 /* Stupid that this does nothing, but I didn't create this mess. */
53385 #define __print_symbol(fmt, addr)
53386 #endif /*CONFIG_KALLSYMS*/
53387 +#else /* when included by kallsyms.c, vsnprintf.c, or
53388 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53389 +extern void __print_symbol(const char *fmt, unsigned long address);
53390 +extern int sprint_backtrace(char *buffer, unsigned long address);
53391 +extern int sprint_symbol(char *buffer, unsigned long address);
53392 +const char *kallsyms_lookup(unsigned long addr,
53393 + unsigned long *symbolsize,
53394 + unsigned long *offset,
53395 + char **modname, char *namebuf);
53398 /* This macro allows us to keep printk typechecking */
53399 static void __check_printsym_format(const char *fmt, ...)
53400 diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53401 --- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53402 +++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53403 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53404 extern int kgdb_io_module_registered;
53406 extern atomic_t kgdb_setting_breakpoint;
53407 -extern atomic_t kgdb_cpu_doing_single_step;
53408 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53410 extern struct task_struct *kgdb_usethread;
53411 extern struct task_struct *kgdb_contthread;
53412 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53413 * hardware debug registers.
53416 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53417 - unsigned long flags;
53418 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53419 + const unsigned long flags;
53421 int (*set_breakpoint)(unsigned long, char *);
53422 int (*remove_breakpoint)(unsigned long, char *);
53423 @@ -268,14 +268,14 @@ struct kgdb_arch {
53427 - const char *name;
53428 + const char * const name;
53429 int (*read_char) (void);
53430 void (*write_char) (u8);
53431 void (*flush) (void);
53432 int (*init) (void);
53433 void (*pre_exception) (void);
53434 void (*post_exception) (void);
53436 + const int is_console;
53439 extern struct kgdb_arch arch_kgdb_ops;
53440 diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53441 --- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53442 +++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53443 @@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53444 * usually useless though. */
53445 extern int __request_module(bool wait, const char *name, ...) \
53446 __attribute__((format(printf, 2, 3)));
53447 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53448 + __attribute__((format(printf, 3, 4)));
53449 #define request_module(mod...) __request_module(true, mod)
53450 #define request_module_nowait(mod...) __request_module(false, mod)
53451 #define try_then_request_module(x, mod...) \
53452 diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53453 --- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53454 +++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53455 @@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53456 void vcpu_load(struct kvm_vcpu *vcpu);
53457 void vcpu_put(struct kvm_vcpu *vcpu);
53459 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53460 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53461 struct module *module);
53462 void kvm_exit(void);
53464 @@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53465 struct kvm_guest_debug *dbg);
53466 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53468 -int kvm_arch_init(void *opaque);
53469 +int kvm_arch_init(const void *opaque);
53470 void kvm_arch_exit(void);
53472 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53473 diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53474 --- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53475 +++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53476 @@ -898,7 +898,7 @@ struct ata_port_operations {
53477 * ->inherits must be the last field and all the preceding
53478 * fields must be pointers.
53480 - const struct ata_port_operations *inherits;
53481 + const struct ata_port_operations * const inherits;
53484 struct ata_port_info {
53485 diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53486 --- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53487 +++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53488 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53490 void * (*mca_transform_memory)(struct mca_device *,
53496 u64 default_dma_mask;
53497 diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53498 --- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53499 +++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53500 @@ -142,7 +142,7 @@ struct memory_accessor {
53502 ssize_t (*write)(struct memory_accessor *, const char *buf,
53503 off_t offset, size_t count);
53508 * Kernel text modification mutex, used for code patching. Users of this lock
53509 diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53510 --- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53511 +++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53512 @@ -226,6 +226,7 @@ struct abx500_ops {
53513 int (*event_registers_startup_state_get) (struct device *, u8 *);
53514 int (*startup_irq_enabled) (struct device *, unsigned int);
53516 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53518 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53519 void abx500_remove_ops(struct device *dev);
53520 diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53521 --- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53522 +++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53523 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53525 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53526 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53528 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53529 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53530 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53532 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53535 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53536 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53538 @@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53539 int set_page_dirty_lock(struct page *page);
53540 int clear_page_dirty_for_io(struct page *page);
53542 -/* Is the vma a continuation of the stack vma above it? */
53543 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53545 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53548 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53549 - unsigned long addr)
53551 - return (vma->vm_flags & VM_GROWSDOWN) &&
53552 - (vma->vm_start == addr) &&
53553 - !vma_growsdown(vma->vm_prev, addr);
53556 -/* Is the vma a continuation of the stack vma below it? */
53557 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53559 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53562 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53563 - unsigned long addr)
53565 - return (vma->vm_flags & VM_GROWSUP) &&
53566 - (vma->vm_end == addr) &&
53567 - !vma_growsup(vma->vm_next, addr);
53570 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53571 unsigned long old_addr, struct vm_area_struct *new_vma,
53572 unsigned long new_addr, unsigned long len);
53573 @@ -1189,6 +1168,15 @@ struct shrinker {
53574 extern void register_shrinker(struct shrinker *);
53575 extern void unregister_shrinker(struct shrinker *);
53578 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
53580 +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53582 + return __pgprot(0);
53586 int vma_wants_writenotify(struct vm_area_struct *vma);
53588 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53589 @@ -1476,6 +1464,7 @@ out:
53592 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53593 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53595 extern unsigned long do_brk(unsigned long, unsigned long);
53597 @@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53598 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53599 struct vm_area_struct **pprev);
53601 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53602 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53603 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53605 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53606 NULL if none. Assume start_addr < end_addr. */
53607 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53608 @@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53609 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53613 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53615 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53617 - return __pgprot(0);
53621 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53622 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53623 unsigned long pfn, unsigned long size, pgprot_t);
53624 @@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53625 extern int sysctl_memory_failure_early_kill;
53626 extern int sysctl_memory_failure_recovery;
53627 extern void shake_page(struct page *p, int access);
53628 -extern atomic_long_t mce_bad_pages;
53629 +extern atomic_long_unchecked_t mce_bad_pages;
53630 extern int soft_offline_page(struct page *page, int flags);
53632 extern void dump_page(struct page *page);
53633 @@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53634 unsigned int pages_per_huge_page);
53635 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53637 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53638 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53640 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53643 #endif /* __KERNEL__ */
53644 #endif /* _LINUX_MM_H */
53645 diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53646 --- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53647 +++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53648 @@ -183,6 +183,8 @@ struct vm_area_struct {
53650 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53653 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53656 struct core_thread {
53657 @@ -317,6 +319,24 @@ struct mm_struct {
53658 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53659 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53662 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53663 + unsigned long pax_flags;
53666 +#ifdef CONFIG_PAX_DLRESOLVE
53667 + unsigned long call_dl_resolve;
53670 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53671 + unsigned long call_syscall;
53674 +#ifdef CONFIG_PAX_ASLR
53675 + unsigned long delta_mmap; /* randomized offset */
53676 + unsigned long delta_stack; /* randomized offset */
53681 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53682 diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53683 --- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53684 +++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53685 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53687 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53691 struct vm_area_struct *___vma = __vma; \
53692 unsigned long ___address = __address; \
53693 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53694 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53695 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53700 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53701 diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53702 --- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53703 +++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53704 @@ -355,7 +355,7 @@ struct zone {
53705 unsigned long flags; /* zone flags, see below */
53707 /* Zone statistics */
53708 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53709 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53712 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53713 diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53714 --- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53715 +++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53717 typedef unsigned long kernel_ulong_t;
53720 -#define PCI_ANY_ID (~0)
53721 +#define PCI_ANY_ID ((__u16)~0)
53723 struct pci_device_id {
53724 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53725 @@ -131,7 +131,7 @@ struct usb_device_id {
53726 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53727 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53729 -#define HID_ANY_ID (~0)
53730 +#define HID_ANY_ID (~0U)
53732 struct hid_device_id {
53734 diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53735 --- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53736 +++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53738 #include <linux/kobject.h>
53739 #include <linux/moduleparam.h>
53740 #include <linux/tracepoint.h>
53741 +#include <linux/fs.h>
53743 #include <linux/percpu.h>
53744 #include <asm/module.h>
53745 @@ -324,19 +325,16 @@ struct module
53748 /* If this is non-NULL, vfree after init() returns */
53749 - void *module_init;
53750 + void *module_init_rx, *module_init_rw;
53752 /* Here is the actual code + data, vfree'd on unload. */
53753 - void *module_core;
53754 + void *module_core_rx, *module_core_rw;
53756 /* Here are the sizes of the init and core sections */
53757 - unsigned int init_size, core_size;
53758 + unsigned int init_size_rw, core_size_rw;
53760 /* The size of the executable code in each section. */
53761 - unsigned int init_text_size, core_text_size;
53763 - /* Size of RO sections of the module (text+rodata) */
53764 - unsigned int init_ro_size, core_ro_size;
53765 + unsigned int init_size_rx, core_size_rx;
53767 /* Arch-specific module values */
53768 struct mod_arch_specific arch;
53769 @@ -391,6 +389,10 @@ struct module
53770 #ifdef CONFIG_EVENT_TRACING
53771 struct ftrace_event_call **trace_events;
53772 unsigned int num_trace_events;
53773 + struct file_operations trace_id;
53774 + struct file_operations trace_enable;
53775 + struct file_operations trace_format;
53776 + struct file_operations trace_filter;
53778 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53779 unsigned long *ftrace_callsites;
53780 @@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53781 bool is_module_percpu_address(unsigned long addr);
53782 bool is_module_text_address(unsigned long addr);
53784 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53787 +#ifdef CONFIG_PAX_KERNEXEC
53788 + if (ktla_ktva(addr) >= (unsigned long)start &&
53789 + ktla_ktva(addr) < (unsigned long)start + size)
53793 + return ((void *)addr >= start && (void *)addr < start + size);
53796 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53798 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53801 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53803 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53806 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53808 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53811 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53813 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53816 static inline int within_module_core(unsigned long addr, struct module *mod)
53818 - return (unsigned long)mod->module_core <= addr &&
53819 - addr < (unsigned long)mod->module_core + mod->core_size;
53820 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53823 static inline int within_module_init(unsigned long addr, struct module *mod)
53825 - return (unsigned long)mod->module_init <= addr &&
53826 - addr < (unsigned long)mod->module_init + mod->init_size;
53827 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53830 /* Search for module by name: must hold module_mutex. */
53831 diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53832 --- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53833 +++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53834 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53835 sections. Returns NULL on failure. */
53836 void *module_alloc(unsigned long size);
53838 +#ifdef CONFIG_PAX_KERNEXEC
53839 +void *module_alloc_exec(unsigned long size);
53841 +#define module_alloc_exec(x) module_alloc(x)
53844 /* Free memory returned from module_alloc. */
53845 void module_free(struct module *mod, void *module_region);
53847 +#ifdef CONFIG_PAX_KERNEXEC
53848 +void module_free_exec(struct module *mod, void *module_region);
53850 +#define module_free_exec(x, y) module_free((x), (y))
53853 /* Apply the given relocation to the (simplified) ELF. Return -error
53855 int apply_relocate(Elf_Shdr *sechdrs,
53856 diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53857 --- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53858 +++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53859 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53860 * @len is usually just sizeof(string).
53862 #define module_param_string(name, string, len, perm) \
53863 - static const struct kparam_string __param_string_##name \
53864 + static const struct kparam_string __param_string_##name __used \
53865 = { len, string }; \
53866 __module_param_call(MODULE_PARAM_PREFIX, name, \
53867 ¶m_ops_string, \
53868 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53869 * module_param_named() for why this might be necessary.
53871 #define module_param_array_named(name, array, type, nump, perm) \
53872 - static const struct kparam_array __param_arr_##name \
53873 + static const struct kparam_array __param_arr_##name __used \
53874 = { ARRAY_SIZE(array), nump, ¶m_ops_##type, \
53875 sizeof(array[0]), array }; \
53876 __module_param_call(MODULE_PARAM_PREFIX, name, \
53877 diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53878 --- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53879 +++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53880 @@ -51,7 +51,7 @@ struct mutex {
53881 spinlock_t wait_lock;
53882 struct list_head wait_list;
53883 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53884 - struct thread_info *owner;
53885 + struct task_struct *owner;
53887 #ifdef CONFIG_DEBUG_MUTEXES
53889 diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53890 --- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53891 +++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53892 @@ -24,7 +24,7 @@ struct nameidata {
53896 - char *saved_names[MAX_NESTED_LINKS + 1];
53897 + const char *saved_names[MAX_NESTED_LINKS + 1];
53901 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53902 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53903 extern void unlock_rename(struct dentry *, struct dentry *);
53905 -static inline void nd_set_link(struct nameidata *nd, char *path)
53906 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53908 nd->saved_names[nd->depth] = path;
53911 -static inline char *nd_get_link(struct nameidata *nd)
53912 +static inline const char *nd_get_link(const struct nameidata *nd)
53914 return nd->saved_names[nd->depth];
53916 diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53917 --- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53918 +++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53919 @@ -979,6 +979,7 @@ struct net_device_ops {
53920 int (*ndo_set_features)(struct net_device *dev,
53923 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53926 * The DEVICE structure.
53927 diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53928 --- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53929 +++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53931 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53932 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53934 +struct xt_gradm_mtinfo {
53940 diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53941 --- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53942 +++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53943 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53944 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53945 char const * name, ulong * val);
53947 -/** Create a file for read-only access to an atomic_t. */
53948 +/** Create a file for read-only access to an atomic_unchecked_t. */
53949 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53950 - char const * name, atomic_t * val);
53951 + char const * name, atomic_unchecked_t * val);
53953 /** create a directory */
53954 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53955 diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53956 --- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53957 +++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53958 @@ -129,7 +129,7 @@ struct parallel_data {
53959 struct padata_instance *pinst;
53960 struct padata_parallel_queue __percpu *pqueue;
53961 struct padata_serial_queue __percpu *squeue;
53963 + atomic_unchecked_t seq_nr;
53964 atomic_t reorder_objects;
53966 unsigned int max_seq_nr;
53967 diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53968 --- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53969 +++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53970 @@ -759,8 +759,8 @@ struct perf_event {
53972 enum perf_event_active_state state;
53973 unsigned int attach_state;
53975 - atomic64_t child_count;
53976 + local64_t count; /* PaX: fix it one day */
53977 + atomic64_unchecked_t child_count;
53980 * These are the total time in nanoseconds that the event
53981 @@ -811,8 +811,8 @@ struct perf_event {
53982 * These accumulate total time (in nanoseconds) that children
53983 * events have been enabled and running, respectively.
53985 - atomic64_t child_total_time_enabled;
53986 - atomic64_t child_total_time_running;
53987 + atomic64_unchecked_t child_total_time_enabled;
53988 + atomic64_unchecked_t child_total_time_running;
53991 * Protect attach/detach and child_list:
53992 diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
53993 --- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
53994 +++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
53995 @@ -46,9 +46,9 @@ struct pipe_buffer {
53996 struct pipe_inode_info {
53997 wait_queue_head_t wait;
53998 unsigned int nrbufs, curbuf, buffers;
53999 - unsigned int readers;
54000 - unsigned int writers;
54001 - unsigned int waiting_writers;
54002 + atomic_t readers;
54003 + atomic_t writers;
54004 + atomic_t waiting_writers;
54005 unsigned int r_counter;
54006 unsigned int w_counter;
54007 struct page *tmp_page;
54008 diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
54009 --- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
54010 +++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
54011 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
54013 static inline void pm_runtime_mark_last_busy(struct device *dev)
54015 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
54016 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
54019 #else /* !CONFIG_PM_RUNTIME */
54020 diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
54021 --- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
54022 +++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
54024 * under normal circumstances, used to verify that nobody uses
54025 * non-initialized list entries.
54027 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
54028 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
54029 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
54030 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
54032 /********** include/linux/timer.h **********/
54034 diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
54035 --- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
54036 +++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
54037 @@ -115,7 +115,7 @@ struct preempt_ops {
54038 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
54039 void (*sched_out)(struct preempt_notifier *notifier,
54040 struct task_struct *next);
54045 * preempt_notifier - key for installing preemption notifiers
54046 diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
54047 --- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
54048 +++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
54049 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
54050 return proc_create_data(name, mode, parent, proc_fops, NULL);
54053 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
54054 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
54056 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54057 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
54058 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54059 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
54061 + return proc_create_data(name, mode, parent, proc_fops, NULL);
54066 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54067 mode_t mode, struct proc_dir_entry *base,
54068 read_proc_t *read_proc, void * data)
54069 @@ -258,7 +271,7 @@ union proc_op {
54070 int (*proc_show)(struct seq_file *m,
54071 struct pid_namespace *ns, struct pid *pid,
54072 struct task_struct *task);
54076 struct ctl_table_header;
54078 diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54079 --- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54080 +++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54081 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54082 extern void exit_ptrace(struct task_struct *tracer);
54083 #define PTRACE_MODE_READ 1
54084 #define PTRACE_MODE_ATTACH 2
54085 -/* Returns 0 on success, -errno on denial. */
54086 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54087 /* Returns true on success, false on denial. */
54088 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54089 +/* Returns true on success, false on denial. */
54090 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54092 static inline int ptrace_reparented(struct task_struct *child)
54094 diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54095 --- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54096 +++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54097 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
54099 u32 prandom32(struct rnd_state *);
54101 +static inline unsigned long pax_get_random_long(void)
54103 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54107 * Handle minimum values for seeds
54109 static inline u32 __seed(u32 x, u32 m)
54111 - return (x < m) ? x + m : x;
54112 + return (x <= m) ? x + m + 1 : x;
54116 diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54117 --- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54118 +++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54119 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54120 * Architecture-specific implementations of sys_reboot commands.
54123 -extern void machine_restart(char *cmd);
54124 -extern void machine_halt(void);
54125 -extern void machine_power_off(void);
54126 +extern void machine_restart(char *cmd) __noreturn;
54127 +extern void machine_halt(void) __noreturn;
54128 +extern void machine_power_off(void) __noreturn;
54130 extern void machine_shutdown(void);
54132 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54135 extern void kernel_restart_prepare(char *cmd);
54136 -extern void kernel_restart(char *cmd);
54137 -extern void kernel_halt(void);
54138 -extern void kernel_power_off(void);
54139 +extern void kernel_restart(char *cmd) __noreturn;
54140 +extern void kernel_halt(void) __noreturn;
54141 +extern void kernel_power_off(void) __noreturn;
54143 extern int C_A_D; /* for sysctl */
54144 void ctrl_alt_del(void);
54145 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54146 * Emergency restart, callable from an interrupt handler.
54149 -extern void emergency_restart(void);
54150 +extern void emergency_restart(void) __noreturn;
54151 #include <asm/emergency-restart.h>
54154 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54155 --- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54156 +++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54157 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54158 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54160 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54161 -#define get_generation(s) atomic_read (&fs_generation(s))
54162 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54163 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54164 #define __fs_changed(gen,s) (gen != get_generation (s))
54165 #define fs_changed(gen,s) \
54166 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54167 --- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54168 +++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54169 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54170 /* Comment? -Hans */
54171 wait_queue_head_t s_wait;
54172 /* To be obsoleted soon by per buffer seals.. -Hans */
54173 - atomic_t s_generation_counter; // increased by one every time the
54174 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54175 // tree gets re-balanced
54176 unsigned long s_properties; /* File system properties. Currently holds
54177 on-disk FS format */
54178 diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54179 --- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54180 +++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54181 @@ -159,7 +159,7 @@ struct rchan_callbacks
54182 * The callback should return 0 if successful, negative if not.
54184 int (*remove_buf_file)(struct dentry *dentry);
54189 * CONFIG_RELAY kernel API, kernel/relay.c
54190 diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54191 --- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54192 +++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54193 @@ -147,6 +147,7 @@ struct rfkill_ops {
54194 void (*query)(struct rfkill *rfkill, void *data);
54195 int (*set_block)(void *data, bool blocked);
54197 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54199 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54201 diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54202 --- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54203 +++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54204 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54205 void anon_vma_init(void); /* create anon_vma_cachep */
54206 int anon_vma_prepare(struct vm_area_struct *);
54207 void unlink_anon_vmas(struct vm_area_struct *);
54208 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54209 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54210 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54211 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54212 void __anon_vma_link(struct vm_area_struct *);
54214 static inline void anon_vma_merge(struct vm_area_struct *vma,
54215 diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54216 --- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54217 +++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54218 @@ -100,6 +100,7 @@ struct bio_list;
54220 struct perf_event_context;
54222 +struct linux_binprm;
54225 * List of flags we want to share for kernel threads,
54226 @@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54227 extern signed long schedule_timeout_killable(signed long timeout);
54228 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54229 asmlinkage void schedule(void);
54230 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54231 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54234 struct user_namespace;
54235 @@ -381,10 +382,13 @@ struct user_namespace;
54236 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54238 extern int sysctl_max_map_count;
54239 +extern unsigned long sysctl_heap_stack_gap;
54241 #include <linux/aio.h>
54244 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54245 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54246 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54247 extern unsigned long
54248 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54249 @@ -629,6 +633,17 @@ struct signal_struct {
54250 #ifdef CONFIG_TASKSTATS
54251 struct taskstats *stats;
54254 +#ifdef CONFIG_GRKERNSEC
54261 + u8 used_accept:1;
54264 #ifdef CONFIG_AUDIT
54265 unsigned audit_tty;
54266 struct tty_audit_buf *tty_audit_buf;
54267 @@ -701,6 +716,11 @@ struct user_struct {
54268 struct key *session_keyring; /* UID's default session keyring */
54271 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54272 + unsigned int banned;
54273 + unsigned long ban_expires;
54276 /* Hash table maintenance information */
54277 struct hlist_node uidhash_node;
54279 @@ -1310,8 +1330,8 @@ struct task_struct {
54280 struct list_head thread_group;
54282 struct completion *vfork_done; /* for vfork() */
54283 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54284 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54285 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54286 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54288 cputime_t utime, stime, utimescaled, stimescaled;
54290 @@ -1327,13 +1347,6 @@ struct task_struct {
54291 struct task_cputime cputime_expires;
54292 struct list_head cpu_timers[3];
54294 -/* process credentials */
54295 - const struct cred __rcu *real_cred; /* objective and real subjective task
54296 - * credentials (COW) */
54297 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54298 - * credentials (COW) */
54299 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54301 char comm[TASK_COMM_LEN]; /* executable name excluding path
54302 - access with [gs]et_task_comm (which lock
54303 it with task_lock())
54304 @@ -1350,8 +1363,16 @@ struct task_struct {
54306 /* CPU-specific state of this task */
54307 struct thread_struct thread;
54308 +/* thread_info moved to task_struct */
54310 + struct thread_info tinfo;
54312 /* filesystem information */
54313 struct fs_struct *fs;
54315 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54316 + * credentials (COW) */
54318 /* open file information */
54319 struct files_struct *files;
54321 @@ -1398,6 +1419,11 @@ struct task_struct {
54322 struct rt_mutex_waiter *pi_blocked_on;
54325 +/* process credentials */
54326 + const struct cred __rcu *real_cred; /* objective and real subjective task
54327 + * credentials (COW) */
54328 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54330 #ifdef CONFIG_DEBUG_MUTEXES
54331 /* mutex deadlock detection */
54332 struct mutex_waiter *blocked_on;
54333 @@ -1508,6 +1534,21 @@ struct task_struct {
54334 unsigned long default_timer_slack_ns;
54336 struct list_head *scm_work_list;
54338 +#ifdef CONFIG_GRKERNSEC
54340 + struct dentry *gr_chroot_dentry;
54341 + struct acl_subject_label *acl;
54342 + struct acl_role_label *role;
54343 + struct file *exec_file;
54345 + /* is this the task that authenticated to the special role */
54349 + u8 gr_is_chrooted;
54352 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54353 /* Index of current stored address in ret_stack */
54354 int curr_ret_stack;
54355 @@ -1542,6 +1583,57 @@ struct task_struct {
54359 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54360 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54361 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54362 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54363 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54364 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54366 +#ifdef CONFIG_PAX_SOFTMODE
54367 +extern int pax_softmode;
54370 +extern int pax_check_flags(unsigned long *);
54372 +/* if tsk != current then task_lock must be held on it */
54373 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54374 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54376 + if (likely(tsk->mm))
54377 + return tsk->mm->pax_flags;
54382 +/* if tsk != current then task_lock must be held on it */
54383 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54385 + if (likely(tsk->mm)) {
54386 + tsk->mm->pax_flags = flags;
54393 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54394 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54395 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54396 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54399 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54400 +extern void pax_report_insns(void *pc, void *sp);
54401 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54402 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54404 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54405 +extern void pax_track_stack(void);
54407 +static inline void pax_track_stack(void) {}
54410 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54411 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54413 @@ -2009,7 +2101,9 @@ void yield(void);
54414 extern struct exec_domain default_exec_domain;
54416 union thread_union {
54417 +#ifndef CONFIG_X86
54418 struct thread_info thread_info;
54420 unsigned long stack[THREAD_SIZE/sizeof(long)];
54423 @@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54426 extern struct task_struct *find_task_by_vpid(pid_t nr);
54427 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54428 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54429 struct pid_namespace *ns);
54431 @@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54432 extern void exit_itimers(struct signal_struct *);
54433 extern void flush_itimer_signals(void);
54435 -extern NORET_TYPE void do_group_exit(int);
54436 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54438 extern void daemonize(const char *, ...);
54439 extern int allow_signal(int);
54440 @@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54444 -static inline int object_is_on_stack(void *obj)
54445 +static inline int object_starts_on_stack(void *obj)
54447 - void *stack = task_stack_page(current);
54448 + const void *stack = task_stack_page(current);
54450 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54453 +#ifdef CONFIG_PAX_USERCOPY
54454 +extern int object_is_on_stack(const void *obj, unsigned long len);
54457 extern void thread_info_cache_init(void);
54459 #ifdef CONFIG_DEBUG_STACK_USAGE
54460 diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54461 --- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54462 +++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54463 @@ -43,7 +43,8 @@ struct screen_info {
54464 __u16 pages; /* 0x32 */
54465 __u16 vesa_attributes; /* 0x34 */
54466 __u32 capabilities; /* 0x36 */
54467 - __u8 _reserved[6]; /* 0x3a */
54468 + __u16 vesapm_size; /* 0x3a */
54469 + __u8 _reserved[4]; /* 0x3c */
54470 } __attribute__((packed));
54472 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54473 diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54474 --- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54475 +++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54477 #include <linux/key.h>
54478 #include <linux/xfrm.h>
54479 #include <linux/slab.h>
54480 +#include <linux/grsecurity.h>
54481 #include <net/flow.h>
54483 /* Maximum number of letters for an LSM name string */
54484 diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54485 --- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54486 +++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54487 @@ -32,6 +32,7 @@ struct seq_operations {
54488 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54489 int (*show) (struct seq_file *m, void *v);
54491 +typedef struct seq_operations __no_const seq_operations_no_const;
54495 diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54496 --- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54497 +++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54498 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54501 struct user_struct *mlock_user;
54502 +#ifdef CONFIG_GRKERNSEC
54503 + time_t shm_createtime;
54508 /* shm_mode upper byte flags */
54509 diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54510 --- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54511 +++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54512 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54514 static inline int skb_queue_empty(const struct sk_buff_head *list)
54516 - return list->next == (struct sk_buff *)list;
54517 + return list->next == (const struct sk_buff *)list;
54521 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54522 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54523 const struct sk_buff *skb)
54525 - return skb->next == (struct sk_buff *)list;
54526 + return skb->next == (const struct sk_buff *)list;
54530 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54531 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54532 const struct sk_buff *skb)
54534 - return skb->prev == (struct sk_buff *)list;
54535 + return skb->prev == (const struct sk_buff *)list;
54539 @@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54540 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54542 #ifndef NET_SKB_PAD
54543 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54544 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54547 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54548 diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54549 --- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54550 +++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54551 @@ -96,10 +96,10 @@ struct kmem_cache {
54552 unsigned long node_allocs;
54553 unsigned long node_frees;
54554 unsigned long node_overflow;
54555 - atomic_t allochit;
54556 - atomic_t allocmiss;
54557 - atomic_t freehit;
54558 - atomic_t freemiss;
54559 + atomic_unchecked_t allochit;
54560 + atomic_unchecked_t allocmiss;
54561 + atomic_unchecked_t freehit;
54562 + atomic_unchecked_t freemiss;
54565 * If debugging is enabled, then the allocator can add additional
54566 diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54567 --- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54568 +++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54569 @@ -11,12 +11,20 @@
54571 #include <linux/gfp.h>
54572 #include <linux/types.h>
54573 +#include <linux/err.h>
54576 * Flags to pass to kmem_cache_create().
54577 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54579 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54581 +#ifdef CONFIG_PAX_USERCOPY
54582 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54584 +#define SLAB_USERCOPY 0x00000000UL
54587 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54588 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54589 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54590 @@ -87,10 +95,13 @@
54591 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54592 * Both make kfree a no-op.
54594 -#define ZERO_SIZE_PTR ((void *)16)
54595 +#define ZERO_SIZE_PTR \
54597 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54598 + (void *)(-MAX_ERRNO-1L); \
54601 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54602 - (unsigned long)ZERO_SIZE_PTR)
54603 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54606 * struct kmem_cache related prototypes
54607 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54608 void kfree(const void *);
54609 void kzfree(const void *);
54610 size_t ksize(const void *);
54611 +void check_object_size(const void *ptr, unsigned long n, bool to);
54614 * Allocator specific definitions. These are mainly used to establish optimized
54615 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54617 void __init kmem_cache_init_late(void);
54619 +#define kmalloc(x, y) \
54621 + void *___retval; \
54622 + intoverflow_t ___x = (intoverflow_t)x; \
54623 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54624 + ___retval = NULL; \
54626 + ___retval = kmalloc((size_t)___x, (y)); \
54630 +#define kmalloc_node(x, y, z) \
54632 + void *___retval; \
54633 + intoverflow_t ___x = (intoverflow_t)x; \
54634 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54635 + ___retval = NULL; \
54637 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54641 +#define kzalloc(x, y) \
54643 + void *___retval; \
54644 + intoverflow_t ___x = (intoverflow_t)x; \
54645 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54646 + ___retval = NULL; \
54648 + ___retval = kzalloc((size_t)___x, (y)); \
54652 +#define __krealloc(x, y, z) \
54654 + void *___retval; \
54655 + intoverflow_t ___y = (intoverflow_t)y; \
54656 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54657 + ___retval = NULL; \
54659 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54663 +#define krealloc(x, y, z) \
54665 + void *___retval; \
54666 + intoverflow_t ___y = (intoverflow_t)y; \
54667 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54668 + ___retval = NULL; \
54670 + ___retval = krealloc((x), (size_t)___y, (z)); \
54674 #endif /* _LINUX_SLAB_H */
54675 diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54676 --- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54677 +++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54678 @@ -84,7 +84,7 @@ struct kmem_cache {
54679 struct kmem_cache_order_objects max;
54680 struct kmem_cache_order_objects min;
54681 gfp_t allocflags; /* gfp flags to use on each alloc */
54682 - int refcount; /* Refcount for slab cache destroy */
54683 + atomic_t refcount; /* Refcount for slab cache destroy */
54684 void (*ctor)(void *);
54685 int inuse; /* Offset to metadata */
54686 int align; /* Alignment */
54687 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54690 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54691 -void *__kmalloc(size_t size, gfp_t flags);
54692 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54694 static __always_inline void *
54695 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54696 diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54697 --- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54698 +++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54699 @@ -61,7 +61,7 @@ struct sonet_stats {
54700 #include <asm/atomic.h>
54702 struct k_sonet_stats {
54703 -#define __HANDLE_ITEM(i) atomic_t i
54704 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54706 #undef __HANDLE_ITEM
54708 diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54709 --- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54710 +++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54711 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54713 switch (sap->sa_family) {
54715 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54716 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54718 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54719 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54723 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54724 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54725 const struct sockaddr *src)
54727 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54728 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54729 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54731 dsin->sin_family = ssin->sin_family;
54732 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54733 if (sa->sa_family != AF_INET6)
54736 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54737 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54740 #endif /* __KERNEL__ */
54741 diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54742 --- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54743 +++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54744 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54745 extern unsigned int svcrdma_max_requests;
54746 extern unsigned int svcrdma_max_req_size;
54748 -extern atomic_t rdma_stat_recv;
54749 -extern atomic_t rdma_stat_read;
54750 -extern atomic_t rdma_stat_write;
54751 -extern atomic_t rdma_stat_sq_starve;
54752 -extern atomic_t rdma_stat_rq_starve;
54753 -extern atomic_t rdma_stat_rq_poll;
54754 -extern atomic_t rdma_stat_rq_prod;
54755 -extern atomic_t rdma_stat_sq_poll;
54756 -extern atomic_t rdma_stat_sq_prod;
54757 +extern atomic_unchecked_t rdma_stat_recv;
54758 +extern atomic_unchecked_t rdma_stat_read;
54759 +extern atomic_unchecked_t rdma_stat_write;
54760 +extern atomic_unchecked_t rdma_stat_sq_starve;
54761 +extern atomic_unchecked_t rdma_stat_rq_starve;
54762 +extern atomic_unchecked_t rdma_stat_rq_poll;
54763 +extern atomic_unchecked_t rdma_stat_rq_prod;
54764 +extern atomic_unchecked_t rdma_stat_sq_poll;
54765 +extern atomic_unchecked_t rdma_stat_sq_prod;
54767 #define RPCRDMA_VERSION 1
54769 diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54770 --- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54771 +++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54772 @@ -155,7 +155,11 @@ enum
54773 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54777 +#ifdef CONFIG_PAX_SOFTMODE
54779 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54783 /* CTL_VM names: */
54785 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54787 extern int proc_dostring(struct ctl_table *, int,
54788 void __user *, size_t *, loff_t *);
54789 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54790 + void __user *, size_t *, loff_t *);
54791 extern int proc_dointvec(struct ctl_table *, int,
54792 void __user *, size_t *, loff_t *);
54793 extern int proc_dointvec_minmax(struct ctl_table *, int,
54794 diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54795 --- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54796 +++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54797 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54799 struct module *owner;
54802 + atomic_t refcount;
54806 diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54807 --- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54808 +++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54809 @@ -213,10 +213,26 @@ typedef struct {
54813 +#ifdef CONFIG_PAX_REFCOUNT
54816 +} atomic_unchecked_t;
54818 +typedef atomic_t atomic_unchecked_t;
54821 #ifdef CONFIG_64BIT
54826 +#ifdef CONFIG_PAX_REFCOUNT
54829 +} atomic64_unchecked_t;
54831 +typedef atomic64_t atomic64_unchecked_t;
54836 diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54837 --- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54838 +++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54839 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54841 mm_segment_t old_fs = get_fs(); \
54843 - set_fs(KERNEL_DS); \
54844 pagefault_disable(); \
54845 + set_fs(KERNEL_DS); \
54846 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54847 - pagefault_enable(); \
54849 + pagefault_enable(); \
54853 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54854 * Safely read from address @src to the buffer at @dst. If a kernel fault
54855 * happens, handle that and return -EFAULT.
54857 -extern long probe_kernel_read(void *dst, void *src, size_t size);
54858 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
54859 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
54860 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54863 * probe_kernel_write(): safely attempt to write to a location
54864 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54865 * Safely write to address @dst from the buffer at @src. If a kernel fault
54866 * happens, handle that and return -EFAULT.
54868 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54869 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54870 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54871 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54873 #endif /* __LINUX_UACCESS_H__ */
54874 diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54875 --- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54876 +++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54879 static inline u16 get_unaligned_le16(const void *p)
54881 - return le16_to_cpup((__le16 *)p);
54882 + return le16_to_cpup((const __le16 *)p);
54885 static inline u32 get_unaligned_le32(const void *p)
54887 - return le32_to_cpup((__le32 *)p);
54888 + return le32_to_cpup((const __le32 *)p);
54891 static inline u64 get_unaligned_le64(const void *p)
54893 - return le64_to_cpup((__le64 *)p);
54894 + return le64_to_cpup((const __le64 *)p);
54897 static inline u16 get_unaligned_be16(const void *p)
54899 - return be16_to_cpup((__be16 *)p);
54900 + return be16_to_cpup((const __be16 *)p);
54903 static inline u32 get_unaligned_be32(const void *p)
54905 - return be32_to_cpup((__be32 *)p);
54906 + return be32_to_cpup((const __be32 *)p);
54909 static inline u64 get_unaligned_be64(const void *p)
54911 - return be64_to_cpup((__be64 *)p);
54912 + return be64_to_cpup((const __be64 *)p);
54915 static inline void put_unaligned_le16(u16 val, void *p)
54916 diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54917 --- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54918 +++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54919 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54920 #define VM_MAP 0x00000004 /* vmap()ed pages */
54921 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54922 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54924 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54925 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54928 /* bits [20..32] reserved for arch specific ioremap internals */
54931 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54935 +#define vmalloc(x) \
54937 + void *___retval; \
54938 + intoverflow_t ___x = (intoverflow_t)x; \
54939 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54940 + ___retval = NULL; \
54942 + ___retval = vmalloc((unsigned long)___x); \
54946 +#define vzalloc(x) \
54948 + void *___retval; \
54949 + intoverflow_t ___x = (intoverflow_t)x; \
54950 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54951 + ___retval = NULL; \
54953 + ___retval = vzalloc((unsigned long)___x); \
54957 +#define __vmalloc(x, y, z) \
54959 + void *___retval; \
54960 + intoverflow_t ___x = (intoverflow_t)x; \
54961 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54962 + ___retval = NULL; \
54964 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54968 +#define vmalloc_user(x) \
54970 + void *___retval; \
54971 + intoverflow_t ___x = (intoverflow_t)x; \
54972 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54973 + ___retval = NULL; \
54975 + ___retval = vmalloc_user((unsigned long)___x); \
54979 +#define vmalloc_exec(x) \
54981 + void *___retval; \
54982 + intoverflow_t ___x = (intoverflow_t)x; \
54983 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54984 + ___retval = NULL; \
54986 + ___retval = vmalloc_exec((unsigned long)___x); \
54990 +#define vmalloc_node(x, y) \
54992 + void *___retval; \
54993 + intoverflow_t ___x = (intoverflow_t)x; \
54994 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54995 + ___retval = NULL; \
54997 + ___retval = vmalloc_node((unsigned long)___x, (y));\
55001 +#define vzalloc_node(x, y) \
55003 + void *___retval; \
55004 + intoverflow_t ___x = (intoverflow_t)x; \
55005 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
55006 + ___retval = NULL; \
55008 + ___retval = vzalloc_node((unsigned long)___x, (y));\
55012 +#define vmalloc_32(x) \
55014 + void *___retval; \
55015 + intoverflow_t ___x = (intoverflow_t)x; \
55016 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
55017 + ___retval = NULL; \
55019 + ___retval = vmalloc_32((unsigned long)___x); \
55023 +#define vmalloc_32_user(x) \
55025 +void *___retval; \
55026 + intoverflow_t ___x = (intoverflow_t)x; \
55027 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
55028 + ___retval = NULL; \
55030 + ___retval = vmalloc_32_user((unsigned long)___x);\
55034 #endif /* _LINUX_VMALLOC_H */
55035 diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
55036 --- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
55037 +++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
55038 @@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
55040 * Zone based page accounting with per cpu differentials.
55042 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55043 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55045 static inline void zone_page_state_add(long x, struct zone *zone,
55046 enum zone_stat_item item)
55048 - atomic_long_add(x, &zone->vm_stat[item]);
55049 - atomic_long_add(x, &vm_stat[item]);
55050 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
55051 + atomic_long_add_unchecked(x, &vm_stat[item]);
55054 static inline unsigned long global_page_state(enum zone_stat_item item)
55056 - long x = atomic_long_read(&vm_stat[item]);
55057 + long x = atomic_long_read_unchecked(&vm_stat[item]);
55061 @@ -169,7 +169,7 @@ static inline unsigned long global_page_
55062 static inline unsigned long zone_page_state(struct zone *zone,
55063 enum zone_stat_item item)
55065 - long x = atomic_long_read(&zone->vm_stat[item]);
55066 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55070 @@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55071 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55072 enum zone_stat_item item)
55074 - long x = atomic_long_read(&zone->vm_stat[item]);
55075 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55079 @@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55081 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55083 - atomic_long_inc(&zone->vm_stat[item]);
55084 - atomic_long_inc(&vm_stat[item]);
55085 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
55086 + atomic_long_inc_unchecked(&vm_stat[item]);
55089 static inline void __inc_zone_page_state(struct page *page,
55090 @@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55092 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55094 - atomic_long_dec(&zone->vm_stat[item]);
55095 - atomic_long_dec(&vm_stat[item]);
55096 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
55097 + atomic_long_dec_unchecked(&vm_stat[item]);
55100 static inline void __dec_zone_page_state(struct page *page,
55101 diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55102 --- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55103 +++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55104 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
55105 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55107 /* the extension can override this */
55108 - struct v4l2_ioctl_ops ops;
55109 + v4l2_ioctl_ops_no_const ops;
55110 /* pointer to the saa7146 core ops */
55111 const struct v4l2_ioctl_ops *core_ops;
55113 diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55114 --- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55115 +++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55116 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55119 struct v4l2_file_operations {
55120 - struct module *owner;
55121 + struct module * const owner;
55122 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55123 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55124 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55125 diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55126 --- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55127 +++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55128 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55129 this function returns 0. If the name ends with a digit (e.g. cx18),
55130 then the name will be set to cx18-0 since cx180 looks really odd. */
55131 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55132 - atomic_t *instance);
55133 + atomic_unchecked_t *instance);
55135 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55136 Since the parent disappears this ensures that v4l2_dev doesn't have an
55137 diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55138 --- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55139 +++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55140 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55141 long (*vidioc_default) (struct file *file, void *fh,
55142 bool valid_prio, int cmd, void *arg);
55144 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55147 /* v4l debugging and diagnostics */
55148 diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55149 --- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55150 +++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55151 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
55152 void (*radioset_rsp)(void);
55153 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55154 struct cflayer *client_layer);
55158 /* Link Setup Parameters for CAIF-Links. */
55159 struct cfctrl_link_param {
55160 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
55162 struct cfsrvl serv;
55163 struct cfctrl_rsp res;
55164 - atomic_t req_seq_no;
55165 - atomic_t rsp_seq_no;
55166 + atomic_unchecked_t req_seq_no;
55167 + atomic_unchecked_t rsp_seq_no;
55168 struct list_head list;
55169 /* Protects from simultaneous access to first_req list */
55170 spinlock_t info_list_lock;
55171 diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55172 --- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55173 +++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55174 @@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55175 u8 dir, flow_resolve_t resolver, void *ctx);
55177 extern void flow_cache_flush(void);
55178 -extern atomic_t flow_cache_genid;
55179 +extern atomic_unchecked_t flow_cache_genid;
55182 diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55183 --- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55184 +++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55185 @@ -43,8 +43,8 @@ struct inet_peer {
55189 - atomic_t rid; /* Frag reception counter */
55190 - atomic_t ip_id_count; /* IP ID for the next packet */
55191 + atomic_unchecked_t rid; /* Frag reception counter */
55192 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55194 __u32 tcp_ts_stamp;
55195 u32 metrics[RTAX_MAX];
55196 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55199 inet_peer_refcheck(p);
55200 - return atomic_add_return(more, &p->ip_id_count) - more;
55201 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55204 #endif /* _NET_INETPEER_H */
55205 diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55206 --- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55207 +++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55208 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55210 #define FIB_RES_SADDR(net, res) \
55211 ((FIB_RES_NH(res).nh_saddr_genid == \
55212 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55213 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55214 FIB_RES_NH(res).nh_saddr : \
55215 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55216 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55217 diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55218 --- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55219 +++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55220 @@ -512,7 +512,7 @@ struct ip_vs_conn {
55221 struct ip_vs_conn *control; /* Master control connection */
55222 atomic_t n_control; /* Number of controlled ones */
55223 struct ip_vs_dest *dest; /* real server */
55224 - atomic_t in_pkts; /* incoming packet counter */
55225 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55227 /* packet transmitter for different forwarding methods. If it
55228 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55229 @@ -650,7 +650,7 @@ struct ip_vs_dest {
55230 __be16 port; /* port number of the server */
55231 union nf_inet_addr addr; /* IP address of the server */
55232 volatile unsigned flags; /* dest status flags */
55233 - atomic_t conn_flags; /* flags to copy to conn */
55234 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55235 atomic_t weight; /* server weight */
55237 atomic_t refcnt; /* reference counter */
55238 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55239 --- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55240 +++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55241 @@ -51,7 +51,7 @@ typedef struct {
55242 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55243 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55244 struct ircomm_info *);
55246 +} __no_const call_t;
55249 irda_queue_t queue;
55250 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55251 --- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55252 +++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55254 #include <linux/termios.h>
55255 #include <linux/timer.h>
55256 #include <linux/tty.h> /* struct tty_struct */
55257 +#include <asm/local.h>
55259 #include <net/irda/irias_object.h>
55260 #include <net/irda/ircomm_core.h>
55261 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55262 unsigned short close_delay;
55263 unsigned short closing_wait; /* time to wait before closing */
55266 - int blocked_open; /* # of blocked opens */
55267 + local_t open_count;
55268 + local_t blocked_open; /* # of blocked opens */
55270 /* Protect concurent access to :
55271 * o self->open_count
55272 diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55273 --- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55274 +++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55275 @@ -87,7 +87,7 @@ struct iucv_sock {
55276 struct iucv_sock_list {
55277 struct hlist_head head;
55279 - atomic_t autobind_name;
55280 + atomic_unchecked_t autobind_name;
55283 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55284 diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55285 --- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55286 +++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55287 @@ -95,7 +95,7 @@ struct lapb_cb {
55288 struct sk_buff_head write_queue;
55289 struct sk_buff_head ack_queue;
55290 unsigned char window;
55291 - struct lapb_register_struct callbacks;
55292 + struct lapb_register_struct *callbacks;
55294 /* FRMR control information */
55295 struct lapb_frame frmr_data;
55296 diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55297 --- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55298 +++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55299 @@ -117,7 +117,7 @@ struct neighbour {
55304 + const int family;
55305 void (*solicit)(struct neighbour *, struct sk_buff*);
55306 void (*error_report)(struct neighbour *, struct sk_buff*);
55307 int (*output)(struct sk_buff*);
55308 diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55309 --- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55310 +++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55311 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55312 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55315 - skb_trim(skb, (unsigned char *) mark - skb->data);
55316 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55320 diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55321 --- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55322 +++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55323 @@ -54,8 +54,8 @@ struct netns_ipv4 {
55324 int sysctl_rt_cache_rebuild_count;
55325 int current_rt_cache_rebuild_count;
55327 - atomic_t rt_genid;
55328 - atomic_t dev_addr_genid;
55329 + atomic_unchecked_t rt_genid;
55330 + atomic_unchecked_t dev_addr_genid;
55332 #ifdef CONFIG_IP_MROUTE
55333 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55334 diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55335 --- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55336 +++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55337 @@ -316,9 +316,9 @@ do { \
55339 #else /* SCTP_DEBUG */
55341 -#define SCTP_DEBUG_PRINTK(whatever...)
55342 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55343 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55344 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55345 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55346 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55347 #define SCTP_ENABLE_DEBUG
55348 #define SCTP_DISABLE_DEBUG
55349 #define SCTP_ASSERT(expr, str, func)
55350 diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55351 --- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55352 +++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55353 @@ -277,7 +277,7 @@ struct sock {
55357 - atomic_t sk_drops;
55358 + atomic_unchecked_t sk_drops;
55361 struct sk_filter __rcu *sk_filter;
55362 diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55363 --- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55364 +++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55365 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55366 struct tcp_seq_afinfo {
55368 sa_family_t family;
55369 - struct file_operations seq_fops;
55370 - struct seq_operations seq_ops;
55371 + file_operations_no_const seq_fops;
55372 + seq_operations_no_const seq_ops;
55375 struct tcp_iter_state {
55376 diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55377 --- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55378 +++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55379 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55381 sa_family_t family;
55382 struct udp_table *udp_table;
55383 - struct file_operations seq_fops;
55384 - struct seq_operations seq_ops;
55385 + file_operations_no_const seq_fops;
55386 + seq_operations_no_const seq_ops;
55389 struct udp_iter_state {
55390 diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55391 --- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55392 +++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55393 @@ -505,7 +505,7 @@ struct xfrm_policy {
55394 struct timer_list timer;
55396 struct flow_cache_object flo;
55398 + atomic_unchecked_t genid;
55401 struct xfrm_mark mark;
55402 diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55403 --- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55404 +++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55405 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
55408 int (*destroy_listen)(struct iw_cm_id *cm_id);
55413 * iw_create_cm_id - Create an IW CM identifier.
55414 diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55415 --- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55416 +++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55417 @@ -750,6 +750,7 @@ struct libfc_function_template {
55419 void (*disc_stop_final) (struct fc_lport *);
55421 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55424 * struct fc_disc - Discovery context
55425 @@ -853,7 +854,7 @@ struct fc_lport {
55426 struct fc_vport *vport;
55428 /* Operational Information */
55429 - struct libfc_function_template tt;
55430 + libfc_function_template_no_const tt;
55433 enum fc_lport_state state;
55434 diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55435 --- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55436 +++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55437 @@ -161,9 +161,9 @@ struct scsi_device {
55438 unsigned int max_device_blocked; /* what device_blocked counts down from */
55439 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55441 - atomic_t iorequest_cnt;
55442 - atomic_t iodone_cnt;
55443 - atomic_t ioerr_cnt;
55444 + atomic_unchecked_t iorequest_cnt;
55445 + atomic_unchecked_t iodone_cnt;
55446 + atomic_unchecked_t ioerr_cnt;
55448 struct device sdev_gendev,
55450 diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55451 --- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55452 +++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55453 @@ -666,9 +666,9 @@ struct fc_function_template {
55454 int (*bsg_timeout)(struct fc_bsg_job *);
55456 /* allocation lengths for host-specific data */
55457 - u32 dd_fcrport_size;
55458 - u32 dd_fcvport_size;
55460 + const u32 dd_fcrport_size;
55461 + const u32 dd_fcvport_size;
55462 + const u32 dd_bsg_size;
55465 * The driver sets these to tell the transport class it
55466 @@ -678,39 +678,39 @@ struct fc_function_template {
55469 /* remote port fixed attributes */
55470 - unsigned long show_rport_maxframe_size:1;
55471 - unsigned long show_rport_supported_classes:1;
55472 - unsigned long show_rport_dev_loss_tmo:1;
55473 + const unsigned long show_rport_maxframe_size:1;
55474 + const unsigned long show_rport_supported_classes:1;
55475 + const unsigned long show_rport_dev_loss_tmo:1;
55478 * target dynamic attributes
55479 * These should all be "1" if the driver uses the remote port
55480 * add/delete functions (so attributes reflect rport values).
55482 - unsigned long show_starget_node_name:1;
55483 - unsigned long show_starget_port_name:1;
55484 - unsigned long show_starget_port_id:1;
55485 + const unsigned long show_starget_node_name:1;
55486 + const unsigned long show_starget_port_name:1;
55487 + const unsigned long show_starget_port_id:1;
55489 /* host fixed attributes */
55490 - unsigned long show_host_node_name:1;
55491 - unsigned long show_host_port_name:1;
55492 - unsigned long show_host_permanent_port_name:1;
55493 - unsigned long show_host_supported_classes:1;
55494 - unsigned long show_host_supported_fc4s:1;
55495 - unsigned long show_host_supported_speeds:1;
55496 - unsigned long show_host_maxframe_size:1;
55497 - unsigned long show_host_serial_number:1;
55498 + const unsigned long show_host_node_name:1;
55499 + const unsigned long show_host_port_name:1;
55500 + const unsigned long show_host_permanent_port_name:1;
55501 + const unsigned long show_host_supported_classes:1;
55502 + const unsigned long show_host_supported_fc4s:1;
55503 + const unsigned long show_host_supported_speeds:1;
55504 + const unsigned long show_host_maxframe_size:1;
55505 + const unsigned long show_host_serial_number:1;
55506 /* host dynamic attributes */
55507 - unsigned long show_host_port_id:1;
55508 - unsigned long show_host_port_type:1;
55509 - unsigned long show_host_port_state:1;
55510 - unsigned long show_host_active_fc4s:1;
55511 - unsigned long show_host_speed:1;
55512 - unsigned long show_host_fabric_name:1;
55513 - unsigned long show_host_symbolic_name:1;
55514 - unsigned long show_host_system_hostname:1;
55515 + const unsigned long show_host_port_id:1;
55516 + const unsigned long show_host_port_type:1;
55517 + const unsigned long show_host_port_state:1;
55518 + const unsigned long show_host_active_fc4s:1;
55519 + const unsigned long show_host_speed:1;
55520 + const unsigned long show_host_fabric_name:1;
55521 + const unsigned long show_host_symbolic_name:1;
55522 + const unsigned long show_host_system_hostname:1;
55524 - unsigned long disable_target_scan:1;
55525 + const unsigned long disable_target_scan:1;
55529 diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55530 --- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55531 +++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55532 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55533 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55534 unsigned char val);
55535 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55539 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55541 diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55542 --- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55543 +++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55544 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55545 struct snd_hwdep_dsp_status *status);
55546 int (*dsp_load)(struct snd_hwdep *hw,
55547 struct snd_hwdep_dsp_image *image);
55552 struct snd_card *card;
55553 diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55554 --- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55555 +++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55556 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55557 struct snd_info_buffer *buffer);
55558 void (*write)(struct snd_info_entry *entry,
55559 struct snd_info_buffer *buffer);
55563 struct snd_info_entry_ops {
55564 int (*open)(struct snd_info_entry *entry,
55565 diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55566 --- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55567 +++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55568 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55569 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55570 int (*ack)(struct snd_pcm_substream *substream);
55572 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55576 diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55577 --- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55578 +++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55579 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55580 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55581 int (*csp_stop) (struct snd_sb_csp * p);
55582 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55588 diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55589 --- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55590 +++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55591 @@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55592 struct snd_soc_dai *);
55594 /* platform stream ops */
55595 - struct snd_pcm_ops *ops;
55596 + struct snd_pcm_ops * const ops;
55599 struct snd_soc_platform {
55600 diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55601 --- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55602 +++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55603 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55604 spinlock_t reg_lock;
55605 spinlock_t voice_lock;
55606 wait_queue_head_t interrupt_sleep;
55607 - atomic_t interrupt_sleep_count;
55608 + atomic_unchecked_t interrupt_sleep_count;
55609 struct snd_info_entry *proc_entry;
55610 const struct firmware *dsp_microcode;
55611 const struct firmware *controller_microcode;
55612 diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55613 --- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55614 +++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55615 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55616 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55617 int (*t10_pr_register)(struct se_cmd *);
55618 int (*t10_pr_clear)(struct se_cmd *);
55622 struct t10_reservation_template {
55623 /* Reservation effects all target ports */
55624 @@ -432,8 +432,8 @@ struct se_transport_task {
55625 atomic_t t_task_cdbs_left;
55626 atomic_t t_task_cdbs_ex_left;
55627 atomic_t t_task_cdbs_timeout_left;
55628 - atomic_t t_task_cdbs_sent;
55629 - atomic_t t_transport_aborted;
55630 + atomic_unchecked_t t_task_cdbs_sent;
55631 + atomic_unchecked_t t_transport_aborted;
55632 atomic_t t_transport_active;
55633 atomic_t t_transport_complete;
55634 atomic_t t_transport_queue_active;
55635 @@ -774,7 +774,7 @@ struct se_device {
55636 atomic_t active_cmds;
55637 atomic_t simple_cmds;
55638 atomic_t depth_left;
55639 - atomic_t dev_ordered_id;
55640 + atomic_unchecked_t dev_ordered_id;
55641 atomic_t dev_tur_active;
55642 atomic_t execute_tasks;
55643 atomic_t dev_status_thr_count;
55644 diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55645 --- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55646 +++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55647 @@ -36,7 +36,7 @@ struct softirq_action;
55649 TRACE_EVENT(irq_handler_entry,
55651 - TP_PROTO(int irq, struct irqaction *action),
55652 + TP_PROTO(int irq, const struct irqaction *action),
55654 TP_ARGS(irq, action),
55656 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55658 TRACE_EVENT(irq_handler_exit,
55660 - TP_PROTO(int irq, struct irqaction *action, int ret),
55661 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55663 TP_ARGS(irq, action, ret),
55665 diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55666 --- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55667 +++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55668 @@ -51,10 +51,10 @@ struct dlfb_data {
55670 u32 pseudo_palette[256];
55671 /* blit-only rendering path metrics, exposed through sysfs */
55672 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55673 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55674 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55675 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55676 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55677 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55678 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55679 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55682 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55683 diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55684 --- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55685 +++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55686 @@ -177,6 +177,7 @@ struct uvesafb_par {
55687 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55688 u8 pmi_setpal; /* PMI for palette changes */
55689 u16 *pmi_base; /* protected mode interface location */
55690 + u8 *pmi_code; /* protected mode code location */
55693 u8 *vbe_state_orig; /*
55694 diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55695 --- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55696 +++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55697 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55699 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55701 - int err = sys_mount(name, "/root", fs, flags, data);
55702 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55706 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55707 va_start(args, fmt);
55708 vsprintf(buf, fmt, args);
55710 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55711 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55713 sys_ioctl(fd, FDEJECT, 0);
55716 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55717 - fd = sys_open("/dev/console", O_RDWR, 0);
55718 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55720 sys_ioctl(fd, TCGETS, (long)&termios);
55721 termios.c_lflag &= ~ICANON;
55722 sys_ioctl(fd, TCSETSF, (long)&termios);
55723 - sys_read(fd, &c, 1);
55724 + sys_read(fd, (char __user *)&c, 1);
55725 termios.c_lflag |= ICANON;
55726 sys_ioctl(fd, TCSETSF, (long)&termios);
55728 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55731 devtmpfs_mount("dev");
55732 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55733 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55734 sys_chroot((const char __user __force *)".");
55736 diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55737 --- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55738 +++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55739 @@ -15,15 +15,15 @@ extern int root_mountflags;
55741 static inline int create_dev(char *name, dev_t dev)
55743 - sys_unlink(name);
55744 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55745 + sys_unlink((__force char __user *)name);
55746 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55749 #if BITS_PER_LONG == 32
55750 static inline u32 bstat(char *name)
55752 struct stat64 stat;
55753 - if (sys_stat64(name, &stat) != 0)
55754 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55756 if (!S_ISBLK(stat.st_mode))
55758 diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55759 --- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55760 +++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55761 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55762 create_dev("/dev/root.old", Root_RAM0);
55763 /* mount initrd on rootfs' /root */
55764 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55765 - sys_mkdir("/old", 0700);
55766 - root_fd = sys_open("/", 0, 0);
55767 - old_fd = sys_open("/old", 0, 0);
55768 + sys_mkdir((__force const char __user *)"/old", 0700);
55769 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55770 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55771 /* move initrd over / and chdir/chroot in initrd root */
55772 - sys_chdir("/root");
55773 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55775 + sys_chdir((__force const char __user *)"/root");
55776 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55777 + sys_chroot((__force const char __user *)".");
55780 * In case that a resume from disk is carried out by linuxrc or one of
55781 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55783 /* move initrd to rootfs' /old */
55784 sys_fchdir(old_fd);
55785 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55786 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55787 /* switch root and cwd back to / of rootfs */
55788 sys_fchdir(root_fd);
55790 + sys_chroot((__force const char __user *)".");
55792 sys_close(root_fd);
55794 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55795 - sys_chdir("/old");
55796 + sys_chdir((__force const char __user *)"/old");
55800 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55803 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55804 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55805 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55809 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55810 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55811 if (error == -ENOENT)
55812 printk("/initrd does not exist. Ignored.\n");
55814 printk("failed\n");
55815 printk(KERN_NOTICE "Unmounting old root\n");
55816 - sys_umount("/old", MNT_DETACH);
55817 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55818 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55821 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55822 * mounted in the normal path.
55824 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55825 - sys_unlink("/initrd.image");
55826 + sys_unlink((__force const char __user *)"/initrd.image");
55831 - sys_unlink("/initrd.image");
55832 + sys_unlink((__force const char __user *)"/initrd.image");
55835 diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55836 --- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55837 +++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55838 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55839 partitioned ? "_d" : "", minor,
55840 md_setup_args[ent].device_names);
55842 - fd = sys_open(name, 0, 0);
55843 + fd = sys_open((__force char __user *)name, 0, 0);
55845 printk(KERN_ERR "md: open failed - cannot start "
55846 "array %s\n", name);
55847 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55851 - fd = sys_open(name, 0, 0);
55852 + fd = sys_open((__force char __user *)name, 0, 0);
55853 sys_ioctl(fd, BLKRRPART, 0);
55856 diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55857 --- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55858 +++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55859 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55863 -static long __init do_utime(char __user *filename, time_t mtime)
55864 +static long __init do_utime(__force char __user *filename, time_t mtime)
55866 struct timespec t[2];
55868 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55869 struct dir_entry *de, *tmp;
55870 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55871 list_del(&de->list);
55872 - do_utime(de->name, de->mtime);
55873 + do_utime((__force char __user *)de->name, de->mtime);
55877 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55879 char *old = find_link(major, minor, ino, mode, collected);
55881 - return (sys_link(old, collected) < 0) ? -1 : 1;
55882 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55886 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55890 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55891 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55892 if (S_ISDIR(st.st_mode))
55894 + sys_rmdir((__force char __user *)path);
55896 - sys_unlink(path);
55897 + sys_unlink((__force char __user *)path);
55901 @@ -305,7 +305,7 @@ static int __init do_name(void)
55902 int openflags = O_WRONLY|O_CREAT;
55904 openflags |= O_TRUNC;
55905 - wfd = sys_open(collected, openflags, mode);
55906 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55909 sys_fchown(wfd, uid, gid);
55910 @@ -317,17 +317,17 @@ static int __init do_name(void)
55913 } else if (S_ISDIR(mode)) {
55914 - sys_mkdir(collected, mode);
55915 - sys_chown(collected, uid, gid);
55916 - sys_chmod(collected, mode);
55917 + sys_mkdir((__force char __user *)collected, mode);
55918 + sys_chown((__force char __user *)collected, uid, gid);
55919 + sys_chmod((__force char __user *)collected, mode);
55920 dir_add(collected, mtime);
55921 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55922 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55923 if (maybe_link() == 0) {
55924 - sys_mknod(collected, mode, rdev);
55925 - sys_chown(collected, uid, gid);
55926 - sys_chmod(collected, mode);
55927 - do_utime(collected, mtime);
55928 + sys_mknod((__force char __user *)collected, mode, rdev);
55929 + sys_chown((__force char __user *)collected, uid, gid);
55930 + sys_chmod((__force char __user *)collected, mode);
55931 + do_utime((__force char __user *)collected, mtime);
55935 @@ -336,15 +336,15 @@ static int __init do_name(void)
55936 static int __init do_copy(void)
55938 if (count >= body_len) {
55939 - sys_write(wfd, victim, body_len);
55940 + sys_write(wfd, (__force char __user *)victim, body_len);
55942 - do_utime(vcollected, mtime);
55943 + do_utime((__force char __user *)vcollected, mtime);
55949 - sys_write(wfd, victim, count);
55950 + sys_write(wfd, (__force char __user *)victim, count);
55954 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55956 collected[N_ALIGN(name_len) + body_len] = '\0';
55957 clean_path(collected, 0);
55958 - sys_symlink(collected + N_ALIGN(name_len), collected);
55959 - sys_lchown(collected, uid, gid);
55960 - do_utime(collected, mtime);
55961 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55962 + sys_lchown((__force char __user *)collected, uid, gid);
55963 + do_utime((__force char __user *)collected, mtime);
55965 next_state = Reset;
55967 diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55968 --- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55969 +++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55970 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55973 bool "Disable heap randomization"
55977 Randomizing heap placement makes heap exploits harder, but it
55978 also breaks ancient binaries (including anything libc5 based).
55979 diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
55980 --- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
55981 +++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
55982 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55983 extern void tc_init(void);
55986 +extern void grsecurity_init(void);
55989 * Debug helper: via this flag we know that we are in 'early bootup code'
55990 * where only the boot processor is running with IRQ disabled. This means
55991 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55993 __setup("reset_devices", set_reset_devices);
55995 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55996 +extern char pax_enter_kernel_user[];
55997 +extern char pax_exit_kernel_user[];
55998 +extern pgdval_t clone_pgd_mask;
56001 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
56002 +static int __init setup_pax_nouderef(char *str)
56004 +#ifdef CONFIG_X86_32
56005 + unsigned int cpu;
56006 + struct desc_struct *gdt;
56008 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
56009 + gdt = get_cpu_gdt_table(cpu);
56010 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
56011 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
56012 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
56013 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
56015 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
56017 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
56018 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
56019 + clone_pgd_mask = ~(pgdval_t)0UL;
56024 +early_param("pax_nouderef", setup_pax_nouderef);
56027 +#ifdef CONFIG_PAX_SOFTMODE
56030 +static int __init setup_pax_softmode(char *str)
56032 + get_option(&str, &pax_softmode);
56035 +__setup("pax_softmode=", setup_pax_softmode);
56038 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
56039 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
56040 static const char *panic_later, *panic_param;
56041 @@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
56043 int count = preempt_count();
56045 + const char *msg1 = "", *msg2 = "";
56047 if (initcall_debug)
56048 ret = do_one_initcall_debug(fn);
56049 @@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
56050 sprintf(msgbuf, "error code %d ", ret);
56052 if (preempt_count() != count) {
56053 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
56054 + msg1 = " preemption imbalance";
56055 preempt_count() = count;
56057 if (irqs_disabled()) {
56058 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
56059 + msg2 = " disabled interrupts";
56060 local_irq_enable();
56063 - printk("initcall %pF returned with %s\n", fn, msgbuf);
56064 + if (msgbuf[0] || *msg1 || *msg2) {
56065 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56069 @@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56072 /* Open the /dev/console on the rootfs, this should never fail */
56073 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56074 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56075 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56078 @@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56079 if (!ramdisk_execute_command)
56080 ramdisk_execute_command = "/init";
56082 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56083 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56084 ramdisk_execute_command = NULL;
56085 prepare_namespace();
56088 + grsecurity_init();
56091 * Ok, we have completed the initial bootup, and
56092 * we're essentially up and running. Get rid of the
56093 diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56094 --- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56095 +++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56096 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56097 mq_bytes = (mq_msg_tblsz +
56098 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56100 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56101 spin_lock(&mq_lock);
56102 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56103 u->mq_bytes + mq_bytes >
56104 diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56105 --- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56106 +++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56107 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56108 return security_msg_queue_associate(msq, msgflg);
56111 +static struct ipc_ops msg_ops = {
56112 + .getnew = newque,
56113 + .associate = msg_security,
56114 + .more_checks = NULL
56117 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56119 struct ipc_namespace *ns;
56120 - struct ipc_ops msg_ops;
56121 struct ipc_params msg_params;
56123 ns = current->nsproxy->ipc_ns;
56125 - msg_ops.getnew = newque;
56126 - msg_ops.associate = msg_security;
56127 - msg_ops.more_checks = NULL;
56129 msg_params.key = key;
56130 msg_params.flg = msgflg;
56132 diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56133 --- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56134 +++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56135 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56139 +static struct ipc_ops sem_ops = {
56140 + .getnew = newary,
56141 + .associate = sem_security,
56142 + .more_checks = sem_more_checks
56145 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56147 struct ipc_namespace *ns;
56148 - struct ipc_ops sem_ops;
56149 struct ipc_params sem_params;
56151 ns = current->nsproxy->ipc_ns;
56152 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56153 if (nsems < 0 || nsems > ns->sc_semmsl)
56156 - sem_ops.getnew = newary;
56157 - sem_ops.associate = sem_security;
56158 - sem_ops.more_checks = sem_more_checks;
56160 sem_params.key = key;
56161 sem_params.flg = semflg;
56162 sem_params.u.nsems = nsems;
56163 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56165 struct list_head tasks;
56167 + pax_track_stack();
56169 sma = sem_lock_check(ns, semid);
56171 return PTR_ERR(sma);
56172 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56173 struct ipc_namespace *ns;
56174 struct list_head tasks;
56176 + pax_track_stack();
56178 ns = current->nsproxy->ipc_ns;
56180 if (nsops < 1 || semid < 0)
56181 diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56182 --- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56183 +++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56184 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56185 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56188 +#ifdef CONFIG_GRKERNSEC
56189 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56190 + const time_t shm_createtime, const uid_t cuid,
56191 + const int shmid);
56192 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56193 + const time_t shm_createtime);
56196 void shm_init_ns(struct ipc_namespace *ns)
56198 ns->shm_ctlmax = SHMMAX;
56199 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56200 shp->shm_lprid = 0;
56201 shp->shm_atim = shp->shm_dtim = 0;
56202 shp->shm_ctim = get_seconds();
56203 +#ifdef CONFIG_GRKERNSEC
56205 + struct timespec timeval;
56206 + do_posix_clock_monotonic_gettime(&timeval);
56208 + shp->shm_createtime = timeval.tv_sec;
56211 shp->shm_segsz = size;
56212 shp->shm_nattch = 0;
56213 shp->shm_file = file;
56214 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56218 +static struct ipc_ops shm_ops = {
56219 + .getnew = newseg,
56220 + .associate = shm_security,
56221 + .more_checks = shm_more_checks
56224 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56226 struct ipc_namespace *ns;
56227 - struct ipc_ops shm_ops;
56228 struct ipc_params shm_params;
56230 ns = current->nsproxy->ipc_ns;
56232 - shm_ops.getnew = newseg;
56233 - shm_ops.associate = shm_security;
56234 - shm_ops.more_checks = shm_more_checks;
56236 shm_params.key = key;
56237 shm_params.flg = shmflg;
56238 shm_params.u.size = size;
56239 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56243 - struct file *uninitialized_var(shm_file);
56245 lru_add_drain_all(); /* drain pagevecs to lru lists */
56247 shp = shm_lock_check(ns, shmid);
56248 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56252 +#ifdef CONFIG_GRKERNSEC
56253 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56254 + shp->shm_perm.cuid, shmid) ||
56255 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56261 path = shp->shm_file->f_path;
56264 +#ifdef CONFIG_GRKERNSEC
56265 + shp->shm_lapid = current->pid;
56267 size = i_size_read(path.dentry->d_inode);
56270 diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56271 --- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56272 +++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56273 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56275 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56276 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56277 - file->f_op->write(file, (char *)&ac,
56278 + file->f_op->write(file, (__force char __user *)&ac,
56279 sizeof(acct_t), &file->f_pos);
56280 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56282 diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56283 --- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56284 +++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56285 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56286 3) suppressed due to audit_rate_limit
56287 4) suppressed due to audit_backlog_limit
56289 -static atomic_t audit_lost = ATOMIC_INIT(0);
56290 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56292 /* The netlink socket. */
56293 static struct sock *audit_sock;
56294 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56298 - atomic_inc(&audit_lost);
56299 + atomic_inc_unchecked(&audit_lost);
56301 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56303 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56304 printk(KERN_WARNING
56305 "audit: audit_lost=%d audit_rate_limit=%d "
56306 "audit_backlog_limit=%d\n",
56307 - atomic_read(&audit_lost),
56308 + atomic_read_unchecked(&audit_lost),
56310 audit_backlog_limit);
56311 audit_panic(message);
56312 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56313 status_set.pid = audit_pid;
56314 status_set.rate_limit = audit_rate_limit;
56315 status_set.backlog_limit = audit_backlog_limit;
56316 - status_set.lost = atomic_read(&audit_lost);
56317 + status_set.lost = atomic_read_unchecked(&audit_lost);
56318 status_set.backlog = skb_queue_len(&audit_skb_queue);
56319 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56320 &status_set, sizeof(status_set));
56321 diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56322 --- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56323 +++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56324 @@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56327 /* global counter which is incremented every time something logs in */
56328 -static atomic_t session_id = ATOMIC_INIT(0);
56329 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56332 * audit_set_loginuid - set a task's audit_context loginuid
56333 @@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56335 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56337 - unsigned int sessionid = atomic_inc_return(&session_id);
56338 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56339 struct audit_context *context = task->audit_context;
56341 if (context && context->in_syscall) {
56342 diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56343 --- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56344 +++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56345 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56346 * before modification is attempted and the application
56349 + if (tocopy > ARRAY_SIZE(kdata))
56352 if (copy_to_user(dataptr, kdata, tocopy
56353 * sizeof(struct __user_cap_data_struct))) {
56355 @@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56359 - if (security_capable(ns, current_cred(), cap) == 0) {
56360 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56361 current->flags |= PF_SUPERPRIV;
56364 @@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56366 EXPORT_SYMBOL(ns_capable);
56368 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56370 + if (unlikely(!cap_valid(cap))) {
56371 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56375 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56376 + current->flags |= PF_SUPERPRIV;
56381 +EXPORT_SYMBOL(ns_capable_nolog);
56383 +bool capable_nolog(int cap)
56385 + return ns_capable_nolog(&init_user_ns, cap);
56387 +EXPORT_SYMBOL(capable_nolog);
56390 * task_ns_capable - Determine whether current task has a superior
56391 * capability targeted at a specific task's user namespace.
56392 @@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56394 EXPORT_SYMBOL(task_ns_capable);
56396 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56398 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56400 +EXPORT_SYMBOL(task_ns_capable_nolog);
56403 * nsown_capable - Check superior capability to one's own user_ns
56404 * @cap: The capability in question
56405 diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56406 --- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56407 +++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56408 @@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56409 struct hlist_head *hhead;
56410 struct cg_cgroup_link *link;
56412 + pax_track_stack();
56414 /* First see if we already have a cgroup group that matches
56415 * the desired set */
56416 read_lock(&css_set_lock);
56417 diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56418 --- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56419 +++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56422 #include <linux/linkage.h>
56423 #include <linux/compat.h>
56424 +#include <linux/module.h>
56425 #include <linux/errno.h>
56426 #include <linux/time.h>
56427 #include <linux/signal.h>
56428 diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56429 --- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56430 +++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56431 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56432 struct proc_dir_entry *entry;
56434 /* create the current config file */
56435 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56436 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56437 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56438 + &ikconfig_file_ops);
56439 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56440 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56441 + &ikconfig_file_ops);
56444 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56445 &ikconfig_file_ops);
56451 diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56452 --- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56453 +++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56454 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56456 void __put_cred(struct cred *cred)
56458 + pax_track_stack();
56460 kdebug("__put_cred(%p{%d,%d})", cred,
56461 atomic_read(&cred->usage),
56462 read_cred_subscribers(cred));
56463 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56467 + pax_track_stack();
56469 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56470 atomic_read(&tsk->cred->usage),
56471 read_cred_subscribers(tsk->cred));
56472 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56474 const struct cred *cred;
56476 + pax_track_stack();
56481 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56485 + pax_track_stack();
56487 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56490 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56491 const struct cred *old;
56494 + pax_track_stack();
56496 validate_process_creds();
56498 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56499 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56500 struct thread_group_cred *tgcred = NULL;
56503 + pax_track_stack();
56506 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56508 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56512 + pax_track_stack();
56516 !p->cred->thread_keyring &&
56517 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56518 struct task_struct *task = current;
56519 const struct cred *old = task->real_cred;
56521 + pax_track_stack();
56523 kdebug("commit_creds(%p{%d,%d})", new,
56524 atomic_read(&new->usage),
56525 read_cred_subscribers(new));
56526 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56528 get_cred(new); /* we will require a ref for the subj creds too */
56530 + gr_set_role_label(task, new->uid, new->gid);
56532 /* dumpability changes */
56533 if (old->euid != new->euid ||
56534 old->egid != new->egid ||
56535 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56537 void abort_creds(struct cred *new)
56539 + pax_track_stack();
56541 kdebug("abort_creds(%p{%d,%d})", new,
56542 atomic_read(&new->usage),
56543 read_cred_subscribers(new));
56544 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56546 const struct cred *old = current->cred;
56548 + pax_track_stack();
56550 kdebug("override_creds(%p{%d,%d})", new,
56551 atomic_read(&new->usage),
56552 read_cred_subscribers(new));
56553 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56555 const struct cred *override = current->cred;
56557 + pax_track_stack();
56559 kdebug("revert_creds(%p{%d,%d})", old,
56560 atomic_read(&old->usage),
56561 read_cred_subscribers(old));
56562 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56563 const struct cred *old;
56566 + pax_track_stack();
56568 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56571 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56573 int set_security_override(struct cred *new, u32 secid)
56575 + pax_track_stack();
56577 return security_kernel_act_as(new, secid);
56579 EXPORT_SYMBOL(set_security_override);
56580 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56584 + pax_track_stack();
56586 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56589 diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56590 --- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56591 +++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56592 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56594 static atomic_t masters_in_kgdb;
56595 static atomic_t slaves_in_kgdb;
56596 -static atomic_t kgdb_break_tasklet_var;
56597 +static atomic_unchecked_t kgdb_break_tasklet_var;
56598 atomic_t kgdb_setting_breakpoint;
56600 struct task_struct *kgdb_usethread;
56601 @@ -129,7 +129,7 @@ int kgdb_single_step;
56602 static pid_t kgdb_sstep_pid;
56604 /* to keep track of the CPU which is doing the single stepping*/
56605 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56606 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56609 * If you are debugging a problem where roundup (the collection of
56610 @@ -542,7 +542,7 @@ return_normal:
56611 * kernel will only try for the value of sstep_tries before
56612 * giving up and continuing on.
56614 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56615 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56616 (kgdb_info[cpu].task &&
56617 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56618 atomic_set(&kgdb_active, -1);
56619 @@ -636,8 +636,8 @@ cpu_master_loop:
56623 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56624 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56625 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56626 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56627 if (kgdb_info[sstep_cpu].task)
56628 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56630 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56631 static void kgdb_tasklet_bpt(unsigned long ing)
56634 - atomic_set(&kgdb_break_tasklet_var, 0);
56635 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56638 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56640 void kgdb_schedule_breakpoint(void)
56642 - if (atomic_read(&kgdb_break_tasklet_var) ||
56643 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56644 atomic_read(&kgdb_active) != -1 ||
56645 atomic_read(&kgdb_setting_breakpoint))
56647 - atomic_inc(&kgdb_break_tasklet_var);
56648 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56649 tasklet_schedule(&kgdb_tasklet_breakpoint);
56651 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56652 diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56653 --- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56654 +++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56655 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56656 list_for_each_entry(mod, kdb_modules, list) {
56658 kdb_printf("%-20s%8u 0x%p ", mod->name,
56659 - mod->core_size, (void *)mod);
56660 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56661 #ifdef CONFIG_MODULE_UNLOAD
56662 kdb_printf("%4d ", module_refcount(mod));
56664 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56665 kdb_printf(" (Loading)");
56667 kdb_printf(" (Live)");
56668 - kdb_printf(" 0x%p", mod->module_core);
56669 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56671 #ifdef CONFIG_MODULE_UNLOAD
56673 diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56674 --- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56675 +++ linux-2.6.39.4/kernel/exit.c 2011-08-05 19:44:37.000000000 -0400
56677 #include <asm/pgtable.h>
56678 #include <asm/mmu_context.h>
56680 +#ifdef CONFIG_GRKERNSEC
56681 +extern rwlock_t grsec_exec_file_lock;
56684 static void exit_mm(struct task_struct * tsk);
56686 static void __unhash_process(struct task_struct *p, bool group_dead)
56687 @@ -169,6 +173,8 @@ void release_task(struct task_struct * p
56688 struct task_struct *leader;
56691 + gr_del_task_from_ip_table(p);
56693 tracehook_prepare_release_task(p);
56694 /* don't need to get the RCU readlock here - the process is dead and
56695 * can't be modifying its own credentials. But shut RCU-lockdep up */
56696 @@ -338,11 +344,22 @@ static void reparent_to_kthreadd(void)
56698 write_lock_irq(&tasklist_lock);
56700 +#ifdef CONFIG_GRKERNSEC
56701 + write_lock(&grsec_exec_file_lock);
56702 + if (current->exec_file) {
56703 + fput(current->exec_file);
56704 + current->exec_file = NULL;
56706 + write_unlock(&grsec_exec_file_lock);
56709 ptrace_unlink(current);
56710 /* Reparent to init */
56711 current->real_parent = current->parent = kthreadd_task;
56712 list_move_tail(&current->sibling, &current->real_parent->children);
56714 + gr_set_kernel_label(current);
56716 /* Set the exit signal to SIGCHLD so we signal init on exit */
56717 current->exit_signal = SIGCHLD;
56719 @@ -394,7 +411,7 @@ int allow_signal(int sig)
56720 * know it'll be handled, so that they don't get converted to
56721 * SIGKILL or just silently dropped.
56723 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56724 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56725 recalc_sigpending();
56726 spin_unlock_irq(&current->sighand->siglock);
56728 @@ -430,6 +447,17 @@ void daemonize(const char *name, ...)
56729 vsnprintf(current->comm, sizeof(current->comm), name, args);
56732 +#ifdef CONFIG_GRKERNSEC
56733 + write_lock(&grsec_exec_file_lock);
56734 + if (current->exec_file) {
56735 + fput(current->exec_file);
56736 + current->exec_file = NULL;
56738 + write_unlock(&grsec_exec_file_lock);
56741 + gr_set_kernel_label(current);
56744 * If we were started as result of loading a module, close all of the
56745 * user space pages. We don't need them, and if we didn't close them
56746 @@ -905,15 +933,8 @@ NORET_TYPE void do_exit(long code)
56747 struct task_struct *tsk = current;
56750 - profile_task_exit(tsk);
56752 - WARN_ON(atomic_read(&tsk->fs_excl));
56753 - WARN_ON(blk_needs_flush_plug(tsk));
56755 if (unlikely(in_interrupt()))
56756 panic("Aiee, killing interrupt handler!");
56757 - if (unlikely(!tsk->pid))
56758 - panic("Attempted to kill the idle task!");
56761 * If do_exit is called because this processes oopsed, it's possible
56762 @@ -924,6 +945,14 @@ NORET_TYPE void do_exit(long code)
56766 + profile_task_exit(tsk);
56768 + WARN_ON(atomic_read(&tsk->fs_excl));
56769 + WARN_ON(blk_needs_flush_plug(tsk));
56771 + if (unlikely(!tsk->pid))
56772 + panic("Attempted to kill the idle task!");
56774 tracehook_report_exit(&code);
56776 validate_creds_for_do_exit(tsk);
56777 @@ -984,6 +1013,9 @@ NORET_TYPE void do_exit(long code)
56778 tsk->exit_code = code;
56779 taskstats_exit(tsk, group_dead);
56781 + gr_acl_handle_psacct(tsk, code);
56782 + gr_acl_handle_exit();
56787 diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56788 --- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56789 +++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56790 @@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56791 *stackend = STACK_END_MAGIC; /* for overflow detection */
56793 #ifdef CONFIG_CC_STACKPROTECTOR
56794 - tsk->stack_canary = get_random_int();
56795 + tsk->stack_canary = pax_get_random_long();
56798 /* One for us, one for whoever does the "release_task()" (usually parent) */
56799 @@ -309,13 +309,78 @@ out:
56803 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56805 + struct vm_area_struct *tmp;
56806 + unsigned long charge;
56807 + struct mempolicy *pol;
56808 + struct file *file;
56811 + if (mpnt->vm_flags & VM_ACCOUNT) {
56812 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56813 + if (security_vm_enough_memory(len))
56817 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56822 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56823 + pol = mpol_dup(vma_policy(mpnt));
56825 + goto fail_nomem_policy;
56826 + vma_set_policy(tmp, pol);
56827 + if (anon_vma_fork(tmp, mpnt))
56828 + goto fail_nomem_anon_vma_fork;
56829 + tmp->vm_flags &= ~VM_LOCKED;
56830 + tmp->vm_next = tmp->vm_prev = NULL;
56831 + tmp->vm_mirror = NULL;
56832 + file = tmp->vm_file;
56834 + struct inode *inode = file->f_path.dentry->d_inode;
56835 + struct address_space *mapping = file->f_mapping;
56838 + if (tmp->vm_flags & VM_DENYWRITE)
56839 + atomic_dec(&inode->i_writecount);
56840 + spin_lock(&mapping->i_mmap_lock);
56841 + if (tmp->vm_flags & VM_SHARED)
56842 + mapping->i_mmap_writable++;
56843 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
56844 + flush_dcache_mmap_lock(mapping);
56845 + /* insert tmp into the share list, just after mpnt */
56846 + vma_prio_tree_add(tmp, mpnt);
56847 + flush_dcache_mmap_unlock(mapping);
56848 + spin_unlock(&mapping->i_mmap_lock);
56852 + * Clear hugetlb-related page reserves for children. This only
56853 + * affects MAP_PRIVATE mappings. Faults generated by the child
56854 + * are not guaranteed to succeed, even if read-only
56856 + if (is_vm_hugetlb_page(tmp))
56857 + reset_vma_resv_huge_pages(tmp);
56861 +fail_nomem_anon_vma_fork:
56863 +fail_nomem_policy:
56864 + kmem_cache_free(vm_area_cachep, tmp);
56866 + vm_unacct_memory(charge);
56870 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56872 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56873 struct rb_node **rb_link, *rb_parent;
56875 - unsigned long charge;
56876 - struct mempolicy *pol;
56878 down_write(&oldmm->mmap_sem);
56879 flush_cache_dup_mm(oldmm);
56880 @@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56883 mm->mmap_cache = NULL;
56884 - mm->free_area_cache = oldmm->mmap_base;
56885 - mm->cached_hole_size = ~0UL;
56886 + mm->free_area_cache = oldmm->free_area_cache;
56887 + mm->cached_hole_size = oldmm->cached_hole_size;
56889 cpumask_clear(mm_cpumask(mm));
56890 mm->mm_rb = RB_ROOT;
56891 @@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56894 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56895 - struct file *file;
56897 if (mpnt->vm_flags & VM_DONTCOPY) {
56898 long pages = vma_pages(mpnt);
56899 mm->total_vm -= pages;
56900 @@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56905 - if (mpnt->vm_flags & VM_ACCOUNT) {
56906 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56907 - if (security_vm_enough_memory(len))
56911 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56915 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56916 - pol = mpol_dup(vma_policy(mpnt));
56917 - retval = PTR_ERR(pol);
56919 - goto fail_nomem_policy;
56920 - vma_set_policy(tmp, pol);
56922 - if (anon_vma_fork(tmp, mpnt))
56923 - goto fail_nomem_anon_vma_fork;
56924 - tmp->vm_flags &= ~VM_LOCKED;
56925 - tmp->vm_next = tmp->vm_prev = NULL;
56926 - file = tmp->vm_file;
56928 - struct inode *inode = file->f_path.dentry->d_inode;
56929 - struct address_space *mapping = file->f_mapping;
56932 - if (tmp->vm_flags & VM_DENYWRITE)
56933 - atomic_dec(&inode->i_writecount);
56934 - spin_lock(&mapping->i_mmap_lock);
56935 - if (tmp->vm_flags & VM_SHARED)
56936 - mapping->i_mmap_writable++;
56937 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
56938 - flush_dcache_mmap_lock(mapping);
56939 - /* insert tmp into the share list, just after mpnt */
56940 - vma_prio_tree_add(tmp, mpnt);
56941 - flush_dcache_mmap_unlock(mapping);
56942 - spin_unlock(&mapping->i_mmap_lock);
56943 + tmp = dup_vma(mm, mpnt);
56945 + retval = -ENOMEM;
56950 - * Clear hugetlb-related page reserves for children. This only
56951 - * affects MAP_PRIVATE mappings. Faults generated by the child
56952 - * are not guaranteed to succeed, even if read-only
56954 - if (is_vm_hugetlb_page(tmp))
56955 - reset_vma_resv_huge_pages(tmp);
56958 * Link in the new vma and copy the page table entries.
56961 @@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56966 +#ifdef CONFIG_PAX_SEGMEXEC
56967 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56968 + struct vm_area_struct *mpnt_m;
56970 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56971 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56973 + if (!mpnt->vm_mirror)
56976 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56977 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56978 + mpnt->vm_mirror = mpnt_m;
56980 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56981 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56982 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56983 + mpnt->vm_mirror->vm_mirror = mpnt;
56990 /* a new mm has just been created */
56991 arch_dup_mmap(oldmm, mm);
56993 @@ -431,14 +476,6 @@ out:
56994 flush_tlb_mm(oldmm);
56995 up_write(&oldmm->mmap_sem);
56997 -fail_nomem_anon_vma_fork:
56999 -fail_nomem_policy:
57000 - kmem_cache_free(vm_area_cachep, tmp);
57002 - retval = -ENOMEM;
57003 - vm_unacct_memory(charge);
57007 static inline int mm_alloc_pgd(struct mm_struct * mm)
57008 @@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
57009 spin_unlock(&fs->lock);
57013 + atomic_inc(&fs->users);
57014 spin_unlock(&fs->lock);
57017 tsk->fs = copy_fs_struct(fs);
57020 + gr_set_chroot_entries(tsk, &tsk->fs->root);
57024 @@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
57025 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
57029 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
57031 if (atomic_read(&p->real_cred->user->processes) >=
57032 task_rlimit(p, RLIMIT_NPROC)) {
57033 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
57034 - p->real_cred->user != INIT_USER)
57035 + if (p->real_cred->user != INIT_USER &&
57036 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
57037 goto bad_fork_free;
57040 @@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
57041 goto bad_fork_free_pid;
57044 + gr_copy_label(p);
57046 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
57048 * Clear TID on mm_release()?
57049 @@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
57053 + gr_log_forkfail(retval);
57055 return ERR_PTR(retval);
57058 @@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
57059 if (clone_flags & CLONE_PARENT_SETTID)
57060 put_user(nr, parent_tidptr);
57062 + gr_handle_brute_check();
57064 if (clone_flags & CLONE_VFORK) {
57065 p->vfork_done = &vfork;
57066 init_completion(&vfork);
57067 @@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57070 /* don't need lock here; in the worst case we'll do useless copy */
57071 - if (fs->users == 1)
57072 + if (atomic_read(&fs->users) == 1)
57075 *new_fsp = copy_fs_struct(fs);
57076 @@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57078 spin_lock(&fs->lock);
57079 current->fs = new_fs;
57081 + gr_set_chroot_entries(current, &current->fs->root);
57082 + if (atomic_dec_return(&fs->users))
57086 diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57087 --- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57088 +++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57090 #include <linux/mount.h>
57091 #include <linux/pagemap.h>
57092 #include <linux/syscalls.h>
57093 +#include <linux/ptrace.h>
57094 #include <linux/signal.h>
57095 #include <linux/module.h>
57096 #include <linux/magic.h>
57097 @@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57098 struct page *page, *page_head;
57101 +#ifdef CONFIG_PAX_SEGMEXEC
57102 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57107 * The futex address must be "naturally" aligned.
57109 @@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57110 struct futex_q q = futex_q_init;
57113 + pax_track_stack();
57118 @@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57119 struct futex_q q = futex_q_init;
57122 + pax_track_stack();
57127 @@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57129 struct robust_list_head __user *head;
57131 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57132 const struct cred *cred = current_cred(), *pcred;
57135 if (!futex_cmpxchg_enabled)
57137 @@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57141 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57142 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57145 pcred = __task_cred(p);
57146 /* If victim is in different user_ns, then uids are not
57147 comparable, so we must have CAP_SYS_PTRACE */
57148 @@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57149 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57153 head = p->robust_list;
57156 @@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57160 + mm_segment_t oldfs;
57163 * This will fail and we want it. Some arch implementations do
57164 @@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57165 * implementation, the non-functional ones will return
57168 + oldfs = get_fs();
57170 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57171 futex_cmpxchg_enabled = 1;
57174 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57175 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57176 diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57177 --- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57178 +++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57180 #include <linux/compat.h>
57181 #include <linux/nsproxy.h>
57182 #include <linux/futex.h>
57183 +#include <linux/ptrace.h>
57185 #include <asm/uaccess.h>
57187 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57189 struct compat_robust_list_head __user *head;
57191 - const struct cred *cred = current_cred(), *pcred;
57192 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57193 + const struct cred *cred = current_cred();
57194 + const struct cred *pcred;
57197 if (!futex_cmpxchg_enabled)
57199 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57203 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57204 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57207 pcred = __task_cred(p);
57208 /* If victim is in different user_ns, then uids are not
57209 comparable, so we must have CAP_SYS_PTRACE */
57210 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57211 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57215 head = p->compat_robust_list;
57218 diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57219 --- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57220 +++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57221 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57224 #ifdef CONFIG_MODULES
57225 -static inline int within(void *addr, void *start, unsigned long size)
57227 - return ((addr >= start) && (addr < start + size));
57230 /* Update list and generate events when modules are unloaded. */
57231 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57233 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57235 /* Remove entries located in module from linked list. */
57236 for (info = gcov_info_head; info; info = info->next) {
57237 - if (within(info, mod->module_core, mod->core_size)) {
57238 + if (within_module_core_rw((unsigned long)info, mod)) {
57240 prev->next = info->next;
57242 diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57243 --- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57244 +++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57245 @@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57246 local_irq_restore(flags);
57249 -static void run_hrtimer_softirq(struct softirq_action *h)
57250 +static void run_hrtimer_softirq(void)
57252 hrtimer_peek_ahead_timers();
57254 diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57255 --- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57256 +++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57257 @@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57258 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57264 /* wakeup-capable irqs can be shared between drivers that
57265 * don't need to have the same sleep mode behaviors.
57267 diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57268 --- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57269 +++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57270 @@ -49,6 +49,17 @@ void jump_label_unlock(void)
57271 mutex_unlock(&jump_label_mutex);
57274 +static void jump_label_swap(void *a, void *b, int size)
57276 + struct jump_entry t;
57278 + t = *(struct jump_entry *)a;
57279 + pax_open_kernel();
57280 + *(struct jump_entry *)a = *(struct jump_entry *)b;
57281 + *(struct jump_entry *)b = t;
57282 + pax_close_kernel();
57285 static int jump_label_cmp(const void *a, const void *b)
57287 const struct jump_entry *jea = a;
57288 @@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57290 size = (((unsigned long)stop - (unsigned long)start)
57291 / sizeof(struct jump_entry));
57292 - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57293 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57296 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57297 @@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57298 count = e_module->nr_entries;
57299 iter = e_module->table;
57301 - if (within_module_init(iter->code, mod))
57302 + if (within_module_init(iter->code, mod)) {
57303 + pax_open_kernel();
57305 + pax_close_kernel();
57310 diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57311 --- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57312 +++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57314 * Changed the compression method from stem compression to "table lookup"
57315 * compression (see scripts/kallsyms.c for a more complete description)
57317 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57318 +#define __INCLUDED_BY_HIDESYM 1
57320 #include <linux/kallsyms.h>
57321 #include <linux/module.h>
57322 #include <linux/init.h>
57323 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57325 static inline int is_kernel_inittext(unsigned long addr)
57327 + if (system_state != SYSTEM_BOOTING)
57330 if (addr >= (unsigned long)_sinittext
57331 && addr <= (unsigned long)_einittext)
57336 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57337 +#ifdef CONFIG_MODULES
57338 +static inline int is_module_text(unsigned long addr)
57340 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57343 + addr = ktla_ktva(addr);
57344 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57347 +static inline int is_module_text(unsigned long addr)
57354 static inline int is_kernel_text(unsigned long addr)
57356 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57357 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57359 static inline int is_kernel(unsigned long addr)
57362 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57363 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57366 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57368 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57372 return in_gate_area_no_mm(addr);
57375 static int is_ksym_addr(unsigned long addr)
57378 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57379 + if (is_module_text(addr))
57384 return is_kernel(addr);
57386 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57388 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57390 - iter->name[0] = '\0';
57391 iter->nameoff = get_symbol_offset(new_pos);
57392 iter->pos = new_pos;
57394 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57396 struct kallsym_iter *iter = m->private;
57398 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57399 + if (current_uid())
57403 /* Some debugging symbols have no name. Ignore them. */
57404 if (!iter->name[0])
57406 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57407 struct kallsym_iter *iter;
57410 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57411 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57414 reset_iter(iter, 0);
57415 diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57416 --- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57417 +++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57418 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57419 * If module auto-loading support is disabled then this function
57420 * becomes a no-operation.
57422 -int __request_module(bool wait, const char *fmt, ...)
57423 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57426 char module_name[MODULE_NAME_LEN];
57427 unsigned int max_modprobes;
57429 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57430 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57431 static char *envp[] = { "HOME=/",
57433 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57434 @@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57435 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57436 static int kmod_loop_msg;
57438 - va_start(args, fmt);
57439 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57441 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57442 if (ret >= MODULE_NAME_LEN)
57443 return -ENAMETOOLONG;
57445 @@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57449 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57450 + if (!current_uid()) {
57451 + /* hack to workaround consolekit/udisks stupidity */
57452 + read_lock(&tasklist_lock);
57453 + if (!strcmp(current->comm, "mount") &&
57454 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57455 + read_unlock(&tasklist_lock);
57456 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57459 + read_unlock(&tasklist_lock);
57463 /* If modprobe needs a service that is in a module, we get a recursive
57464 * loop. Limit the number of running kmod threads to max_threads/2 or
57465 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57466 @@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57467 atomic_dec(&kmod_concurrent);
57471 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57476 + va_start(args, fmt);
57477 + ret = ____request_module(wait, module_param, fmt, args);
57483 +int __request_module(bool wait, const char *fmt, ...)
57488 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57489 + if (current_uid()) {
57490 + char module_param[MODULE_NAME_LEN];
57492 + memset(module_param, 0, sizeof(module_param));
57494 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57496 + va_start(args, fmt);
57497 + ret = ____request_module(wait, module_param, fmt, args);
57504 + va_start(args, fmt);
57505 + ret = ____request_module(wait, NULL, fmt, args);
57511 EXPORT_SYMBOL(__request_module);
57512 #endif /* CONFIG_MODULES */
57514 diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57515 --- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57516 +++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57517 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57518 * kernel image and loaded module images reside. This is required
57519 * so x86_64 can correctly handle the %rip-relative fixups.
57521 - kip->insns = module_alloc(PAGE_SIZE);
57522 + kip->insns = module_alloc_exec(PAGE_SIZE);
57526 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57528 if (!list_is_singular(&kip->list)) {
57529 list_del(&kip->list);
57530 - module_free(NULL, kip->insns);
57531 + module_free_exec(NULL, kip->insns);
57535 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57538 unsigned long offset = 0, size = 0;
57539 - char *modname, namebuf[128];
57540 + char *modname, namebuf[KSYM_NAME_LEN];
57541 const char *symbol_name;
57543 struct kprobe_blackpoint *kb;
57544 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57545 const char *sym = NULL;
57546 unsigned int i = *(loff_t *) v;
57547 unsigned long offset = 0;
57548 - char *modname, namebuf[128];
57549 + char *modname, namebuf[KSYM_NAME_LEN];
57551 head = &kprobe_table[i];
57553 diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57554 --- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57555 +++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57556 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
57557 end = (unsigned long) &_end,
57558 addr = (unsigned long) obj;
57560 +#ifdef CONFIG_PAX_KERNEXEC
57561 + start = ktla_ktva(start);
57567 @@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57568 if (!static_obj(lock->key)) {
57570 printk("INFO: trying to register non-static key.\n");
57571 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57572 printk("the code is fine but needs lockdep annotation.\n");
57573 printk("turning off the locking correctness validator.\n");
57575 @@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57579 - atomic_inc((atomic_t *)&class->ops);
57580 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57581 if (very_verbose(class)) {
57582 printk("\nacquire class [%p] %s", class->key, class->name);
57583 if (class->name_version > 1)
57584 diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57585 --- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57586 +++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57587 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57589 static void print_name(struct seq_file *m, struct lock_class *class)
57592 + char str[KSYM_NAME_LEN];
57593 const char *name = class->name;
57596 diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57597 --- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57598 +++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57600 #include <linux/kmemleak.h>
57601 #include <linux/jump_label.h>
57602 #include <linux/pfn.h>
57603 +#include <linux/grsecurity.h>
57605 #define CREATE_TRACE_POINTS
57606 #include <trace/events/module.h>
57607 @@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57609 /* Bounds of module allocation, for speeding __module_address.
57610 * Protected by module_mutex. */
57611 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57612 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57613 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57615 int register_module_notifier(struct notifier_block * nb)
57617 @@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57620 list_for_each_entry_rcu(mod, &modules, list) {
57621 - struct symsearch arr[] = {
57622 + struct symsearch modarr[] = {
57623 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57624 NOT_GPL_ONLY, false },
57625 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57626 @@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57630 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57631 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57635 @@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57636 static int percpu_modalloc(struct module *mod,
57637 unsigned long size, unsigned long align)
57639 - if (align > PAGE_SIZE) {
57640 + if (align-1 >= PAGE_SIZE) {
57641 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57642 mod->name, align, PAGE_SIZE);
57644 @@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57646 #ifdef CONFIG_SYSFS
57648 -#ifdef CONFIG_KALLSYMS
57649 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57650 static inline bool sect_empty(const Elf_Shdr *sect)
57652 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57653 @@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57655 unsigned long total_pages;
57657 - if (mod->module_core == module_region) {
57658 + if (mod->module_core_rx == module_region) {
57659 /* Set core as NX+RW */
57660 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57661 - set_memory_nx((unsigned long)mod->module_core, total_pages);
57662 - set_memory_rw((unsigned long)mod->module_core, total_pages);
57663 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57664 + set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57665 + set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57667 - } else if (mod->module_init == module_region) {
57668 + } else if (mod->module_init_rx == module_region) {
57669 /* Set init as NX+RW */
57670 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57671 - set_memory_nx((unsigned long)mod->module_init, total_pages);
57672 - set_memory_rw((unsigned long)mod->module_init, total_pages);
57673 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57674 + set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57675 + set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57679 @@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57681 mutex_lock(&module_mutex);
57682 list_for_each_entry_rcu(mod, &modules, list) {
57683 - if ((mod->module_core) && (mod->core_text_size)) {
57684 - set_page_attributes(mod->module_core,
57685 - mod->module_core + mod->core_text_size,
57686 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57687 + set_page_attributes(mod->module_core_rx,
57688 + mod->module_core_rx + mod->core_size_rx,
57691 - if ((mod->module_init) && (mod->init_text_size)) {
57692 - set_page_attributes(mod->module_init,
57693 - mod->module_init + mod->init_text_size,
57694 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57695 + set_page_attributes(mod->module_init_rx,
57696 + mod->module_init_rx + mod->init_size_rx,
57700 @@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57702 mutex_lock(&module_mutex);
57703 list_for_each_entry_rcu(mod, &modules, list) {
57704 - if ((mod->module_core) && (mod->core_text_size)) {
57705 - set_page_attributes(mod->module_core,
57706 - mod->module_core + mod->core_text_size,
57707 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57708 + set_page_attributes(mod->module_core_rx,
57709 + mod->module_core_rx + mod->core_size_rx,
57712 - if ((mod->module_init) && (mod->init_text_size)) {
57713 - set_page_attributes(mod->module_init,
57714 - mod->module_init + mod->init_text_size,
57715 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57716 + set_page_attributes(mod->module_init_rx,
57717 + mod->module_init_rx + mod->init_size_rx,
57721 @@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57722 destroy_params(mod->kp, mod->num_kp);
57724 /* This may be NULL, but that's OK */
57725 - unset_section_ro_nx(mod, mod->module_init);
57726 - module_free(mod, mod->module_init);
57727 + unset_section_ro_nx(mod, mod->module_init_rx);
57728 + module_free(mod, mod->module_init_rw);
57729 + module_free_exec(mod, mod->module_init_rx);
57731 percpu_modfree(mod);
57733 /* Free lock-classes: */
57734 - lockdep_free_key_range(mod->module_core, mod->core_size);
57735 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57736 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57738 /* Finally, free the core (containing the module structure) */
57739 - unset_section_ro_nx(mod, mod->module_core);
57740 - module_free(mod, mod->module_core);
57741 + unset_section_ro_nx(mod, mod->module_core_rx);
57742 + module_free_exec(mod, mod->module_core_rx);
57743 + module_free(mod, mod->module_core_rw);
57746 update_protections(current->mm);
57747 @@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57750 const struct kernel_symbol *ksym;
57751 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57752 + int is_fs_load = 0;
57753 + int register_filesystem_found = 0;
57756 + p = strstr(mod->args, "grsec_modharden_fs");
57758 + char *endptr = p + strlen("grsec_modharden_fs");
57759 + /* copy \0 as well */
57760 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57765 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57766 const char *name = info->strtab + sym[i].st_name;
57768 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57769 + /* it's a real shame this will never get ripped and copied
57772 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57773 + register_filesystem_found = 1;
57776 switch (sym[i].st_shndx) {
57778 /* We compiled with -fno-common. These are not
57779 @@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57780 ksym = resolve_symbol_wait(mod, info, name);
57781 /* Ok if resolved. */
57782 if (ksym && !IS_ERR(ksym)) {
57783 + pax_open_kernel();
57784 sym[i].st_value = ksym->value;
57785 + pax_close_kernel();
57789 @@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57790 secbase = (unsigned long)mod_percpu(mod);
57792 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57793 + pax_open_kernel();
57794 sym[i].st_value += secbase;
57795 + pax_close_kernel();
57800 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57801 + if (is_fs_load && !register_filesystem_found) {
57802 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57810 @@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57811 || s->sh_entsize != ~0UL
57812 || strstarts(sname, ".init"))
57814 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57815 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57816 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57818 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57819 DEBUGP("\t%s\n", name);
57822 - case 0: /* executable */
57823 - mod->core_size = debug_align(mod->core_size);
57824 - mod->core_text_size = mod->core_size;
57826 - case 1: /* RO: text and ro-data */
57827 - mod->core_size = debug_align(mod->core_size);
57828 - mod->core_ro_size = mod->core_size;
57830 - case 3: /* whole core */
57831 - mod->core_size = debug_align(mod->core_size);
57836 DEBUGP("Init section allocation order:\n");
57837 @@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57838 || s->sh_entsize != ~0UL
57839 || !strstarts(sname, ".init"))
57841 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57842 - | INIT_OFFSET_MASK);
57843 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57844 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57846 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57847 + s->sh_entsize |= INIT_OFFSET_MASK;
57848 DEBUGP("\t%s\n", sname);
57851 - case 0: /* executable */
57852 - mod->init_size = debug_align(mod->init_size);
57853 - mod->init_text_size = mod->init_size;
57855 - case 1: /* RO: text and ro-data */
57856 - mod->init_size = debug_align(mod->init_size);
57857 - mod->init_ro_size = mod->init_size;
57859 - case 3: /* whole init */
57860 - mod->init_size = debug_align(mod->init_size);
57866 @@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57868 /* Put symbol section at end of init part of module. */
57869 symsect->sh_flags |= SHF_ALLOC;
57870 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57871 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57872 info->index.sym) | INIT_OFFSET_MASK;
57873 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57875 @@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57878 /* Append room for core symbols at end of core part. */
57879 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57880 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57881 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57882 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57884 /* Put string table section at end of init part of module. */
57885 strsect->sh_flags |= SHF_ALLOC;
57886 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57887 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57888 info->index.str) | INIT_OFFSET_MASK;
57889 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57891 /* Append room for core symbols' strings at end of core part. */
57892 - info->stroffs = mod->core_size;
57893 + info->stroffs = mod->core_size_rx;
57894 __set_bit(0, info->strmap);
57895 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57896 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57899 static void add_kallsyms(struct module *mod, const struct load_info *info)
57900 @@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57901 /* Make sure we get permanent strtab: don't use info->strtab. */
57902 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57904 + pax_open_kernel();
57906 /* Set types up while we still have access to sections. */
57907 for (i = 0; i < mod->num_symtab; i++)
57908 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57910 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57911 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57914 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57915 @@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57917 mod->core_num_syms = ndst;
57919 - mod->core_strtab = s = mod->module_core + info->stroffs;
57920 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57921 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57922 if (test_bit(i, info->strmap))
57923 *++s = mod->strtab[i];
57925 + pax_close_kernel();
57928 static inline void layout_symtab(struct module *mod, struct load_info *info)
57929 @@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57930 ddebug_remove_module(debug->modname);
57933 -static void *module_alloc_update_bounds(unsigned long size)
57934 +static void *module_alloc_update_bounds_rw(unsigned long size)
57936 void *ret = module_alloc(size);
57939 mutex_lock(&module_mutex);
57940 /* Update module bounds. */
57941 - if ((unsigned long)ret < module_addr_min)
57942 - module_addr_min = (unsigned long)ret;
57943 - if ((unsigned long)ret + size > module_addr_max)
57944 - module_addr_max = (unsigned long)ret + size;
57945 + if ((unsigned long)ret < module_addr_min_rw)
57946 + module_addr_min_rw = (unsigned long)ret;
57947 + if ((unsigned long)ret + size > module_addr_max_rw)
57948 + module_addr_max_rw = (unsigned long)ret + size;
57949 + mutex_unlock(&module_mutex);
57954 +static void *module_alloc_update_bounds_rx(unsigned long size)
57956 + void *ret = module_alloc_exec(size);
57959 + mutex_lock(&module_mutex);
57960 + /* Update module bounds. */
57961 + if ((unsigned long)ret < module_addr_min_rx)
57962 + module_addr_min_rx = (unsigned long)ret;
57963 + if ((unsigned long)ret + size > module_addr_max_rx)
57964 + module_addr_max_rx = (unsigned long)ret + size;
57965 mutex_unlock(&module_mutex);
57968 @@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57971 /* Do the allocs. */
57972 - ptr = module_alloc_update_bounds(mod->core_size);
57973 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57975 * The pointer to this block is stored in the module structure
57976 * which is inside the block. Just mark it as not being a
57977 @@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
57981 - memset(ptr, 0, mod->core_size);
57982 - mod->module_core = ptr;
57983 + memset(ptr, 0, mod->core_size_rw);
57984 + mod->module_core_rw = ptr;
57986 - ptr = module_alloc_update_bounds(mod->init_size);
57987 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57989 * The pointer to this block is stored in the module structure
57990 * which is inside the block. This block doesn't need to be
57991 * scanned as it contains data and code that will be freed
57992 * after the module is initialized.
57994 - kmemleak_ignore(ptr);
57995 - if (!ptr && mod->init_size) {
57996 - module_free(mod, mod->module_core);
57997 + kmemleak_not_leak(ptr);
57998 + if (!ptr && mod->init_size_rw) {
57999 + module_free(mod, mod->module_core_rw);
58002 - memset(ptr, 0, mod->init_size);
58003 - mod->module_init = ptr;
58004 + memset(ptr, 0, mod->init_size_rw);
58005 + mod->module_init_rw = ptr;
58007 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
58008 + kmemleak_not_leak(ptr);
58010 + module_free(mod, mod->module_init_rw);
58011 + module_free(mod, mod->module_core_rw);
58015 + pax_open_kernel();
58016 + memset(ptr, 0, mod->core_size_rx);
58017 + pax_close_kernel();
58018 + mod->module_core_rx = ptr;
58020 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
58021 + kmemleak_not_leak(ptr);
58022 + if (!ptr && mod->init_size_rx) {
58023 + module_free_exec(mod, mod->module_core_rx);
58024 + module_free(mod, mod->module_init_rw);
58025 + module_free(mod, mod->module_core_rw);
58029 + pax_open_kernel();
58030 + memset(ptr, 0, mod->init_size_rx);
58031 + pax_close_kernel();
58032 + mod->module_init_rx = ptr;
58034 /* Transfer each section which specifies SHF_ALLOC */
58035 DEBUGP("final section addresses:\n");
58036 @@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
58037 if (!(shdr->sh_flags & SHF_ALLOC))
58040 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
58041 - dest = mod->module_init
58042 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58044 - dest = mod->module_core + shdr->sh_entsize;
58045 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
58046 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58047 + dest = mod->module_init_rw
58048 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58050 + dest = mod->module_init_rx
58051 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58053 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58054 + dest = mod->module_core_rw + shdr->sh_entsize;
58056 + dest = mod->module_core_rx + shdr->sh_entsize;
58059 + if (shdr->sh_type != SHT_NOBITS) {
58061 +#ifdef CONFIG_PAX_KERNEXEC
58062 +#ifdef CONFIG_X86_64
58063 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
58064 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
58066 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58067 + pax_open_kernel();
58068 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58069 + pax_close_kernel();
58073 - if (shdr->sh_type != SHT_NOBITS)
58074 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58076 /* Update sh_addr to point to copy in image. */
58077 - shdr->sh_addr = (unsigned long)dest;
58079 +#ifdef CONFIG_PAX_KERNEXEC
58080 + if (shdr->sh_flags & SHF_EXECINSTR)
58081 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
58085 + shdr->sh_addr = (unsigned long)dest;
58086 DEBUGP("\t0x%lx %s\n",
58087 shdr->sh_addr, info->secstrings + shdr->sh_name);
58089 @@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58090 * Do it before processing of module parameters, so the module
58091 * can provide parameter accessor functions of its own.
58093 - if (mod->module_init)
58094 - flush_icache_range((unsigned long)mod->module_init,
58095 - (unsigned long)mod->module_init
58096 - + mod->init_size);
58097 - flush_icache_range((unsigned long)mod->module_core,
58098 - (unsigned long)mod->module_core + mod->core_size);
58099 + if (mod->module_init_rx)
58100 + flush_icache_range((unsigned long)mod->module_init_rx,
58101 + (unsigned long)mod->module_init_rx
58102 + + mod->init_size_rx);
58103 + flush_icache_range((unsigned long)mod->module_core_rx,
58104 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
58108 @@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58110 kfree(info->strmap);
58111 percpu_modfree(mod);
58112 - module_free(mod, mod->module_init);
58113 - module_free(mod, mod->module_core);
58114 + module_free_exec(mod, mod->module_init_rx);
58115 + module_free_exec(mod, mod->module_core_rx);
58116 + module_free(mod, mod->module_init_rw);
58117 + module_free(mod, mod->module_core_rw);
58120 static int post_relocation(struct module *mod, const struct load_info *info)
58121 @@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58125 + /* Now copy in args */
58126 + mod->args = strndup_user(uargs, ~0UL >> 1);
58127 + if (IS_ERR(mod->args)) {
58128 + err = PTR_ERR(mod->args);
58129 + goto free_unload;
58132 /* Set up MODINFO_ATTR fields */
58133 setup_modinfo(mod, &info);
58135 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
58139 + if (strstr(mod->args, "grsec_modharden_netdev")) {
58140 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58142 + goto free_modinfo;
58143 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58144 + p += strlen("grsec_modharden_normal");
58145 + p2 = strstr(p, "_");
58148 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58152 + goto free_modinfo;
58157 /* Fix up syms, so that st_value is a pointer to location. */
58158 err = simplify_symbols(mod, &info);
58160 @@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58162 flush_module_icache(mod);
58164 - /* Now copy in args */
58165 - mod->args = strndup_user(uargs, ~0UL >> 1);
58166 - if (IS_ERR(mod->args)) {
58167 - err = PTR_ERR(mod->args);
58168 - goto free_arch_cleanup;
58171 /* Mark state as coming so strong_try_module_get() ignores us. */
58172 mod->state = MODULE_STATE_COMING;
58174 @@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58176 mutex_unlock(&module_mutex);
58177 synchronize_sched();
58178 - kfree(mod->args);
58179 - free_arch_cleanup:
58180 module_arch_cleanup(mod);
58183 + kfree(mod->args);
58185 module_unload_free(mod);
58187 @@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58188 MODULE_STATE_COMING, mod);
58190 /* Set RO and NX regions for core */
58191 - set_section_ro_nx(mod->module_core,
58192 - mod->core_text_size,
58193 - mod->core_ro_size,
58195 + set_section_ro_nx(mod->module_core_rx,
58196 + mod->core_size_rx,
58197 + mod->core_size_rx,
58198 + mod->core_size_rx);
58200 /* Set RO and NX regions for init */
58201 - set_section_ro_nx(mod->module_init,
58202 - mod->init_text_size,
58203 - mod->init_ro_size,
58205 + set_section_ro_nx(mod->module_init_rx,
58206 + mod->init_size_rx,
58207 + mod->init_size_rx,
58208 + mod->init_size_rx);
58211 /* Start the module */
58212 @@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58213 mod->symtab = mod->core_symtab;
58214 mod->strtab = mod->core_strtab;
58216 - unset_section_ro_nx(mod, mod->module_init);
58217 - module_free(mod, mod->module_init);
58218 - mod->module_init = NULL;
58219 - mod->init_size = 0;
58220 - mod->init_text_size = 0;
58221 + unset_section_ro_nx(mod, mod->module_init_rx);
58222 + module_free(mod, mod->module_init_rw);
58223 + module_free_exec(mod, mod->module_init_rx);
58224 + mod->module_init_rw = NULL;
58225 + mod->module_init_rx = NULL;
58226 + mod->init_size_rw = 0;
58227 + mod->init_size_rx = 0;
58228 mutex_unlock(&module_mutex);
58231 @@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58232 unsigned long nextval;
58234 /* At worse, next value is at end of module */
58235 - if (within_module_init(addr, mod))
58236 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58237 + if (within_module_init_rx(addr, mod))
58238 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58239 + else if (within_module_init_rw(addr, mod))
58240 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58241 + else if (within_module_core_rx(addr, mod))
58242 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58243 + else if (within_module_core_rw(addr, mod))
58244 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58246 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58249 /* Scan for closest preceding symbol, and next symbol. (ELF
58250 starts real symbols at 1). */
58251 @@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58254 seq_printf(m, "%s %u",
58255 - mod->name, mod->init_size + mod->core_size);
58256 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58257 print_unload_info(m, mod);
58259 /* Informative for users. */
58260 @@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58261 mod->state == MODULE_STATE_COMING ? "Loading":
58263 /* Used by oprofile and other similar tools. */
58264 - seq_printf(m, " 0x%pK", mod->module_core);
58265 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58269 @@ -3260,7 +3384,17 @@ static const struct file_operations proc
58271 static int __init proc_modules_init(void)
58273 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58274 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58275 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58276 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58277 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58279 proc_create("modules", 0, NULL, &proc_modules_operations);
58282 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58286 module_init(proc_modules_init);
58287 @@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58289 struct module *mod;
58291 - if (addr < module_addr_min || addr > module_addr_max)
58292 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58293 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58296 list_for_each_entry_rcu(mod, &modules, list)
58297 - if (within_module_core(addr, mod)
58298 - || within_module_init(addr, mod))
58299 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58303 @@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58305 struct module *__module_text_address(unsigned long addr)
58307 - struct module *mod = __module_address(addr);
58308 + struct module *mod;
58310 +#ifdef CONFIG_X86_32
58311 + addr = ktla_ktva(addr);
58314 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58317 + mod = __module_address(addr);
58320 /* Make sure it's within the text section. */
58321 - if (!within(addr, mod->module_init, mod->init_text_size)
58322 - && !within(addr, mod->module_core, mod->core_text_size))
58323 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58327 diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58328 --- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58329 +++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58330 @@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58334 - struct thread_info *owner;
58335 + struct task_struct *owner;
58338 * If we own the BKL, then don't spin. The owner of
58339 @@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58340 spin_lock_mutex(&lock->wait_lock, flags);
58342 debug_mutex_lock_common(lock, &waiter);
58343 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58344 + debug_mutex_add_waiter(lock, &waiter, task);
58346 /* add waiting tasks to the end of the waitqueue (FIFO): */
58347 list_add_tail(&waiter.list, &lock->wait_list);
58348 @@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58349 * TASK_UNINTERRUPTIBLE case.)
58351 if (unlikely(signal_pending_state(state, task))) {
58352 - mutex_remove_waiter(lock, &waiter,
58353 - task_thread_info(task));
58354 + mutex_remove_waiter(lock, &waiter, task);
58355 mutex_release(&lock->dep_map, 1, ip);
58356 spin_unlock_mutex(&lock->wait_lock, flags);
58358 @@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58360 lock_acquired(&lock->dep_map, ip);
58361 /* got the lock - rejoice! */
58362 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58363 + mutex_remove_waiter(lock, &waiter, task);
58364 mutex_set_owner(lock);
58366 /* set it to 0 if there are no waiters left: */
58367 diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58368 --- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58369 +++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58370 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58373 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58374 - struct thread_info *ti)
58375 + struct task_struct *task)
58377 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58379 /* Mark the current thread as blocked on the lock: */
58380 - ti->task->blocked_on = waiter;
58381 + task->blocked_on = waiter;
58384 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58385 - struct thread_info *ti)
58386 + struct task_struct *task)
58388 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58389 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58390 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58391 - ti->task->blocked_on = NULL;
58392 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58393 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58394 + task->blocked_on = NULL;
58396 list_del_init(&waiter->list);
58397 waiter->task = NULL;
58398 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58401 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58402 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58403 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
58404 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58405 mutex_clear_owner(lock);
58407 diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58408 --- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58409 +++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58410 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58411 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58412 extern void debug_mutex_add_waiter(struct mutex *lock,
58413 struct mutex_waiter *waiter,
58414 - struct thread_info *ti);
58415 + struct task_struct *task);
58416 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58417 - struct thread_info *ti);
58418 + struct task_struct *task);
58419 extern void debug_mutex_unlock(struct mutex *lock);
58420 extern void debug_mutex_init(struct mutex *lock, const char *name,
58421 struct lock_class_key *key);
58423 static inline void mutex_set_owner(struct mutex *lock)
58425 - lock->owner = current_thread_info();
58426 + lock->owner = current;
58429 static inline void mutex_clear_owner(struct mutex *lock)
58430 diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58431 --- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58432 +++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58435 static inline void mutex_set_owner(struct mutex *lock)
58437 - lock->owner = current_thread_info();
58438 + lock->owner = current;
58441 static inline void mutex_clear_owner(struct mutex *lock)
58442 diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58443 --- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58444 +++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58445 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58447 padata->cb_cpu = cb_cpu;
58449 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58450 - atomic_set(&pd->seq_nr, -1);
58451 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58452 + atomic_set_unchecked(&pd->seq_nr, -1);
58454 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58455 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58457 target_cpu = padata_cpu_hash(padata);
58458 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58459 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58460 padata_init_pqueues(pd);
58461 padata_init_squeues(pd);
58462 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58463 - atomic_set(&pd->seq_nr, -1);
58464 + atomic_set_unchecked(&pd->seq_nr, -1);
58465 atomic_set(&pd->reorder_objects, 0);
58466 atomic_set(&pd->refcnt, 0);
58468 diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58469 --- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58470 +++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58471 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58474 printk(KERN_WARNING "------------[ cut here ]------------\n");
58475 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58476 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58477 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58479 printk(KERN_WARNING "Hardware name: %s\n", board);
58480 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58482 void __stack_chk_fail(void)
58484 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58486 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58487 __builtin_return_address(0));
58489 EXPORT_SYMBOL(__stack_chk_fail);
58490 diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58491 --- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58492 +++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58493 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58497 -static atomic64_t perf_event_id;
58498 +static atomic64_unchecked_t perf_event_id;
58500 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58501 enum event_type_t event_type);
58502 @@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58504 static inline u64 perf_event_count(struct perf_event *event)
58506 - return local64_read(&event->count) + atomic64_read(&event->child_count);
58507 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58510 static u64 perf_event_read(struct perf_event *event)
58511 @@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58512 mutex_lock(&event->child_mutex);
58513 total += perf_event_read(event);
58514 *enabled += event->total_time_enabled +
58515 - atomic64_read(&event->child_total_time_enabled);
58516 + atomic64_read_unchecked(&event->child_total_time_enabled);
58517 *running += event->total_time_running +
58518 - atomic64_read(&event->child_total_time_running);
58519 + atomic64_read_unchecked(&event->child_total_time_running);
58521 list_for_each_entry(child, &event->child_list, child_list) {
58522 total += perf_event_read(child);
58523 @@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58524 userpg->offset -= local64_read(&event->hw.prev_count);
58526 userpg->time_enabled = event->total_time_enabled +
58527 - atomic64_read(&event->child_total_time_enabled);
58528 + atomic64_read_unchecked(&event->child_total_time_enabled);
58530 userpg->time_running = event->total_time_running +
58531 - atomic64_read(&event->child_total_time_running);
58532 + atomic64_read_unchecked(&event->child_total_time_running);
58536 @@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58537 values[n++] = perf_event_count(event);
58538 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58539 values[n++] = enabled +
58540 - atomic64_read(&event->child_total_time_enabled);
58541 + atomic64_read_unchecked(&event->child_total_time_enabled);
58543 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58544 values[n++] = running +
58545 - atomic64_read(&event->child_total_time_running);
58546 + atomic64_read_unchecked(&event->child_total_time_running);
58548 if (read_format & PERF_FORMAT_ID)
58549 values[n++] = primary_event_id(event);
58550 @@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58551 event->parent = parent_event;
58553 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58554 - event->id = atomic64_inc_return(&perf_event_id);
58555 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
58557 event->state = PERF_EVENT_STATE_INACTIVE;
58559 @@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58561 * Add back the child's count to the parent's count:
58563 - atomic64_add(child_val, &parent_event->child_count);
58564 - atomic64_add(child_event->total_time_enabled,
58565 + atomic64_add_unchecked(child_val, &parent_event->child_count);
58566 + atomic64_add_unchecked(child_event->total_time_enabled,
58567 &parent_event->child_total_time_enabled);
58568 - atomic64_add(child_event->total_time_running,
58569 + atomic64_add_unchecked(child_event->total_time_running,
58570 &parent_event->child_total_time_running);
58573 diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58574 --- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58575 +++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58577 #include <linux/rculist.h>
58578 #include <linux/bootmem.h>
58579 #include <linux/hash.h>
58580 +#include <linux/security.h>
58581 #include <linux/pid_namespace.h>
58582 #include <linux/init_task.h>
58583 #include <linux/syscalls.h>
58584 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58586 int pid_max = PID_MAX_DEFAULT;
58588 -#define RESERVED_PIDS 300
58589 +#define RESERVED_PIDS 500
58591 int pid_max_min = RESERVED_PIDS + 1;
58592 int pid_max_max = PID_MAX_LIMIT;
58593 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58595 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58597 + struct task_struct *task;
58599 rcu_lockdep_assert(rcu_read_lock_held());
58600 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58601 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58603 + if (gr_pid_is_chrooted(task))
58609 struct task_struct *find_task_by_vpid(pid_t vnr)
58610 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58611 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58614 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58616 + rcu_lockdep_assert(rcu_read_lock_held());
58617 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58620 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58623 diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58624 --- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58625 +++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58627 #include <linux/posix-timers.h>
58628 #include <linux/errno.h>
58629 #include <linux/math64.h>
58630 +#include <linux/security.h>
58631 #include <asm/uaccess.h>
58632 #include <linux/kernel_stat.h>
58633 #include <trace/events/timer.h>
58634 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58636 static __init int init_posix_cpu_timers(void)
58638 - struct k_clock process = {
58639 + static struct k_clock process = {
58640 .clock_getres = process_cpu_clock_getres,
58641 .clock_get = process_cpu_clock_get,
58642 .timer_create = process_cpu_timer_create,
58643 .nsleep = process_cpu_nsleep,
58644 .nsleep_restart = process_cpu_nsleep_restart,
58646 - struct k_clock thread = {
58647 + static struct k_clock thread = {
58648 .clock_getres = thread_cpu_clock_getres,
58649 .clock_get = thread_cpu_clock_get,
58650 .timer_create = thread_cpu_timer_create,
58651 diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58652 --- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58653 +++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58655 #include <linux/idr.h>
58656 #include <linux/posix-clock.h>
58657 #include <linux/posix-timers.h>
58658 +#include <linux/grsecurity.h>
58659 #include <linux/syscalls.h>
58660 #include <linux/wait.h>
58661 #include <linux/workqueue.h>
58662 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58663 * which we beg off on and pass to do_sys_settimeofday().
58666 -static struct k_clock posix_clocks[MAX_CLOCKS];
58667 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58670 * These ones are defined below.
58671 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58673 static __init int init_posix_timers(void)
58675 - struct k_clock clock_realtime = {
58676 + static struct k_clock clock_realtime = {
58677 .clock_getres = hrtimer_get_res,
58678 .clock_get = posix_clock_realtime_get,
58679 .clock_set = posix_clock_realtime_set,
58680 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58681 .timer_get = common_timer_get,
58682 .timer_del = common_timer_del,
58684 - struct k_clock clock_monotonic = {
58685 + static struct k_clock clock_monotonic = {
58686 .clock_getres = hrtimer_get_res,
58687 .clock_get = posix_ktime_get_ts,
58688 .nsleep = common_nsleep,
58689 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58690 .timer_get = common_timer_get,
58691 .timer_del = common_timer_del,
58693 - struct k_clock clock_monotonic_raw = {
58694 + static struct k_clock clock_monotonic_raw = {
58695 .clock_getres = hrtimer_get_res,
58696 .clock_get = posix_get_monotonic_raw,
58698 - struct k_clock clock_realtime_coarse = {
58699 + static struct k_clock clock_realtime_coarse = {
58700 .clock_getres = posix_get_coarse_res,
58701 .clock_get = posix_get_realtime_coarse,
58703 - struct k_clock clock_monotonic_coarse = {
58704 + static struct k_clock clock_monotonic_coarse = {
58705 .clock_getres = posix_get_coarse_res,
58706 .clock_get = posix_get_monotonic_coarse,
58708 - struct k_clock clock_boottime = {
58709 + static struct k_clock clock_boottime = {
58710 .clock_getres = hrtimer_get_res,
58711 .clock_get = posix_get_boottime,
58712 .nsleep = common_nsleep,
58713 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58714 .timer_del = common_timer_del,
58717 + pax_track_stack();
58719 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58720 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58721 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58722 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58726 - posix_clocks[clock_id] = *new_clock;
58727 + posix_clocks[clock_id] = new_clock;
58729 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58731 @@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58732 return (id & CLOCKFD_MASK) == CLOCKFD ?
58733 &clock_posix_dynamic : &clock_posix_cpu;
58735 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58736 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58738 - return &posix_clocks[id];
58739 + return posix_clocks[id];
58742 static int common_timer_create(struct k_itimer *new_timer)
58743 @@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58744 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58747 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58748 + have their clock_set fptr set to a nosettime dummy function
58749 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58750 + call common_clock_set, which calls do_sys_settimeofday, which
58754 return kc->clock_set(which_clock, &new_tp);
58757 diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58758 --- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58759 +++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58760 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58761 .enable_mask = SYSRQ_ENABLE_BOOT,
58764 -static int pm_sysrq_init(void)
58765 +static int __init pm_sysrq_init(void)
58767 register_sysrq_key('o', &sysrq_poweroff_op);
58769 diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58770 --- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58771 +++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58772 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58773 u64 elapsed_csecs64;
58774 unsigned int elapsed_csecs;
58775 bool wakeup = false;
58776 + bool timedout = false;
58778 do_gettimeofday(&start);
58780 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58784 + if (time_after(jiffies, end_time))
58786 read_lock(&tasklist_lock);
58787 do_each_thread(g, p) {
58788 if (frozen(p) || !freezable(p))
58789 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58790 * try_to_stop() after schedule() in ptrace/signal
58791 * stop sees TIF_FREEZE.
58793 - if (!task_is_stopped_or_traced(p) &&
58794 - !freezer_should_skip(p))
58795 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58798 + printk(KERN_ERR "Task refusing to freeze:\n");
58799 + sched_show_task(p);
58802 } while_each_thread(g, p);
58803 read_unlock(&tasklist_lock);
58805 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58809 - if (!todo || time_after(jiffies, end_time))
58810 + if (!todo || timedout)
58813 if (pm_wakeup_pending()) {
58814 diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58815 --- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58816 +++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58817 @@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58818 if (from_file && type != SYSLOG_ACTION_OPEN)
58821 +#ifdef CONFIG_GRKERNSEC_DMESG
58822 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58826 if (syslog_action_restricted(type)) {
58827 if (capable(CAP_SYSLOG))
58829 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58830 if (capable(CAP_SYS_ADMIN)) {
58831 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58832 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58833 "but no CAP_SYSLOG (deprecated).\n");
58836 diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58837 --- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58838 +++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58839 @@ -39,7 +39,7 @@ struct profile_hit {
58840 /* Oprofile timer tick hook */
58841 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58843 -static atomic_t *prof_buffer;
58844 +static atomic_unchecked_t *prof_buffer;
58845 static unsigned long prof_len, prof_shift;
58847 int prof_on __read_mostly;
58848 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58852 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58853 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58854 hits[i].hits = hits[i].pc = 0;
58857 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58858 * Add the current hit(s) and flush the write-queue out
58859 * to the global buffer:
58861 - atomic_add(nr_hits, &prof_buffer[pc]);
58862 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58863 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58864 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58865 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58866 hits[i].pc = hits[i].hits = 0;
58869 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58870 if (prof_on != type || !prof_buffer)
58872 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58873 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58874 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58876 #endif /* !CONFIG_SMP */
58877 EXPORT_SYMBOL_GPL(profile_hits);
58878 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58880 buf++; p++; count--; read++;
58882 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58883 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58884 if (copy_to_user(buf, (void *)pnt, count))
58887 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58890 profile_discard_flip_buffers();
58891 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58892 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58896 diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58897 --- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58898 +++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58899 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58903 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58904 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58905 + unsigned int log)
58907 const struct cred *cred = current_cred(), *tcred;
58909 @@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58910 cred->gid == tcred->sgid &&
58911 cred->gid == tcred->gid))
58913 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58914 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58915 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58919 @@ -152,7 +154,9 @@ ok:
58922 dumpable = get_dumpable(task->mm);
58923 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58925 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58926 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58929 return security_ptrace_access_check(task, mode);
58930 @@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58934 - err = __ptrace_may_access(task, mode);
58935 + err = __ptrace_may_access(task, mode, 0);
58936 + task_unlock(task);
58940 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58944 + err = __ptrace_may_access(task, mode, 1);
58948 @@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58952 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58953 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58957 @@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58958 goto unlock_tasklist;
58960 task->ptrace = PT_PTRACED;
58961 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58962 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58963 task->ptrace |= PT_PTRACE_CAP;
58965 __ptrace_link(task, current);
58966 @@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58970 + pax_track_stack();
58974 int this_len, retval;
58975 @@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
58979 - if (copy_to_user(dst, buf, retval))
58980 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58984 @@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
58988 + pax_track_stack();
58992 int this_len, retval;
58993 @@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
58997 - void __user *datavp = (void __user *) data;
58998 + void __user *datavp = (__force void __user *) data;
58999 unsigned long __user *datalp = datavp;
59001 + pax_track_stack();
59004 case PTRACE_PEEKTEXT:
59005 case PTRACE_PEEKDATA:
59006 @@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
59010 + if (gr_handle_ptrace(child, request)) {
59012 + goto out_put_task_struct;
59015 if (request == PTRACE_ATTACH) {
59016 ret = ptrace_attach(child);
59018 * Some architectures need to do book-keeping after
59023 arch_ptrace_attach(child);
59024 + gr_audit_ptrace(child);
59026 goto out_put_task_struct;
59029 @@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
59030 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
59031 if (copied != sizeof(tmp))
59033 - return put_user(tmp, (unsigned long __user *)data);
59034 + return put_user(tmp, (__force unsigned long __user *)data);
59037 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
59038 @@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
59042 + pax_track_stack();
59045 case PTRACE_PEEKTEXT:
59046 case PTRACE_PEEKDATA:
59047 @@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
59051 + if (gr_handle_ptrace(child, request)) {
59053 + goto out_put_task_struct;
59056 if (request == PTRACE_ATTACH) {
59057 ret = ptrace_attach(child);
59059 * Some architectures need to do book-keeping after
59064 arch_ptrace_attach(child);
59065 + gr_audit_ptrace(child);
59067 goto out_put_task_struct;
59070 diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59071 --- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59072 +++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59073 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59075 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59077 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59078 -static atomic_t n_rcu_torture_alloc;
59079 -static atomic_t n_rcu_torture_alloc_fail;
59080 -static atomic_t n_rcu_torture_free;
59081 -static atomic_t n_rcu_torture_mberror;
59082 -static atomic_t n_rcu_torture_error;
59083 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59084 +static atomic_unchecked_t n_rcu_torture_alloc;
59085 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
59086 +static atomic_unchecked_t n_rcu_torture_free;
59087 +static atomic_unchecked_t n_rcu_torture_mberror;
59088 +static atomic_unchecked_t n_rcu_torture_error;
59089 static long n_rcu_torture_boost_ktrerror;
59090 static long n_rcu_torture_boost_rterror;
59091 static long n_rcu_torture_boost_allocerror;
59092 @@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59094 spin_lock_bh(&rcu_torture_lock);
59095 if (list_empty(&rcu_torture_freelist)) {
59096 - atomic_inc(&n_rcu_torture_alloc_fail);
59097 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59098 spin_unlock_bh(&rcu_torture_lock);
59101 - atomic_inc(&n_rcu_torture_alloc);
59102 + atomic_inc_unchecked(&n_rcu_torture_alloc);
59103 p = rcu_torture_freelist.next;
59105 spin_unlock_bh(&rcu_torture_lock);
59106 @@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59108 rcu_torture_free(struct rcu_torture *p)
59110 - atomic_inc(&n_rcu_torture_free);
59111 + atomic_inc_unchecked(&n_rcu_torture_free);
59112 spin_lock_bh(&rcu_torture_lock);
59113 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59114 spin_unlock_bh(&rcu_torture_lock);
59115 @@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59116 i = rp->rtort_pipe_count;
59117 if (i > RCU_TORTURE_PIPE_LEN)
59118 i = RCU_TORTURE_PIPE_LEN;
59119 - atomic_inc(&rcu_torture_wcount[i]);
59120 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59121 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59122 rp->rtort_mbtest = 0;
59123 rcu_torture_free(rp);
59124 @@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59125 i = rp->rtort_pipe_count;
59126 if (i > RCU_TORTURE_PIPE_LEN)
59127 i = RCU_TORTURE_PIPE_LEN;
59128 - atomic_inc(&rcu_torture_wcount[i]);
59129 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59130 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59131 rp->rtort_mbtest = 0;
59132 list_del(&rp->rtort_free);
59133 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59134 i = old_rp->rtort_pipe_count;
59135 if (i > RCU_TORTURE_PIPE_LEN)
59136 i = RCU_TORTURE_PIPE_LEN;
59137 - atomic_inc(&rcu_torture_wcount[i]);
59138 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59139 old_rp->rtort_pipe_count++;
59140 cur_ops->deferred_free(old_rp);
59142 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59145 if (p->rtort_mbtest == 0)
59146 - atomic_inc(&n_rcu_torture_mberror);
59147 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59148 spin_lock(&rand_lock);
59149 cur_ops->read_delay(&rand);
59150 n_rcu_torture_timers++;
59151 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59154 if (p->rtort_mbtest == 0)
59155 - atomic_inc(&n_rcu_torture_mberror);
59156 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59157 cur_ops->read_delay(&rand);
59159 pipe_count = p->rtort_pipe_count;
59160 @@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59161 rcu_torture_current,
59162 rcu_torture_current_version,
59163 list_empty(&rcu_torture_freelist),
59164 - atomic_read(&n_rcu_torture_alloc),
59165 - atomic_read(&n_rcu_torture_alloc_fail),
59166 - atomic_read(&n_rcu_torture_free),
59167 - atomic_read(&n_rcu_torture_mberror),
59168 + atomic_read_unchecked(&n_rcu_torture_alloc),
59169 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59170 + atomic_read_unchecked(&n_rcu_torture_free),
59171 + atomic_read_unchecked(&n_rcu_torture_mberror),
59172 n_rcu_torture_boost_ktrerror,
59173 n_rcu_torture_boost_rterror,
59174 n_rcu_torture_boost_allocerror,
59175 @@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59176 n_rcu_torture_boost_failure,
59177 n_rcu_torture_boosts,
59178 n_rcu_torture_timers);
59179 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59180 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59181 n_rcu_torture_boost_ktrerror != 0 ||
59182 n_rcu_torture_boost_rterror != 0 ||
59183 n_rcu_torture_boost_allocerror != 0 ||
59184 @@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59185 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59187 cnt += sprintf(&page[cnt], "!!! ");
59188 - atomic_inc(&n_rcu_torture_error);
59189 + atomic_inc_unchecked(&n_rcu_torture_error);
59192 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59193 @@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59194 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59196 cnt += sprintf(&page[cnt], " %d",
59197 - atomic_read(&rcu_torture_wcount[i]));
59198 + atomic_read_unchecked(&rcu_torture_wcount[i]));
59200 cnt += sprintf(&page[cnt], "\n");
59201 if (cur_ops->stats)
59202 @@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59204 if (cur_ops->cleanup)
59205 cur_ops->cleanup();
59206 - if (atomic_read(&n_rcu_torture_error))
59207 + if (atomic_read_unchecked(&n_rcu_torture_error))
59208 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59210 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59211 @@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59213 rcu_torture_current = NULL;
59214 rcu_torture_current_version = 0;
59215 - atomic_set(&n_rcu_torture_alloc, 0);
59216 - atomic_set(&n_rcu_torture_alloc_fail, 0);
59217 - atomic_set(&n_rcu_torture_free, 0);
59218 - atomic_set(&n_rcu_torture_mberror, 0);
59219 - atomic_set(&n_rcu_torture_error, 0);
59220 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59221 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59222 + atomic_set_unchecked(&n_rcu_torture_free, 0);
59223 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59224 + atomic_set_unchecked(&n_rcu_torture_error, 0);
59225 n_rcu_torture_boost_ktrerror = 0;
59226 n_rcu_torture_boost_rterror = 0;
59227 n_rcu_torture_boost_allocerror = 0;
59228 @@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59229 n_rcu_torture_boost_failure = 0;
59230 n_rcu_torture_boosts = 0;
59231 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59232 - atomic_set(&rcu_torture_wcount[i], 0);
59233 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59234 for_each_possible_cpu(cpu) {
59235 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59236 per_cpu(rcu_torture_count, cpu)[i] = 0;
59237 diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59238 --- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59239 +++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59240 @@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59242 * Do softirq processing for the current CPU.
59244 -static void rcu_process_callbacks(struct softirq_action *unused)
59245 +static void rcu_process_callbacks(void)
59248 * Memory references from any prior RCU read-side critical sections
59249 diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59250 --- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59251 +++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59252 @@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59254 /* Clean up and exit. */
59255 smp_mb(); /* ensure expedited GP seen before counter increment. */
59256 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59257 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59259 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59261 @@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59263 #else /* #ifndef CONFIG_SMP */
59265 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59266 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59267 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59268 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59270 static int synchronize_sched_expedited_cpu_stop(void *data)
59272 @@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59273 int firstsnap, s, snap, trycount = 0;
59275 /* Note that atomic_inc_return() implies full memory barrier. */
59276 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59277 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59281 @@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59284 /* Check to see if someone else did our work for us. */
59285 - s = atomic_read(&sync_sched_expedited_done);
59286 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59287 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59288 smp_mb(); /* ensure test happens before caller kfree */
59290 @@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59291 * grace period works for us.
59294 - snap = atomic_read(&sync_sched_expedited_started) - 1;
59295 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59296 smp_mb(); /* ensure read is before try_stop_cpus(). */
59299 @@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59300 * than we did beat us to the punch.
59303 - s = atomic_read(&sync_sched_expedited_done);
59304 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59305 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59306 smp_mb(); /* ensure test happens before caller kfree */
59309 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59310 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59314 diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59315 --- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59316 +++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59317 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59321 + pax_track_stack();
59323 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59325 if (splice_grow_spd(pipe, &spd))
59326 diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59327 --- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59328 +++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59329 @@ -133,8 +133,18 @@ static const struct file_operations proc
59331 static int __init ioresources_init(void)
59333 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59334 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59335 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59336 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59337 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59338 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59339 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59342 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59343 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59347 __initcall(ioresources_init);
59348 diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59349 --- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59350 +++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59352 #define MAX_RT_TEST_MUTEXES 8
59354 static spinlock_t rttest_lock;
59355 -static atomic_t rttest_event;
59356 +static atomic_unchecked_t rttest_event;
59358 struct test_thread_data {
59360 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59362 case RTTEST_LOCKCONT:
59363 td->mutexes[td->opdata] = 1;
59364 - td->event = atomic_add_return(1, &rttest_event);
59365 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59369 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59372 case RTTEST_RESETEVENT:
59373 - atomic_set(&rttest_event, 0);
59374 + atomic_set_unchecked(&rttest_event, 0);
59378 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59381 td->mutexes[id] = 1;
59382 - td->event = atomic_add_return(1, &rttest_event);
59383 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59384 rt_mutex_lock(&mutexes[id]);
59385 - td->event = atomic_add_return(1, &rttest_event);
59386 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59387 td->mutexes[id] = 4;
59390 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59393 td->mutexes[id] = 1;
59394 - td->event = atomic_add_return(1, &rttest_event);
59395 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59396 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59397 - td->event = atomic_add_return(1, &rttest_event);
59398 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59399 td->mutexes[id] = ret ? 0 : 4;
59400 return ret ? -EINTR : 0;
59402 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59403 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59406 - td->event = atomic_add_return(1, &rttest_event);
59407 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59408 rt_mutex_unlock(&mutexes[id]);
59409 - td->event = atomic_add_return(1, &rttest_event);
59410 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59411 td->mutexes[id] = 0;
59414 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59417 td->mutexes[dat] = 2;
59418 - td->event = atomic_add_return(1, &rttest_event);
59419 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59423 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59426 td->mutexes[dat] = 3;
59427 - td->event = atomic_add_return(1, &rttest_event);
59428 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59431 case RTTEST_LOCKNOWAIT:
59432 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59435 td->mutexes[dat] = 1;
59436 - td->event = atomic_add_return(1, &rttest_event);
59437 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59441 diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59442 --- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59443 +++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59446 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59447 static struct autogroup autogroup_default;
59448 -static atomic_t autogroup_seq_nr;
59449 +static atomic_unchecked_t autogroup_seq_nr;
59451 static void __init autogroup_init(struct task_struct *init_task)
59453 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59455 kref_init(&ag->kref);
59456 init_rwsem(&ag->lock);
59457 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59458 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59460 #ifdef CONFIG_RT_GROUP_SCHED
59462 diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59463 --- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59464 +++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59465 @@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59469 + pax_track_stack();
59473 cpu = smp_processor_id();
59474 @@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59475 * Look out! "owner" is an entirely speculative pointer
59476 * access and not reliable.
59478 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59479 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59483 @@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59484 * DEBUG_PAGEALLOC could have unmapped it if
59485 * the mutex owner just released it and exited.
59487 - if (probe_kernel_address(&owner->cpu, cpu))
59488 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59491 - cpu = owner->cpu;
59492 + cpu = task_thread_info(owner)->cpu;
59496 @@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59498 * Is that owner really running on that cpu?
59500 - if (task_thread_info(rq->curr) != owner || need_resched())
59501 + if (rq->curr != owner || need_resched())
59504 arch_mutex_cpu_relax();
59505 @@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59506 /* convert nice value [19,-20] to rlimit style value [1,40] */
59507 int nice_rlim = 20 - nice;
59509 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59511 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59512 capable(CAP_SYS_NICE));
59514 @@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59518 - if (increment < 0 && !can_nice(current, nice))
59519 + if (increment < 0 && (!can_nice(current, nice) ||
59520 + gr_handle_chroot_nice()))
59523 retval = security_task_setnice(current, nice);
59524 @@ -4957,6 +4962,7 @@ recheck:
59525 unsigned long rlim_rtprio =
59526 task_rlimit(p, RLIMIT_RTPRIO);
59528 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59529 /* can't set/change the rt policy */
59530 if (policy != p->policy && !rlim_rtprio)
59532 @@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59536 - WARN_ON(!sd || !sd->groups);
59537 + BUG_ON(!sd || !sd->groups);
59539 if (cpu != group_first_cpu(sd->groups))
59541 diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59542 --- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59543 +++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59544 @@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59545 * run_rebalance_domains is triggered when needed from the scheduler tick.
59546 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59548 -static void run_rebalance_domains(struct softirq_action *h)
59549 +static void run_rebalance_domains(void)
59551 int this_cpu = smp_processor_id();
59552 struct rq *this_rq = cpu_rq(this_cpu);
59553 diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59554 --- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59555 +++ linux-2.6.39.4/kernel/signal.c 2011-08-05 19:44:37.000000000 -0400
59556 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59558 int print_fatal_signals __read_mostly;
59560 -static void __user *sig_handler(struct task_struct *t, int sig)
59561 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59563 return t->sighand->action[sig - 1].sa.sa_handler;
59566 -static int sig_handler_ignored(void __user *handler, int sig)
59567 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59569 /* Is it explicitly or implicitly ignored? */
59570 return handler == SIG_IGN ||
59571 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59572 static int sig_task_ignored(struct task_struct *t, int sig,
59573 int from_ancestor_ns)
59575 - void __user *handler;
59576 + __sighandler_t handler;
59578 handler = sig_handler(t, sig);
59580 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59581 atomic_inc(&user->sigpending);
59584 + if (!override_rlimit)
59585 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59587 if (override_rlimit ||
59588 atomic_read(&user->sigpending) <=
59589 task_rlimit(t, RLIMIT_SIGPENDING)) {
59590 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59592 int unhandled_signal(struct task_struct *tsk, int sig)
59594 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59595 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59596 if (is_global_init(tsk))
59598 if (handler != SIG_IGN && handler != SIG_DFL)
59599 @@ -693,6 +696,12 @@ static int check_kill_permission(int sig
59603 + /* allow glibc communication via tgkill to other threads in our
59605 + if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
59606 + task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
59609 return security_task_kill(t, info, sig, 0);
59612 @@ -1041,7 +1050,7 @@ __group_send_sig_info(int sig, struct si
59613 return send_signal(sig, info, p, 1);
59618 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59620 return send_signal(sig, info, t, 0);
59621 @@ -1078,6 +1087,7 @@ force_sig_info(int sig, struct siginfo *
59622 unsigned long int flags;
59623 int ret, blocked, ignored;
59624 struct k_sigaction *action;
59625 + int is_unhandled = 0;
59627 spin_lock_irqsave(&t->sighand->siglock, flags);
59628 action = &t->sighand->action[sig-1];
59629 @@ -1092,9 +1102,18 @@ force_sig_info(int sig, struct siginfo *
59631 if (action->sa.sa_handler == SIG_DFL)
59632 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59633 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59634 + is_unhandled = 1;
59635 ret = specific_send_sig_info(sig, info, t);
59636 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59638 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59639 + normal operation */
59640 + if (is_unhandled) {
59641 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59642 + gr_handle_crash(t, sig);
59648 @@ -1153,8 +1172,11 @@ int group_send_sig_info(int sig, struct
59649 ret = check_kill_permission(sig, info, p);
59653 + if (!ret && sig) {
59654 ret = do_send_sig_info(sig, info, p, true);
59656 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59661 @@ -1718,6 +1740,8 @@ void ptrace_notify(int exit_code)
59665 + pax_track_stack();
59667 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59669 memset(&info, 0, sizeof info);
59670 @@ -2393,7 +2417,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59671 int error = -ESRCH;
59674 - p = find_task_by_vpid(pid);
59675 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59676 + /* allow glibc communication via tgkill to other threads in our
59678 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59679 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59680 + p = find_task_by_vpid_unrestricted(pid);
59683 + p = find_task_by_vpid(pid);
59684 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59685 error = check_kill_permission(sig, info, p);
59687 diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59688 --- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59689 +++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59690 @@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59692 EXPORT_SYMBOL(smp_call_function);
59694 -void ipi_call_lock(void)
59695 +void ipi_call_lock(void) __acquires(call_function.lock)
59697 raw_spin_lock(&call_function.lock);
59700 -void ipi_call_unlock(void)
59701 +void ipi_call_unlock(void) __releases(call_function.lock)
59703 raw_spin_unlock(&call_function.lock);
59706 -void ipi_call_lock_irq(void)
59707 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59709 raw_spin_lock_irq(&call_function.lock);
59712 -void ipi_call_unlock_irq(void)
59713 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59715 raw_spin_unlock_irq(&call_function.lock);
59717 diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59718 --- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59719 +++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59720 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59722 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59724 -char *softirq_to_name[NR_SOFTIRQS] = {
59725 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59726 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59727 "TASKLET", "SCHED", "HRTIMER", "RCU"
59729 @@ -235,7 +235,7 @@ restart:
59730 kstat_incr_softirqs_this_cpu(vec_nr);
59732 trace_softirq_entry(vec_nr);
59735 trace_softirq_exit(vec_nr);
59736 if (unlikely(prev_count != preempt_count())) {
59737 printk(KERN_ERR "huh, entered softirq %u %s %p"
59738 @@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59739 local_irq_restore(flags);
59742 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59743 +void open_softirq(int nr, void (*action)(void))
59745 - softirq_vec[nr].action = action;
59746 + pax_open_kernel();
59747 + *(void **)&softirq_vec[nr].action = action;
59748 + pax_close_kernel();
59752 @@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59754 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59756 -static void tasklet_action(struct softirq_action *a)
59757 +static void tasklet_action(void)
59759 struct tasklet_struct *list;
59761 @@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59765 -static void tasklet_hi_action(struct softirq_action *a)
59766 +static void tasklet_hi_action(void)
59768 struct tasklet_struct *list;
59770 diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59771 --- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59772 +++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59773 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59778 + if (gr_handle_chroot_setpriority(p, niceval)) {
59783 no_nice = security_task_setnice(p, niceval);
59786 @@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59790 + if (gr_check_group_change(new->gid, new->egid, -1))
59793 if (rgid != (gid_t) -1 ||
59794 (egid != (gid_t) -1 && egid != old->gid))
59795 new->sgid = new->egid;
59796 @@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59797 old = current_cred();
59801 + if (gr_check_group_change(gid, gid, gid))
59804 if (nsown_capable(CAP_SETGID))
59805 new->gid = new->egid = new->sgid = new->fsgid = gid;
59806 else if (gid == old->gid || gid == old->sgid)
59807 @@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59811 + if (gr_check_user_change(new->uid, new->euid, -1))
59814 if (new->uid != old->uid) {
59815 retval = set_user(new);
59817 @@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59818 old = current_cred();
59822 + if (gr_check_crash_uid(uid))
59824 + if (gr_check_user_change(uid, uid, uid))
59827 if (nsown_capable(CAP_SETUID)) {
59828 new->suid = new->uid = uid;
59829 if (uid != old->uid) {
59830 @@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59834 + if (gr_check_user_change(ruid, euid, -1))
59837 if (ruid != (uid_t) -1) {
59839 if (ruid != old->uid) {
59840 @@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59844 + if (gr_check_group_change(rgid, egid, -1))
59847 if (rgid != (gid_t) -1)
59849 if (egid != (gid_t) -1)
59850 @@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59851 old = current_cred();
59852 old_fsuid = old->fsuid;
59854 + if (gr_check_user_change(-1, -1, uid))
59857 if (uid == old->uid || uid == old->euid ||
59858 uid == old->suid || uid == old->fsuid ||
59859 nsown_capable(CAP_SETUID)) {
59860 @@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59868 @@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59869 if (gid == old->gid || gid == old->egid ||
59870 gid == old->sgid || gid == old->fsgid ||
59871 nsown_capable(CAP_SETGID)) {
59872 + if (gr_check_group_change(-1, -1, gid))
59875 if (gid != old_fsgid) {
59885 @@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59886 error = get_dumpable(me->mm);
59888 case PR_SET_DUMPABLE:
59889 - if (arg2 < 0 || arg2 > 1) {
59894 diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59895 --- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59896 +++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59900 #if defined(CONFIG_SYSCTL)
59901 +#include <linux/grsecurity.h>
59902 +#include <linux/grinternal.h>
59904 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59905 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59907 +extern int gr_handle_chroot_sysctl(const int op);
59909 /* External variables not in a header file. */
59910 extern int sysctl_overcommit_memory;
59911 @@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59915 +extern struct ctl_table grsecurity_table[];
59917 static struct ctl_table root_table[];
59918 static struct ctl_table_root sysctl_table_root;
59919 @@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59920 int sysctl_legacy_va_layout;
59923 +#ifdef CONFIG_PAX_SOFTMODE
59924 +static ctl_table pax_table[] = {
59926 + .procname = "softmode",
59927 + .data = &pax_softmode,
59928 + .maxlen = sizeof(unsigned int),
59930 + .proc_handler = &proc_dointvec,
59937 /* The default sysctl tables: */
59939 static struct ctl_table root_table[] = {
59940 @@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59943 static struct ctl_table kern_table[] = {
59944 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59946 + .procname = "grsecurity",
59948 + .child = grsecurity_table,
59952 +#ifdef CONFIG_PAX_SOFTMODE
59954 + .procname = "pax",
59956 + .child = pax_table,
59961 .procname = "sched_child_runs_first",
59962 .data = &sysctl_sched_child_runs_first,
59963 @@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59964 .data = &modprobe_path,
59965 .maxlen = KMOD_PATH_LEN,
59967 - .proc_handler = proc_dostring,
59968 + .proc_handler = proc_dostring_modpriv,
59971 .procname = "modules_disabled",
59972 @@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59978 .procname = "kptr_restrict",
59979 .data = &kptr_restrict,
59980 .maxlen = sizeof(int),
59982 .proc_handler = proc_dmesg_restrict,
59983 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59992 .procname = "ngroups_max",
59993 .data = &ngroups_max,
59994 @@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
59995 .proc_handler = proc_dointvec_minmax,
59999 + .procname = "heap_stack_gap",
60000 + .data = &sysctl_heap_stack_gap,
60001 + .maxlen = sizeof(sysctl_heap_stack_gap),
60003 + .proc_handler = proc_doulongvec_minmax,
60007 .procname = "nr_trim_pages",
60008 @@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
60009 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
60014 + if (table->parent != NULL && table->parent->procname != NULL &&
60015 + table->procname != NULL &&
60016 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
60018 + if (gr_handle_chroot_sysctl(op))
60020 + error = gr_handle_sysctl(table, op);
60024 if (root->permissions)
60025 mode = root->permissions(root, current->nsproxy, table);
60026 @@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
60027 buffer, lenp, ppos);
60030 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60031 + void __user *buffer, size_t *lenp, loff_t *ppos)
60033 + if (write && !capable(CAP_SYS_MODULE))
60036 + return _proc_do_string(table->data, table->maxlen, write,
60037 + buffer, lenp, ppos);
60040 static size_t proc_skip_spaces(char **buf)
60043 @@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
60047 + if (len > sizeof(tmp))
60048 + len = sizeof(tmp);
60049 if (copy_to_user(*buf, tmp, len))
60052 @@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
60055 val = convdiv * (*i) / convmul;
60058 err = proc_put_char(&buffer, &left, '\t');
60062 err = proc_put_long(&buffer, &left, val, false);
60065 @@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60069 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60070 + void __user *buffer, size_t *lenp, loff_t *ppos)
60075 int proc_dointvec(struct ctl_table *table, int write,
60076 void __user *buffer, size_t *lenp, loff_t *ppos)
60078 @@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60079 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60080 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60081 EXPORT_SYMBOL(proc_dostring);
60082 +EXPORT_SYMBOL(proc_dostring_modpriv);
60083 EXPORT_SYMBOL(proc_doulongvec_minmax);
60084 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60085 EXPORT_SYMBOL(register_sysctl_table);
60086 diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60087 --- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60088 +++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60089 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60090 set_fail(&fail, table, "Directory with extra2");
60092 if ((table->proc_handler == proc_dostring) ||
60093 + (table->proc_handler == proc_dostring_modpriv) ||
60094 (table->proc_handler == proc_dointvec) ||
60095 (table->proc_handler == proc_dointvec_minmax) ||
60096 (table->proc_handler == proc_dointvec_jiffies) ||
60097 diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60098 --- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60099 +++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60101 #include <linux/cgroup.h>
60102 #include <linux/fs.h>
60103 #include <linux/file.h>
60104 +#include <linux/grsecurity.h>
60105 #include <net/genetlink.h>
60106 #include <asm/atomic.h>
60108 +extern int gr_is_taskstats_denied(int pid);
60111 * Maximum length of a cpumask that can be specified in
60112 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60113 @@ -558,6 +561,9 @@ err:
60115 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60117 + if (gr_is_taskstats_denied(current->pid))
60120 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60121 return cmd_attr_register_cpumask(info);
60122 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60123 diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60124 --- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60125 +++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60126 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60127 * then clear the broadcast bit.
60129 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60130 - int cpu = smp_processor_id();
60131 + cpu = smp_processor_id();
60133 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60134 tick_broadcast_clear_oneshot(cpu);
60135 diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60136 --- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60137 +++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60139 #include <linux/init.h>
60140 #include <linux/mm.h>
60141 #include <linux/sched.h>
60142 +#include <linux/grsecurity.h>
60143 #include <linux/syscore_ops.h>
60144 #include <linux/clocksource.h>
60145 #include <linux/jiffies.h>
60146 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60147 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60150 + gr_log_timechange();
60152 write_seqlock_irqsave(&xtime_lock, flags);
60154 timekeeping_forward_now();
60155 diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60156 --- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60157 +++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60158 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60160 static void print_name_offset(struct seq_file *m, void *sym)
60162 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60163 + SEQ_printf(m, "<%p>", NULL);
60165 char symname[KSYM_NAME_LEN];
60167 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60168 SEQ_printf(m, "<%pK>", sym);
60170 SEQ_printf(m, "%s", symname);
60175 @@ -112,7 +116,11 @@ next_one:
60177 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60179 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60180 + SEQ_printf(m, " .base: %p\n", NULL);
60182 SEQ_printf(m, " .base: %pK\n", base);
60184 SEQ_printf(m, " .index: %d\n",
60186 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60187 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60189 struct proc_dir_entry *pe;
60191 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60192 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60194 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60199 diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60200 --- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60201 +++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60202 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60203 static unsigned long nr_entries;
60204 static struct entry entries[MAX_ENTRIES];
60206 -static atomic_t overflow_count;
60207 +static atomic_unchecked_t overflow_count;
60210 * The entries are in a hash-table, for fast lookup:
60211 @@ -140,7 +140,7 @@ static void reset_entries(void)
60213 memset(entries, 0, sizeof(entries));
60214 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60215 - atomic_set(&overflow_count, 0);
60216 + atomic_set_unchecked(&overflow_count, 0);
60219 static struct entry *alloc_entry(void)
60220 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60224 - atomic_inc(&overflow_count);
60225 + atomic_inc_unchecked(&overflow_count);
60228 raw_spin_unlock_irqrestore(lock, flags);
60229 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60231 static void print_name_offset(struct seq_file *m, unsigned long addr)
60233 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60234 + seq_printf(m, "<%p>", NULL);
60236 char symname[KSYM_NAME_LEN];
60238 if (lookup_symbol_name(addr, symname) < 0)
60239 seq_printf(m, "<%p>", (void *)addr);
60241 seq_printf(m, "%s", symname);
60245 static int tstats_show(struct seq_file *m, void *v)
60246 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60248 seq_puts(m, "Timer Stats Version: v0.2\n");
60249 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60250 - if (atomic_read(&overflow_count))
60251 + if (atomic_read_unchecked(&overflow_count))
60252 seq_printf(m, "Overflow: %d entries\n",
60253 - atomic_read(&overflow_count));
60254 + atomic_read_unchecked(&overflow_count));
60256 for (i = 0; i < nr_entries; i++) {
60257 entry = entries + i;
60258 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60260 struct proc_dir_entry *pe;
60262 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60263 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60265 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60270 diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60271 --- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60272 +++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60273 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60277 + /* we log in do_settimeofday called below, so don't log twice
60280 + gr_log_timechange();
60282 /* SMP safe, global irq locking makes it work. */
60284 update_vsyscall_tz();
60285 diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60286 --- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60287 +++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60288 @@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60290 * This function runs timers and the timer-tq in bottom half context.
60292 -static void run_timer_softirq(struct softirq_action *h)
60293 +static void run_timer_softirq(void)
60295 struct tvec_base *base = __this_cpu_read(tvec_bases);
60297 diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60298 --- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60299 +++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60300 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60301 struct blk_trace *bt = filp->private_data;
60304 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60305 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60307 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60309 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60312 bt = buf->chan->private_data;
60313 - atomic_inc(&bt->dropped);
60314 + atomic_inc_unchecked(&bt->dropped);
60318 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60322 - atomic_set(&bt->dropped, 0);
60323 + atomic_set_unchecked(&bt->dropped, 0);
60326 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60327 diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60328 --- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60329 +++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60330 @@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60334 + ret = ftrace_arch_code_modify_prepare();
60335 + FTRACE_WARN_ON(ret);
60339 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60340 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60342 ftrace_bug(ret, ip);
60343 rec->flags |= FTRACE_FL_FAILED;
60347 + return ret ? 0 : 1;
60351 @@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60354 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60358 struct ftrace_func_probe *entry;
60359 struct ftrace_page *pg;
60360 diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60361 --- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60362 +++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60363 @@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60367 + pax_track_stack();
60369 if (splice_grow_spd(pipe, &spd))
60372 @@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60373 int entries, size, i;
60376 + pax_track_stack();
60378 if (splice_grow_spd(pipe, &spd))
60381 @@ -3981,10 +3985,9 @@ static const struct file_operations trac
60385 -static struct dentry *d_tracer;
60387 struct dentry *tracing_init_dentry(void)
60389 + static struct dentry *d_tracer;
60393 @@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60397 -static struct dentry *d_percpu;
60399 struct dentry *tracing_dentry_percpu(void)
60401 + static struct dentry *d_percpu;
60403 struct dentry *d_tracer;
60405 diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60406 --- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60407 +++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60408 @@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60409 struct ftrace_module_file_ops {
60410 struct list_head list;
60411 struct module *mod;
60412 - struct file_operations id;
60413 - struct file_operations enable;
60414 - struct file_operations format;
60415 - struct file_operations filter;
60418 static struct ftrace_module_file_ops *
60419 @@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60421 file_ops->mod = mod;
60423 - file_ops->id = ftrace_event_id_fops;
60424 - file_ops->id.owner = mod;
60426 - file_ops->enable = ftrace_enable_fops;
60427 - file_ops->enable.owner = mod;
60429 - file_ops->filter = ftrace_event_filter_fops;
60430 - file_ops->filter.owner = mod;
60432 - file_ops->format = ftrace_event_format_fops;
60433 - file_ops->format.owner = mod;
60434 + pax_open_kernel();
60435 + *(void **)&mod->trace_id.owner = mod;
60436 + *(void **)&mod->trace_enable.owner = mod;
60437 + *(void **)&mod->trace_filter.owner = mod;
60438 + *(void **)&mod->trace_format.owner = mod;
60439 + pax_close_kernel();
60441 list_add(&file_ops->list, &ftrace_module_file_list);
60443 @@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60445 for_each_event(call, start, end) {
60446 __trace_add_event_call(*call, mod,
60447 - &file_ops->id, &file_ops->enable,
60448 - &file_ops->filter, &file_ops->format);
60449 + &mod->trace_id, &mod->trace_enable,
60450 + &mod->trace_filter, &mod->trace_format);
60454 diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60455 --- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60456 +++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60457 @@ -24,7 +24,7 @@ struct header_iter {
60458 static struct trace_array *mmio_trace_array;
60459 static bool overrun_detected;
60460 static unsigned long prev_overruns;
60461 -static atomic_t dropped_count;
60462 +static atomic_unchecked_t dropped_count;
60464 static void mmio_reset_data(struct trace_array *tr)
60466 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60468 static unsigned long count_overruns(struct trace_iterator *iter)
60470 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60471 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60472 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60474 if (over > prev_overruns)
60475 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60476 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60477 sizeof(*entry), 0, pc);
60479 - atomic_inc(&dropped_count);
60480 + atomic_inc_unchecked(&dropped_count);
60483 entry = ring_buffer_event_data(event);
60484 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60485 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60486 sizeof(*entry), 0, pc);
60488 - atomic_inc(&dropped_count);
60489 + atomic_inc_unchecked(&dropped_count);
60492 entry = ring_buffer_event_data(event);
60493 diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60494 --- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60495 +++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60496 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60498 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60500 - p = mangle_path(s->buffer + s->len, p, "\n");
60501 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60503 s->len = p - s->buffer;
60505 diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60506 --- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60507 +++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60508 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60511 /* we do not handle interrupt stacks yet */
60512 - if (!object_is_on_stack(&this_size))
60513 + if (!object_starts_on_stack(&this_size))
60516 local_irq_save(flags);
60517 diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60518 --- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60519 +++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60520 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60523 /* Can be inserted from interrupt or user context, need to be atomic */
60524 - atomic_t inserted;
60525 + atomic_unchecked_t inserted;
60527 * Don't need to be atomic, works are serialized in a single workqueue thread
60529 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60530 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60531 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60532 if (node->pid == wq_thread->pid) {
60533 - atomic_inc(&node->inserted);
60534 + atomic_inc_unchecked(&node->inserted);
60538 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60539 tsk = get_pid_task(pid, PIDTYPE_PID);
60541 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60542 - atomic_read(&cws->inserted), cws->executed,
60543 + atomic_read_unchecked(&cws->inserted), cws->executed,
60545 put_task_struct(tsk);
60547 diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60548 --- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60549 +++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60550 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60551 return BUG_TRAP_TYPE_NONE;
60553 bug = find_bug(bugaddr);
60555 + return BUG_TRAP_TYPE_NONE;
60559 diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60560 --- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60561 +++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60562 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60566 - is_on_stack = object_is_on_stack(addr);
60567 + is_on_stack = object_starts_on_stack(addr);
60568 if (is_on_stack == onstack)
60571 diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60572 --- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60573 +++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60574 @@ -862,7 +862,7 @@ out:
60576 static void check_for_stack(struct device *dev, void *addr)
60578 - if (object_is_on_stack(addr))
60579 + if (object_starts_on_stack(addr))
60580 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60581 "stack [addr=%p]\n", addr);
60583 diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60584 --- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60585 +++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60586 @@ -269,7 +269,7 @@ static void free(void *where)
60587 malloc_ptr = free_mem_ptr;
60590 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60591 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60592 #define free(a) kfree(a)
60595 diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60596 --- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60597 +++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60598 @@ -1078,6 +1078,7 @@ config LATENCYTOP
60599 depends on DEBUG_KERNEL
60600 depends on STACKTRACE_SUPPORT
60602 + depends on !GRKERNSEC_HIDESYM
60603 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60605 select KALLSYMS_ALL
60606 diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60607 --- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60608 +++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60609 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60611 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60613 - WARN_ON(release == NULL);
60614 + BUG_ON(release == NULL);
60615 WARN_ON(release == (void (*)(struct kref *))kfree);
60617 if (atomic_dec_and_test(&kref->refcount)) {
60618 diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60619 --- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60620 +++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60621 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60623 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60625 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60626 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60628 static inline void *ptr_to_indirect(void *ptr)
60630 diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60631 --- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60632 +++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60634 * - scnprintf and vscnprintf
60637 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60638 +#define __INCLUDED_BY_HIDESYM 1
60640 #include <stdarg.h>
60641 #include <linux/module.h>
60642 #include <linux/types.h>
60643 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60644 char sym[KSYM_SYMBOL_LEN];
60646 sprint_backtrace(sym, value);
60647 - else if (ext != 'f' && ext != 's')
60648 + else if (ext != 'f' && ext != 's' && ext != 'a')
60649 sprint_symbol(sym, value);
60651 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60652 @@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60653 return string(buf, end, uuid, spec);
60656 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60657 +int kptr_restrict __read_mostly = 2;
60659 int kptr_restrict __read_mostly;
60663 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60664 @@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60665 * - 'S' For symbolic direct pointers with offset
60666 * - 's' For symbolic direct pointers without offset
60667 * - 'B' For backtraced symbolic direct pointers with offset
60668 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60669 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60670 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60671 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60672 * - 'M' For a 6-byte MAC address, it prints the address in the
60673 @@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60675 if (!ptr && *fmt != 'K') {
60677 - * Print (null) with the same width as a pointer so it makes
60678 + * Print (nil) with the same width as a pointer so it makes
60679 * tabular output look nice.
60681 if (spec.field_width == -1)
60682 spec.field_width = 2 * sizeof(void *);
60683 - return string(buf, end, "(null)", spec);
60684 + return string(buf, end, "(nil)", spec);
60688 @@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60692 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60695 + return symbol_string(buf, end, ptr, spec, *fmt);
60700 return symbol_string(buf, end, ptr, spec, *fmt);
60702 @@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60703 typeof(type) value; \
60704 if (sizeof(type) == 8) { \
60705 args = PTR_ALIGN(args, sizeof(u32)); \
60706 - *(u32 *)&value = *(u32 *)args; \
60707 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60708 + *(u32 *)&value = *(const u32 *)args; \
60709 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60711 args = PTR_ALIGN(args, sizeof(type)); \
60712 - value = *(typeof(type) *)args; \
60713 + value = *(const typeof(type) *)args; \
60715 args += sizeof(type); \
60717 @@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60718 case FORMAT_TYPE_STR: {
60719 const char *str_arg = args;
60720 args += strlen(str_arg) + 1;
60721 - str = string(str, end, (char *)str_arg, spec);
60722 + str = string(str, end, str_arg, spec);
60726 diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60727 --- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60728 +++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60731 diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60732 --- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60733 +++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60734 @@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60738 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60739 -HOSTCXXFLAGS = -O2
60740 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60741 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60742 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60744 # Decide whether to build built-in, modular, or both.
60745 # Normally, just do built-in.
60746 @@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60747 KBUILD_CPPFLAGS := -D__KERNEL__
60749 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60750 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60751 -fno-strict-aliasing -fno-common \
60752 -Werror-implicit-function-declaration \
60753 -Wno-format-security \
60754 -fno-delete-null-pointer-checks
60755 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60756 KBUILD_AFLAGS_KERNEL :=
60757 KBUILD_CFLAGS_KERNEL :=
60758 KBUILD_AFLAGS := -D__ASSEMBLY__
60759 @@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60760 # Rules shared between *config targets and build targets
60762 # Basic helpers built in scripts/
60763 -PHONY += scripts_basic
60765 +PHONY += scripts_basic gcc-plugins
60766 +scripts_basic: gcc-plugins
60767 $(Q)$(MAKE) $(build)=scripts/basic
60768 $(Q)rm -f .tmp_quiet_recordmcount
60770 @@ -548,6 +551,25 @@ else
60771 KBUILD_CFLAGS += -O2
60774 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60775 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60776 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60777 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60779 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60780 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60782 + $(Q)$(MAKE) $(build)=tools/gcc
60785 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60786 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60788 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60790 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60793 include $(srctree)/arch/$(SRCARCH)/Makefile
60795 ifneq ($(CONFIG_FRAME_WARN),0)
60796 @@ -685,7 +707,7 @@ export mod_strip_cmd
60799 ifeq ($(KBUILD_EXTMOD),)
60800 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60801 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60803 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60804 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60805 @@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60808 # prepare2 creates a makefile if using a separate output directory
60809 -prepare2: prepare3 outputmakefile
60810 +prepare2: prepare3 outputmakefile gcc-plugins
60812 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60813 include/config/auto.conf
60814 @@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60816 $(call cmd,rmfiles)
60817 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60818 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60819 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60820 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60821 -o -name '*.symtypes' -o -name 'modules.order' \
60822 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60823 diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60824 --- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60825 +++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60826 @@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60827 struct address_space *mapping = file->f_mapping;
60829 if (!mapping->a_ops->readpage)
60832 file_accessed(file);
60833 vma->vm_ops = &generic_file_vm_ops;
60834 vma->vm_flags |= VM_CAN_NONLINEAR;
60835 @@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60836 *pos = i_size_read(inode);
60838 if (limit != RLIM_INFINITY) {
60839 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60840 if (*pos >= limit) {
60841 send_sig(SIGXFSZ, current, 0);
60843 diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60844 --- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60845 +++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60846 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60848 vma = find_vma(mm, start);
60850 +#ifdef CONFIG_PAX_SEGMEXEC
60851 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60856 * Make sure the vma is shared, that it supports prefaulting,
60857 * and that the remapped range is valid and fully within
60858 @@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60860 * drop PG_Mlocked flag for over-mapped range
60862 - unsigned int saved_flags = vma->vm_flags;
60863 + unsigned long saved_flags = vma->vm_flags;
60864 munlock_vma_pages_range(vma, start, start + size);
60865 vma->vm_flags = saved_flags;
60867 diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60868 --- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60869 +++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60870 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60871 * So no dangers, even with speculative execution.
60873 page = pte_page(pkmap_page_table[i]);
60874 + pax_open_kernel();
60875 pte_clear(&init_mm, (unsigned long)page_address(page),
60876 &pkmap_page_table[i]);
60878 + pax_close_kernel();
60879 set_page_address(page, NULL);
60882 @@ -186,9 +187,11 @@ start:
60885 vaddr = PKMAP_ADDR(last_pkmap_nr);
60887 + pax_open_kernel();
60888 set_pte_at(&init_mm, vaddr,
60889 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60891 + pax_close_kernel();
60892 pkmap_count[last_pkmap_nr] = 1;
60893 set_page_address(page, (void *)vaddr);
60895 diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60896 --- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60897 +++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60898 @@ -702,7 +702,7 @@ out:
60899 * run pte_offset_map on the pmd, if an huge pmd could
60900 * materialize from under us from a different thread.
60902 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60903 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60904 return VM_FAULT_OOM;
60905 /* if an huge pmd materialized from under us just retry later */
60906 if (unlikely(pmd_trans_huge(*pmd)))
60907 diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60908 --- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60909 +++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60910 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60914 +#ifdef CONFIG_PAX_SEGMEXEC
60915 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60917 + struct mm_struct *mm = vma->vm_mm;
60918 + struct vm_area_struct *vma_m;
60919 + unsigned long address_m;
60922 + vma_m = pax_find_mirror_vma(vma);
60926 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60927 + address_m = address + SEGMEXEC_TASK_SIZE;
60928 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60929 + get_page(page_m);
60930 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60931 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60936 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60938 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60939 make_huge_pte(vma, new_page, 1));
60940 page_remove_rmap(old_page);
60941 hugepage_add_new_anon_rmap(new_page, vma, address);
60943 +#ifdef CONFIG_PAX_SEGMEXEC
60944 + pax_mirror_huge_pte(vma, address, new_page);
60947 /* Make the old page be freed below */
60948 new_page = old_page;
60949 mmu_notifier_invalidate_range_end(mm,
60950 @@ -2591,6 +2617,10 @@ retry:
60951 && (vma->vm_flags & VM_SHARED)));
60952 set_huge_pte_at(mm, address, ptep, new_pte);
60954 +#ifdef CONFIG_PAX_SEGMEXEC
60955 + pax_mirror_huge_pte(vma, address, page);
60958 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60959 /* Optimization, do the COW without a second fault */
60960 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60961 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60962 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60963 struct hstate *h = hstate_vma(vma);
60965 +#ifdef CONFIG_PAX_SEGMEXEC
60966 + struct vm_area_struct *vma_m;
60969 ptep = huge_pte_offset(mm, address);
60971 entry = huge_ptep_get(ptep);
60972 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60973 VM_FAULT_SET_HINDEX(h - hstates);
60976 +#ifdef CONFIG_PAX_SEGMEXEC
60977 + vma_m = pax_find_mirror_vma(vma);
60979 + unsigned long address_m;
60981 + if (vma->vm_start > vma_m->vm_start) {
60982 + address_m = address;
60983 + address -= SEGMEXEC_TASK_SIZE;
60985 + h = hstate_vma(vma);
60987 + address_m = address + SEGMEXEC_TASK_SIZE;
60989 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60990 + return VM_FAULT_OOM;
60991 + address_m &= HPAGE_MASK;
60992 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60996 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60998 return VM_FAULT_OOM;
60999 diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
61000 --- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
61001 +++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
61002 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
61003 * in mm/page_alloc.c
61005 extern void __free_pages_bootmem(struct page *page, unsigned int order);
61006 +extern void free_compound_page(struct page *page);
61007 extern void prep_compound_page(struct page *page, unsigned long order);
61008 #ifdef CONFIG_MEMORY_FAILURE
61009 extern bool is_free_buddy_page(struct page *page);
61010 diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
61011 --- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
61012 +++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
61013 @@ -240,7 +240,7 @@ config KSM
61014 config DEFAULT_MMAP_MIN_ADDR
61015 int "Low address space to protect from user allocation"
61020 This is the portion of low virtual memory which should be protected
61021 from userspace allocation. Keeping a user from writing to low pages
61022 diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
61023 --- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
61024 +++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
61025 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
61027 for (i = 0; i < object->trace_len; i++) {
61028 void *ptr = (void *)object->trace[i];
61029 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
61030 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
61034 diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
61035 --- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
61036 +++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
61037 @@ -15,10 +15,10 @@
61038 * happens, handle that and return -EFAULT.
61041 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
61042 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
61043 __attribute__((alias("__probe_kernel_read")));
61045 -long __probe_kernel_read(void *dst, void *src, size_t size)
61046 +long __probe_kernel_read(void *dst, const void *src, size_t size)
61049 mm_segment_t old_fs = get_fs();
61050 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
61051 * Safely write to address @dst from the buffer at @src. If a kernel fault
61052 * happens, handle that and return -EFAULT.
61054 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
61055 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
61056 __attribute__((alias("__probe_kernel_write")));
61058 -long __probe_kernel_write(void *dst, void *src, size_t size)
61059 +long __probe_kernel_write(void *dst, const void *src, size_t size)
61062 mm_segment_t old_fs = get_fs();
61063 diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
61064 --- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
61065 +++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61066 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61068 unsigned long new_flags = vma->vm_flags;
61070 +#ifdef CONFIG_PAX_SEGMEXEC
61071 + struct vm_area_struct *vma_m;
61074 switch (behavior) {
61076 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61077 @@ -110,6 +114,13 @@ success:
61079 * vm_flags is protected by the mmap_sem held in write mode.
61082 +#ifdef CONFIG_PAX_SEGMEXEC
61083 + vma_m = pax_find_mirror_vma(vma);
61085 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61088 vma->vm_flags = new_flags;
61091 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61092 struct vm_area_struct ** prev,
61093 unsigned long start, unsigned long end)
61096 +#ifdef CONFIG_PAX_SEGMEXEC
61097 + struct vm_area_struct *vma_m;
61101 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61103 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61104 zap_page_range(vma, start, end - start, &details);
61106 zap_page_range(vma, start, end - start, NULL);
61108 +#ifdef CONFIG_PAX_SEGMEXEC
61109 + vma_m = pax_find_mirror_vma(vma);
61111 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61112 + struct zap_details details = {
61113 + .nonlinear_vma = vma_m,
61114 + .last_index = ULONG_MAX,
61116 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61118 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61125 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61129 +#ifdef CONFIG_PAX_SEGMEXEC
61130 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61131 + if (end > SEGMEXEC_TASK_SIZE)
61136 + if (end > TASK_SIZE)
61142 diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61143 --- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61144 +++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61145 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61148 pmd = pmd_offset(pud, start);
61150 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61152 pmd_free_tlb(tlb, pmd, start);
61157 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61158 @@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61159 if (end - 1 > ceiling - 1)
61162 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61163 pud = pud_offset(pgd, start);
61165 pud_free_tlb(tlb, pud, start);
61171 @@ -1410,12 +1417,6 @@ no_page_table:
61175 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61177 - return stack_guard_page_start(vma, addr) ||
61178 - stack_guard_page_end(vma, addr+PAGE_SIZE);
61182 * __get_user_pages() - pin user pages in memory
61183 * @tsk: task_struct of target task
61184 @@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61185 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61189 + while (nr_pages) {
61190 struct vm_area_struct *vma;
61192 - vma = find_extend_vma(mm, start);
61193 + vma = find_vma(mm, start);
61194 if (!vma && in_gate_area(mm, start)) {
61195 unsigned long pg = start & PAGE_MASK;
61197 @@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61202 + if (!vma || start < vma->vm_start ||
61203 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61204 !(vm_flags & vma->vm_flags))
61205 return i ? : -EFAULT;
61206 @@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61208 unsigned int fault_flags = 0;
61210 - /* For mlock, just skip the stack guard page. */
61211 - if (foll_flags & FOLL_MLOCK) {
61212 - if (stack_guard_page(vma, start))
61215 if (foll_flags & FOLL_WRITE)
61216 fault_flags |= FAULT_FLAG_WRITE;
61218 @@ -1644,7 +1640,7 @@ next_page:
61219 start += PAGE_SIZE;
61221 } while (nr_pages && start < vma->vm_end);
61222 - } while (nr_pages);
61226 EXPORT_SYMBOL(__get_user_pages);
61227 @@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61228 page_add_file_rmap(page);
61229 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61231 +#ifdef CONFIG_PAX_SEGMEXEC
61232 + pax_mirror_file_pte(vma, addr, page, ptl);
61236 pte_unmap_unlock(pte, ptl);
61238 @@ -1829,10 +1829,22 @@ out:
61239 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61243 +#ifdef CONFIG_PAX_SEGMEXEC
61244 + struct vm_area_struct *vma_m;
61247 if (addr < vma->vm_start || addr >= vma->vm_end)
61249 if (!page_count(page))
61252 +#ifdef CONFIG_PAX_SEGMEXEC
61253 + vma_m = pax_find_mirror_vma(vma);
61255 + vma_m->vm_flags |= VM_INSERTPAGE;
61258 vma->vm_flags |= VM_INSERTPAGE;
61259 return insert_page(vma, addr, page, vma->vm_page_prot);
61261 @@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61264 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61265 + BUG_ON(vma->vm_mirror);
61267 if (addr < vma->vm_start || addr >= vma->vm_end)
61269 @@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61270 copy_user_highpage(dst, src, va, vma);
61273 +#ifdef CONFIG_PAX_SEGMEXEC
61274 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61276 + struct mm_struct *mm = vma->vm_mm;
61278 + pte_t *pte, entry;
61280 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61282 + if (!pte_present(entry)) {
61283 + if (!pte_none(entry)) {
61284 + BUG_ON(pte_file(entry));
61285 + free_swap_and_cache(pte_to_swp_entry(entry));
61286 + pte_clear_not_present_full(mm, address, pte, 0);
61289 + struct page *page;
61291 + flush_cache_page(vma, address, pte_pfn(entry));
61292 + entry = ptep_clear_flush(vma, address, pte);
61293 + BUG_ON(pte_dirty(entry));
61294 + page = vm_normal_page(vma, address, entry);
61296 + update_hiwater_rss(mm);
61297 + if (PageAnon(page))
61298 + dec_mm_counter_fast(mm, MM_ANONPAGES);
61300 + dec_mm_counter_fast(mm, MM_FILEPAGES);
61301 + page_remove_rmap(page);
61302 + page_cache_release(page);
61305 + pte_unmap_unlock(pte, ptl);
61308 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61310 + * the ptl of the lower mapped page is held on entry and is not released on exit
61311 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61313 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61315 + struct mm_struct *mm = vma->vm_mm;
61316 + unsigned long address_m;
61317 + spinlock_t *ptl_m;
61318 + struct vm_area_struct *vma_m;
61320 + pte_t *pte_m, entry_m;
61322 + BUG_ON(!page_m || !PageAnon(page_m));
61324 + vma_m = pax_find_mirror_vma(vma);
61328 + BUG_ON(!PageLocked(page_m));
61329 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61330 + address_m = address + SEGMEXEC_TASK_SIZE;
61331 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61332 + pte_m = pte_offset_map(pmd_m, address_m);
61333 + ptl_m = pte_lockptr(mm, pmd_m);
61334 + if (ptl != ptl_m) {
61335 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61336 + if (!pte_none(*pte_m))
61340 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61341 + page_cache_get(page_m);
61342 + page_add_anon_rmap(page_m, vma_m, address_m);
61343 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61344 + set_pte_at(mm, address_m, pte_m, entry_m);
61345 + update_mmu_cache(vma_m, address_m, entry_m);
61347 + if (ptl != ptl_m)
61348 + spin_unlock(ptl_m);
61349 + pte_unmap(pte_m);
61350 + unlock_page(page_m);
61353 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61355 + struct mm_struct *mm = vma->vm_mm;
61356 + unsigned long address_m;
61357 + spinlock_t *ptl_m;
61358 + struct vm_area_struct *vma_m;
61360 + pte_t *pte_m, entry_m;
61362 + BUG_ON(!page_m || PageAnon(page_m));
61364 + vma_m = pax_find_mirror_vma(vma);
61368 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61369 + address_m = address + SEGMEXEC_TASK_SIZE;
61370 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61371 + pte_m = pte_offset_map(pmd_m, address_m);
61372 + ptl_m = pte_lockptr(mm, pmd_m);
61373 + if (ptl != ptl_m) {
61374 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61375 + if (!pte_none(*pte_m))
61379 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61380 + page_cache_get(page_m);
61381 + page_add_file_rmap(page_m);
61382 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61383 + set_pte_at(mm, address_m, pte_m, entry_m);
61384 + update_mmu_cache(vma_m, address_m, entry_m);
61386 + if (ptl != ptl_m)
61387 + spin_unlock(ptl_m);
61388 + pte_unmap(pte_m);
61391 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61393 + struct mm_struct *mm = vma->vm_mm;
61394 + unsigned long address_m;
61395 + spinlock_t *ptl_m;
61396 + struct vm_area_struct *vma_m;
61398 + pte_t *pte_m, entry_m;
61400 + vma_m = pax_find_mirror_vma(vma);
61404 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61405 + address_m = address + SEGMEXEC_TASK_SIZE;
61406 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61407 + pte_m = pte_offset_map(pmd_m, address_m);
61408 + ptl_m = pte_lockptr(mm, pmd_m);
61409 + if (ptl != ptl_m) {
61410 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61411 + if (!pte_none(*pte_m))
61415 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61416 + set_pte_at(mm, address_m, pte_m, entry_m);
61418 + if (ptl != ptl_m)
61419 + spin_unlock(ptl_m);
61420 + pte_unmap(pte_m);
61423 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61425 + struct page *page_m;
61428 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61432 + page_m = vm_normal_page(vma, address, entry);
61434 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61435 + else if (PageAnon(page_m)) {
61436 + if (pax_find_mirror_vma(vma)) {
61437 + pte_unmap_unlock(pte, ptl);
61438 + lock_page(page_m);
61439 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61440 + if (pte_same(entry, *pte))
61441 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61443 + unlock_page(page_m);
61446 + pax_mirror_file_pte(vma, address, page_m, ptl);
61449 + pte_unmap_unlock(pte, ptl);
61454 * This routine handles present pages, when users try to write
61455 * to a shared page. It is done by copying the page to a new address
61456 @@ -2444,6 +2637,12 @@ gotten:
61458 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61459 if (likely(pte_same(*page_table, orig_pte))) {
61461 +#ifdef CONFIG_PAX_SEGMEXEC
61462 + if (pax_find_mirror_vma(vma))
61463 + BUG_ON(!trylock_page(new_page));
61467 if (!PageAnon(old_page)) {
61468 dec_mm_counter_fast(mm, MM_FILEPAGES);
61469 @@ -2495,6 +2694,10 @@ gotten:
61470 page_remove_rmap(old_page);
61473 +#ifdef CONFIG_PAX_SEGMEXEC
61474 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61477 /* Free the old page.. */
61478 new_page = old_page;
61479 ret |= VM_FAULT_WRITE;
61480 @@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61482 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61483 try_to_free_swap(page);
61485 +#ifdef CONFIG_PAX_SEGMEXEC
61486 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61492 @@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61494 /* No need to invalidate - it was non-present before */
61495 update_mmu_cache(vma, address, page_table);
61497 +#ifdef CONFIG_PAX_SEGMEXEC
61498 + pax_mirror_anon_pte(vma, address, page, ptl);
61502 pte_unmap_unlock(page_table, ptl);
61504 @@ -2947,40 +3160,6 @@ out_release:
61508 - * This is like a special single-page "expand_{down|up}wards()",
61509 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61510 - * doesn't hit another vma.
61512 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61514 - address &= PAGE_MASK;
61515 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61516 - struct vm_area_struct *prev = vma->vm_prev;
61519 - * Is there a mapping abutting this one below?
61521 - * That's only ok if it's the same stack mapping
61522 - * that has gotten split..
61524 - if (prev && prev->vm_end == address)
61525 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61527 - expand_stack(vma, address - PAGE_SIZE);
61529 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61530 - struct vm_area_struct *next = vma->vm_next;
61532 - /* As VM_GROWSDOWN but s/below/above/ */
61533 - if (next && next->vm_start == address + PAGE_SIZE)
61534 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61536 - expand_upwards(vma, address + PAGE_SIZE);
61542 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61543 * but allow concurrent faults), and pte mapped but not yet locked.
61544 * We return with mmap_sem still held, but pte unmapped and unlocked.
61545 @@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61546 unsigned long address, pte_t *page_table, pmd_t *pmd,
61547 unsigned int flags)
61549 - struct page *page;
61550 + struct page *page = NULL;
61554 - pte_unmap(page_table);
61556 - /* Check if we need to add a guard page to the stack */
61557 - if (check_stack_guard_page(vma, address) < 0)
61558 - return VM_FAULT_SIGBUS;
61560 - /* Use the zero-page for reads */
61561 if (!(flags & FAULT_FLAG_WRITE)) {
61562 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61563 vma->vm_page_prot));
61564 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61565 + ptl = pte_lockptr(mm, pmd);
61567 if (!pte_none(*page_table))
61572 /* Allocate our own private page. */
61573 + pte_unmap(page_table);
61575 if (unlikely(anon_vma_prepare(vma)))
61577 page = alloc_zeroed_user_highpage_movable(vma, address);
61578 @@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61579 if (!pte_none(*page_table))
61582 +#ifdef CONFIG_PAX_SEGMEXEC
61583 + if (pax_find_mirror_vma(vma))
61584 + BUG_ON(!trylock_page(page));
61587 inc_mm_counter_fast(mm, MM_ANONPAGES);
61588 page_add_new_anon_rmap(page, vma, address);
61590 @@ -3035,6 +3215,12 @@ setpte:
61592 /* No need to invalidate - it was non-present before */
61593 update_mmu_cache(vma, address, page_table);
61595 +#ifdef CONFIG_PAX_SEGMEXEC
61597 + pax_mirror_anon_pte(vma, address, page, ptl);
61601 pte_unmap_unlock(page_table, ptl);
61603 @@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61605 /* Only go through if we didn't race with anybody else... */
61606 if (likely(pte_same(*page_table, orig_pte))) {
61608 +#ifdef CONFIG_PAX_SEGMEXEC
61609 + if (anon && pax_find_mirror_vma(vma))
61610 + BUG_ON(!trylock_page(page));
61613 flush_icache_page(vma, page);
61614 entry = mk_pte(page, vma->vm_page_prot);
61615 if (flags & FAULT_FLAG_WRITE)
61616 @@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61618 /* no need to invalidate: a not-present page won't be cached */
61619 update_mmu_cache(vma, address, page_table);
61621 +#ifdef CONFIG_PAX_SEGMEXEC
61623 + pax_mirror_anon_pte(vma, address, page, ptl);
61625 + pax_mirror_file_pte(vma, address, page, ptl);
61630 mem_cgroup_uncharge_page(page);
61631 @@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61632 if (flags & FAULT_FLAG_WRITE)
61633 flush_tlb_fix_spurious_fault(vma, address);
61636 +#ifdef CONFIG_PAX_SEGMEXEC
61637 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61642 pte_unmap_unlock(pte, ptl);
61644 @@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61648 +#ifdef CONFIG_PAX_SEGMEXEC
61649 + struct vm_area_struct *vma_m;
61652 __set_current_state(TASK_RUNNING);
61654 count_vm_event(PGFAULT);
61655 @@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61656 if (unlikely(is_vm_hugetlb_page(vma)))
61657 return hugetlb_fault(mm, vma, address, flags);
61659 +#ifdef CONFIG_PAX_SEGMEXEC
61660 + vma_m = pax_find_mirror_vma(vma);
61662 + unsigned long address_m;
61667 + if (vma->vm_start > vma_m->vm_start) {
61668 + address_m = address;
61669 + address -= SEGMEXEC_TASK_SIZE;
61672 + address_m = address + SEGMEXEC_TASK_SIZE;
61674 + pgd_m = pgd_offset(mm, address_m);
61675 + pud_m = pud_alloc(mm, pgd_m, address_m);
61677 + return VM_FAULT_OOM;
61678 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61680 + return VM_FAULT_OOM;
61681 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61682 + return VM_FAULT_OOM;
61683 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61687 pgd = pgd_offset(mm, address);
61688 pud = pud_alloc(mm, pgd, address);
61690 @@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61691 * run pte_offset_map on the pmd, if an huge pmd could
61692 * materialize from under us from a different thread.
61694 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61695 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61696 return VM_FAULT_OOM;
61697 /* if an huge pmd materialized from under us just retry later */
61698 if (unlikely(pmd_trans_huge(*pmd)))
61699 @@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61700 gate_vma.vm_start = FIXADDR_USER_START;
61701 gate_vma.vm_end = FIXADDR_USER_END;
61702 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61703 - gate_vma.vm_page_prot = __P101;
61704 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61706 * Make sure the vDSO gets into every core dump.
61707 * Dumping its contents makes post-mortem fully interpretable later
61708 diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61709 --- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61710 +++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61711 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61713 int sysctl_memory_failure_recovery __read_mostly = 1;
61715 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61716 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61718 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61720 @@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61723 nr_pages = 1 << compound_trans_order(hpage);
61724 - atomic_long_add(nr_pages, &mce_bad_pages);
61725 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61728 * We need/can do nothing about count=0 pages.
61729 @@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61730 if (!PageHWPoison(hpage)
61731 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61732 || (p != hpage && TestSetPageHWPoison(hpage))) {
61733 - atomic_long_sub(nr_pages, &mce_bad_pages);
61734 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61737 set_page_hwpoison_huge_page(hpage);
61738 @@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61740 if (hwpoison_filter(p)) {
61741 if (TestClearPageHWPoison(p))
61742 - atomic_long_sub(nr_pages, &mce_bad_pages);
61743 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61744 unlock_page(hpage);
61747 @@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61750 if (TestClearPageHWPoison(p))
61751 - atomic_long_sub(nr_pages, &mce_bad_pages);
61752 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61753 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61756 @@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61758 if (TestClearPageHWPoison(page)) {
61759 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61760 - atomic_long_sub(nr_pages, &mce_bad_pages);
61761 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61763 if (PageHuge(page))
61764 clear_page_hwpoison_huge_page(page);
61765 @@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61768 if (!PageHWPoison(hpage))
61769 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61770 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61771 set_page_hwpoison_huge_page(hpage);
61772 dequeue_hwpoisoned_huge_page(hpage);
61773 /* keep elevated page count for bad page */
61774 @@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61778 - atomic_long_add(1, &mce_bad_pages);
61779 + atomic_long_add_unchecked(1, &mce_bad_pages);
61780 SetPageHWPoison(page);
61781 /* keep elevated page count for bad page */
61783 diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61784 --- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61785 +++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61786 @@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61787 unsigned long vmstart;
61788 unsigned long vmend;
61790 +#ifdef CONFIG_PAX_SEGMEXEC
61791 + struct vm_area_struct *vma_m;
61794 vma = find_vma_prev(mm, start, &prev);
61795 if (!vma || vma->vm_start > start)
61797 @@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61798 err = policy_vma(vma, new_pol);
61802 +#ifdef CONFIG_PAX_SEGMEXEC
61803 + vma_m = pax_find_mirror_vma(vma);
61805 + err = policy_vma(vma_m, new_pol);
61814 @@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61819 +#ifdef CONFIG_PAX_SEGMEXEC
61820 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61821 + if (end > SEGMEXEC_TASK_SIZE)
61826 + if (end > TASK_SIZE)
61832 @@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61836 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61837 + if (mm != current->mm &&
61838 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61845 * Check if this process has the right to modify the specified
61846 * process. The right exists if the process has administrative
61847 @@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61849 tcred = __task_cred(task);
61850 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61851 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61852 - !capable(CAP_SYS_NICE)) {
61853 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61857 @@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61860 seq_printf(m, " file=");
61861 - seq_path(m, &file->f_path, "\n\t= ");
61862 + seq_path(m, &file->f_path, "\n\t\\= ");
61863 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61864 seq_printf(m, " heap");
61865 } else if (vma->vm_start <= mm->start_stack &&
61866 diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61867 --- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61868 +++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61869 @@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61870 unsigned long chunk_start;
61873 + pax_track_stack();
61875 task_nodes = cpuset_mems_allowed(task);
61878 @@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61882 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61883 + if (mm != current->mm &&
61884 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61891 * Check if this process has the right to modify the specified
61892 * process. The right exists if the process has administrative
61893 @@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61895 tcred = __task_cred(task);
61896 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61897 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61898 - !capable(CAP_SYS_NICE)) {
61899 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61903 diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61904 --- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61905 +++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61907 #include <linux/pagemap.h>
61908 #include <linux/mempolicy.h>
61909 #include <linux/syscalls.h>
61910 +#include <linux/security.h>
61911 #include <linux/sched.h>
61912 #include <linux/module.h>
61913 #include <linux/rmap.h>
61914 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61918 + if (end > TASK_SIZE)
61921 vma = find_vma_prev(current->mm, start, &prev);
61922 if (!vma || vma->vm_start > start)
61924 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61925 for (nstart = start ; ; ) {
61926 unsigned int newflags;
61928 +#ifdef CONFIG_PAX_SEGMEXEC
61929 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61933 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61935 newflags = vma->vm_flags | VM_LOCKED;
61936 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61937 lock_limit >>= PAGE_SHIFT;
61939 /* check against resource limits */
61940 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61941 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61942 error = do_mlock(start, len, 1);
61943 up_write(¤t->mm->mmap_sem);
61944 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61945 static int do_mlockall(int flags)
61947 struct vm_area_struct * vma, * prev = NULL;
61948 - unsigned int def_flags = 0;
61950 if (flags & MCL_FUTURE)
61951 - def_flags = VM_LOCKED;
61952 - current->mm->def_flags = def_flags;
61953 + current->mm->def_flags |= VM_LOCKED;
61955 + current->mm->def_flags &= ~VM_LOCKED;
61956 if (flags == MCL_FUTURE)
61959 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61960 - unsigned int newflags;
61961 + unsigned long newflags;
61963 +#ifdef CONFIG_PAX_SEGMEXEC
61964 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61968 + BUG_ON(vma->vm_end > TASK_SIZE);
61969 newflags = vma->vm_flags | VM_LOCKED;
61970 if (!(flags & MCL_CURRENT))
61971 newflags &= ~VM_LOCKED;
61972 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61973 lock_limit >>= PAGE_SHIFT;
61976 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61977 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61978 capable(CAP_IPC_LOCK))
61979 ret = do_mlockall(flags);
61980 diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
61981 --- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
61982 +++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
61984 #define arch_rebalance_pgtables(addr, len) (addr)
61987 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61989 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61990 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61991 + up_read(&mm->mmap_sem);
61997 static void unmap_region(struct mm_struct *mm,
61998 struct vm_area_struct *vma, struct vm_area_struct *prev,
61999 unsigned long start, unsigned long end);
62000 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
62001 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
62004 -pgprot_t protection_map[16] = {
62005 +pgprot_t protection_map[16] __read_only = {
62006 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
62007 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62010 pgprot_t vm_get_page_prot(unsigned long vm_flags)
62012 - return __pgprot(pgprot_val(protection_map[vm_flags &
62013 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
62014 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
62015 pgprot_val(arch_vm_get_page_prot(vm_flags)));
62017 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62018 + if (!(__supported_pte_mask & _PAGE_NX) &&
62019 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
62020 + (vm_flags & (VM_READ | VM_WRITE)))
62021 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
62026 EXPORT_SYMBOL(vm_get_page_prot);
62028 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
62029 int sysctl_overcommit_ratio = 50; /* default is 50% */
62030 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
62031 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
62032 struct percpu_counter vm_committed_as;
62035 @@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
62036 struct vm_area_struct *next = vma->vm_next;
62039 + BUG_ON(vma->vm_mirror);
62040 if (vma->vm_ops && vma->vm_ops->close)
62041 vma->vm_ops->close(vma);
62042 if (vma->vm_file) {
62043 @@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
62044 * not page aligned -Ram Gupta
62046 rlim = rlimit(RLIMIT_DATA);
62047 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
62048 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
62049 (mm->end_data - mm->start_data) > rlim)
62051 @@ -719,6 +741,12 @@ static int
62052 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
62053 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62056 +#ifdef CONFIG_PAX_SEGMEXEC
62057 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
62061 if (is_mergeable_vma(vma, file, vm_flags) &&
62062 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62063 if (vma->vm_pgoff == vm_pgoff)
62064 @@ -738,6 +766,12 @@ static int
62065 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62066 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62069 +#ifdef CONFIG_PAX_SEGMEXEC
62070 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62074 if (is_mergeable_vma(vma, file, vm_flags) &&
62075 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62077 @@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62078 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62079 struct vm_area_struct *prev, unsigned long addr,
62080 unsigned long end, unsigned long vm_flags,
62081 - struct anon_vma *anon_vma, struct file *file,
62082 + struct anon_vma *anon_vma, struct file *file,
62083 pgoff_t pgoff, struct mempolicy *policy)
62085 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62086 struct vm_area_struct *area, *next;
62089 +#ifdef CONFIG_PAX_SEGMEXEC
62090 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62091 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62093 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62097 * We later require that vma->vm_flags == vm_flags,
62098 * so this tests vma->vm_flags & VM_SPECIAL, too.
62099 @@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62100 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62101 next = next->vm_next;
62103 +#ifdef CONFIG_PAX_SEGMEXEC
62105 + prev_m = pax_find_mirror_vma(prev);
62107 + area_m = pax_find_mirror_vma(area);
62109 + next_m = pax_find_mirror_vma(next);
62113 * Can it merge with the predecessor?
62115 @@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62117 err = vma_adjust(prev, prev->vm_start,
62118 next->vm_end, prev->vm_pgoff, NULL);
62119 - } else /* cases 2, 5, 7 */
62121 +#ifdef CONFIG_PAX_SEGMEXEC
62122 + if (!err && prev_m)
62123 + err = vma_adjust(prev_m, prev_m->vm_start,
62124 + next_m->vm_end, prev_m->vm_pgoff, NULL);
62127 + } else { /* cases 2, 5, 7 */
62128 err = vma_adjust(prev, prev->vm_start,
62129 end, prev->vm_pgoff, NULL);
62131 +#ifdef CONFIG_PAX_SEGMEXEC
62132 + if (!err && prev_m)
62133 + err = vma_adjust(prev_m, prev_m->vm_start,
62134 + end_m, prev_m->vm_pgoff, NULL);
62140 khugepaged_enter_vma_merge(prev);
62141 @@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62142 mpol_equal(policy, vma_policy(next)) &&
62143 can_vma_merge_before(next, vm_flags,
62144 anon_vma, file, pgoff+pglen)) {
62145 - if (prev && addr < prev->vm_end) /* case 4 */
62146 + if (prev && addr < prev->vm_end) { /* case 4 */
62147 err = vma_adjust(prev, prev->vm_start,
62148 addr, prev->vm_pgoff, NULL);
62149 - else /* cases 3, 8 */
62151 +#ifdef CONFIG_PAX_SEGMEXEC
62152 + if (!err && prev_m)
62153 + err = vma_adjust(prev_m, prev_m->vm_start,
62154 + addr_m, prev_m->vm_pgoff, NULL);
62157 + } else { /* cases 3, 8 */
62158 err = vma_adjust(area, addr, next->vm_end,
62159 next->vm_pgoff - pglen, NULL);
62161 +#ifdef CONFIG_PAX_SEGMEXEC
62162 + if (!err && area_m)
62163 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
62164 + next_m->vm_pgoff - pglen, NULL);
62170 khugepaged_enter_vma_merge(area);
62171 @@ -958,14 +1038,11 @@ none:
62172 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62173 struct file *file, long pages)
62175 - const unsigned long stack_flags
62176 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62179 mm->shared_vm += pages;
62180 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62181 mm->exec_vm += pages;
62182 - } else if (flags & stack_flags)
62183 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62184 mm->stack_vm += pages;
62185 if (flags & (VM_RESERVED|VM_IO))
62186 mm->reserved_vm += pages;
62187 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62188 * (the exception is when the underlying filesystem is noexec
62189 * mounted, in which case we dont add PROT_EXEC.)
62191 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62192 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62193 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62196 @@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62197 /* Obtain the address to map to. we verify (or select) it and ensure
62198 * that it represents a valid section of the address space.
62200 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
62201 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62202 if (addr & ~PAGE_MASK)
62205 @@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62206 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62207 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62209 +#ifdef CONFIG_PAX_MPROTECT
62210 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62211 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62212 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62213 + gr_log_rwxmmap(file);
62215 +#ifdef CONFIG_PAX_EMUPLT
62216 + vm_flags &= ~VM_EXEC;
62223 + if (!(vm_flags & VM_EXEC))
62224 + vm_flags &= ~VM_MAYEXEC;
62226 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62227 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62230 + vm_flags &= ~VM_MAYWRITE;
62234 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62235 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62236 + vm_flags &= ~VM_PAGEEXEC;
62239 if (flags & MAP_LOCKED)
62240 if (!can_do_mlock())
62242 @@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62243 locked += mm->locked_vm;
62244 lock_limit = rlimit(RLIMIT_MEMLOCK);
62245 lock_limit >>= PAGE_SHIFT;
62246 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62247 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62250 @@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62254 + if (!gr_acl_handle_mmap(file, prot))
62257 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62259 EXPORT_SYMBOL(do_mmap_pgoff);
62260 @@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62262 int vma_wants_writenotify(struct vm_area_struct *vma)
62264 - unsigned int vm_flags = vma->vm_flags;
62265 + unsigned long vm_flags = vma->vm_flags;
62267 /* If it was private or non-writable, the write bit is already clear */
62268 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62269 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62272 /* The backer wishes to know when pages are first written to? */
62273 @@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62274 unsigned long charged = 0;
62275 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62277 +#ifdef CONFIG_PAX_SEGMEXEC
62278 + struct vm_area_struct *vma_m = NULL;
62282 + * mm->mmap_sem is required to protect against another thread
62283 + * changing the mappings in case we sleep.
62285 + verify_mm_writelocked(mm);
62287 /* Clear old maps */
62290 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62291 if (vma && vma->vm_start < addr + len) {
62292 if (do_munmap(mm, addr, len))
62294 - goto munmap_back;
62295 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62296 + BUG_ON(vma && vma->vm_start < addr + len);
62299 /* Check against address space limit. */
62300 @@ -1295,6 +1416,16 @@ munmap_back:
62304 +#ifdef CONFIG_PAX_SEGMEXEC
62305 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62306 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62315 vma->vm_start = addr;
62316 vma->vm_end = addr + len;
62317 @@ -1318,6 +1449,19 @@ munmap_back:
62318 error = file->f_op->mmap(file, vma);
62320 goto unmap_and_free_vma;
62322 +#ifdef CONFIG_PAX_SEGMEXEC
62323 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62324 + added_exe_file_vma(mm);
62327 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62328 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62329 + vma->vm_flags |= VM_PAGEEXEC;
62330 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62334 if (vm_flags & VM_EXECUTABLE)
62335 added_exe_file_vma(mm);
62337 @@ -1353,6 +1497,11 @@ munmap_back:
62338 vma_link(mm, vma, prev, rb_link, rb_parent);
62339 file = vma->vm_file;
62341 +#ifdef CONFIG_PAX_SEGMEXEC
62343 + BUG_ON(pax_mirror_vma(vma_m, vma));
62346 /* Once vma denies write, undo our temporary denial count */
62347 if (correct_wcount)
62348 atomic_inc(&inode->i_writecount);
62349 @@ -1361,6 +1510,7 @@ out:
62351 mm->total_vm += len >> PAGE_SHIFT;
62352 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62353 + track_exec_limit(mm, addr, addr + len, vm_flags);
62354 if (vm_flags & VM_LOCKED) {
62355 if (!mlock_vma_pages_range(vma, addr, addr + len))
62356 mm->locked_vm += (len >> PAGE_SHIFT);
62357 @@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62358 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62362 +#ifdef CONFIG_PAX_SEGMEXEC
62364 + kmem_cache_free(vm_area_cachep, vma_m);
62367 kmem_cache_free(vm_area_cachep, vma);
62370 @@ -1385,6 +1541,44 @@ unacct_error:
62374 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62377 +#ifdef CONFIG_STACK_GROWSUP
62378 + if (addr > sysctl_heap_stack_gap)
62379 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62381 + vma = find_vma(current->mm, 0);
62382 + if (vma && (vma->vm_flags & VM_GROWSUP))
62388 + if (addr + len > vma->vm_start)
62391 + if (vma->vm_flags & VM_GROWSDOWN)
62392 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62393 +#ifdef CONFIG_STACK_GROWSUP
62394 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62395 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62401 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62403 + if (vma->vm_start < len)
62405 + if (!(vma->vm_flags & VM_GROWSDOWN))
62406 + return vma->vm_start - len;
62407 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62408 + return vma->vm_start - len - sysctl_heap_stack_gap;
62412 /* Get an address range which is currently unmapped.
62413 * For shmat() with addr=0.
62415 @@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62416 if (flags & MAP_FIXED)
62419 +#ifdef CONFIG_PAX_RANDMMAP
62420 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62424 addr = PAGE_ALIGN(addr);
62425 - vma = find_vma(mm, addr);
62426 - if (TASK_SIZE - len >= addr &&
62427 - (!vma || addr + len <= vma->vm_start))
62429 + if (TASK_SIZE - len >= addr) {
62430 + vma = find_vma(mm, addr);
62431 + if (check_heap_stack_gap(vma, addr, len))
62435 if (len > mm->cached_hole_size) {
62436 - start_addr = addr = mm->free_area_cache;
62437 + start_addr = addr = mm->free_area_cache;
62439 - start_addr = addr = TASK_UNMAPPED_BASE;
62440 - mm->cached_hole_size = 0;
62441 + start_addr = addr = mm->mmap_base;
62442 + mm->cached_hole_size = 0;
62446 @@ -1433,34 +1632,40 @@ full_search:
62447 * Start a new search - just in case we missed
62450 - if (start_addr != TASK_UNMAPPED_BASE) {
62451 - addr = TASK_UNMAPPED_BASE;
62452 - start_addr = addr;
62453 + if (start_addr != mm->mmap_base) {
62454 + start_addr = addr = mm->mmap_base;
62455 mm->cached_hole_size = 0;
62460 - if (!vma || addr + len <= vma->vm_start) {
62462 - * Remember the place where we stopped the search:
62464 - mm->free_area_cache = addr + len;
62467 + if (check_heap_stack_gap(vma, addr, len))
62469 if (addr + mm->cached_hole_size < vma->vm_start)
62470 mm->cached_hole_size = vma->vm_start - addr;
62471 addr = vma->vm_end;
62475 + * Remember the place where we stopped the search:
62477 + mm->free_area_cache = addr + len;
62482 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62485 +#ifdef CONFIG_PAX_SEGMEXEC
62486 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62491 * Is this a new hole at the lowest possible address?
62493 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62494 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62495 mm->free_area_cache = addr;
62496 mm->cached_hole_size = ~0UL;
62498 @@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62500 struct vm_area_struct *vma;
62501 struct mm_struct *mm = current->mm;
62502 - unsigned long addr = addr0;
62503 + unsigned long base = mm->mmap_base, addr = addr0;
62505 /* requested length too big for entire address space */
62506 if (len > TASK_SIZE)
62507 @@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62508 if (flags & MAP_FIXED)
62511 +#ifdef CONFIG_PAX_RANDMMAP
62512 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62515 /* requesting a specific address */
62517 addr = PAGE_ALIGN(addr);
62518 - vma = find_vma(mm, addr);
62519 - if (TASK_SIZE - len >= addr &&
62520 - (!vma || addr + len <= vma->vm_start))
62522 + if (TASK_SIZE - len >= addr) {
62523 + vma = find_vma(mm, addr);
62524 + if (check_heap_stack_gap(vma, addr, len))
62529 /* check if free_area_cache is useful for us */
62530 @@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62531 /* make sure it can fit in the remaining address space */
62533 vma = find_vma(mm, addr-len);
62534 - if (!vma || addr <= vma->vm_start)
62535 + if (check_heap_stack_gap(vma, addr - len, len))
62536 /* remember the address as a hint for next time */
62537 return (mm->free_area_cache = addr-len);
62539 @@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62540 * return with success:
62542 vma = find_vma(mm, addr);
62543 - if (!vma || addr+len <= vma->vm_start)
62544 + if (check_heap_stack_gap(vma, addr, len))
62545 /* remember the address as a hint for next time */
62546 return (mm->free_area_cache = addr);
62548 @@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62549 mm->cached_hole_size = vma->vm_start - addr;
62551 /* try just below the current vma->vm_start */
62552 - addr = vma->vm_start-len;
62553 - } while (len < vma->vm_start);
62554 + addr = skip_heap_stack_gap(vma, len);
62555 + } while (!IS_ERR_VALUE(addr));
62559 @@ -1544,13 +1754,21 @@ bottomup:
62560 * can happen with large stack limits and large mmap()
62563 + mm->mmap_base = TASK_UNMAPPED_BASE;
62565 +#ifdef CONFIG_PAX_RANDMMAP
62566 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62567 + mm->mmap_base += mm->delta_mmap;
62570 + mm->free_area_cache = mm->mmap_base;
62571 mm->cached_hole_size = ~0UL;
62572 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62573 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62575 * Restore the topdown base:
62577 - mm->free_area_cache = mm->mmap_base;
62578 + mm->mmap_base = base;
62579 + mm->free_area_cache = base;
62580 mm->cached_hole_size = ~0UL;
62583 @@ -1559,6 +1777,12 @@ bottomup:
62585 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62588 +#ifdef CONFIG_PAX_SEGMEXEC
62589 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62594 * Is this a new hole at the highest possible address?
62596 @@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62597 mm->free_area_cache = addr;
62599 /* dont allow allocations above current base */
62600 - if (mm->free_area_cache > mm->mmap_base)
62601 + if (mm->free_area_cache > mm->mmap_base) {
62602 mm->free_area_cache = mm->mmap_base;
62603 + mm->cached_hole_size = ~0UL;
62608 @@ -1675,6 +1901,28 @@ out:
62609 return prev ? prev->vm_next : vma;
62612 +#ifdef CONFIG_PAX_SEGMEXEC
62613 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62615 + struct vm_area_struct *vma_m;
62617 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62618 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62619 + BUG_ON(vma->vm_mirror);
62622 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62623 + vma_m = vma->vm_mirror;
62624 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62625 + BUG_ON(vma->vm_file != vma_m->vm_file);
62626 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62627 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62628 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62629 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62635 * Verify that the stack growth is acceptable and
62636 * update accounting. This is shared with both the
62637 @@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62640 /* Stack limit test */
62641 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62642 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62645 @@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62646 locked = mm->locked_vm + grow;
62647 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62648 limit >>= PAGE_SHIFT;
62649 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62650 if (locked > limit && !capable(CAP_IPC_LOCK))
62653 @@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62654 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62655 * vma is the last one with address > vma->vm_end. Have to extend vma.
62657 +#ifndef CONFIG_IA64
62660 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62665 if (!(vma->vm_flags & VM_GROWSUP))
62668 + /* Also guard against wrapping around to address 0. */
62669 + if (address < PAGE_ALIGN(address+1))
62670 + address = PAGE_ALIGN(address+1);
62675 * We must make sure the anon_vma is allocated
62676 * so that the anon_vma locking is not a noop.
62678 if (unlikely(anon_vma_prepare(vma)))
62680 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62681 + if (locknext && anon_vma_prepare(vma->vm_next))
62683 vma_lock_anon_vma(vma);
62685 + vma_lock_anon_vma(vma->vm_next);
62688 * vma->vm_start/vm_end cannot change under us because the caller
62689 * is required to hold the mmap_sem in read mode. We need the
62690 - * anon_vma lock to serialize against concurrent expand_stacks.
62691 - * Also guard against wrapping around to address 0.
62692 + * anon_vma locks to serialize against concurrent expand_stacks
62693 + * and expand_upwards.
62695 - if (address < PAGE_ALIGN(address+4))
62696 - address = PAGE_ALIGN(address+4);
62698 - vma_unlock_anon_vma(vma);
62703 /* Somebody else might have raced and expanded it already */
62704 - if (address > vma->vm_end) {
62705 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62707 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62708 unsigned long size, grow;
62710 size = address - vma->vm_start;
62711 @@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62716 + vma_unlock_anon_vma(vma->vm_next);
62717 vma_unlock_anon_vma(vma);
62718 khugepaged_enter_vma_merge(vma);
62720 @@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62721 unsigned long address)
62724 + bool lockprev = false;
62725 + struct vm_area_struct *prev;
62728 * We must make sure the anon_vma is allocated
62729 @@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62733 + prev = vma->vm_prev;
62734 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62735 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62737 + if (lockprev && anon_vma_prepare(prev))
62740 + vma_lock_anon_vma(prev);
62742 vma_lock_anon_vma(vma);
62745 @@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62748 /* Somebody else might have raced and expanded it already */
62749 - if (address < vma->vm_start) {
62750 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62752 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62753 unsigned long size, grow;
62755 +#ifdef CONFIG_PAX_SEGMEXEC
62756 + struct vm_area_struct *vma_m;
62758 + vma_m = pax_find_mirror_vma(vma);
62761 size = vma->vm_end - address;
62762 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62764 @@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62766 vma->vm_start = address;
62767 vma->vm_pgoff -= grow;
62768 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62770 +#ifdef CONFIG_PAX_SEGMEXEC
62772 + vma_m->vm_start -= grow << PAGE_SHIFT;
62773 + vma_m->vm_pgoff -= grow;
62777 perf_event_mmap(vma);
62781 vma_unlock_anon_vma(vma);
62783 + vma_unlock_anon_vma(prev);
62784 khugepaged_enter_vma_merge(vma);
62787 @@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62789 long nrpages = vma_pages(vma);
62791 +#ifdef CONFIG_PAX_SEGMEXEC
62792 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62793 + vma = remove_vma(vma);
62798 mm->total_vm -= nrpages;
62799 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62800 vma = remove_vma(vma);
62801 @@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62802 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62803 vma->vm_prev = NULL;
62806 +#ifdef CONFIG_PAX_SEGMEXEC
62807 + if (vma->vm_mirror) {
62808 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62809 + vma->vm_mirror->vm_mirror = NULL;
62810 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62811 + vma->vm_mirror = NULL;
62815 rb_erase(&vma->vm_rb, &mm->mm_rb);
62818 @@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62819 struct vm_area_struct *new;
62822 +#ifdef CONFIG_PAX_SEGMEXEC
62823 + struct vm_area_struct *vma_m, *new_m = NULL;
62824 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62827 if (is_vm_hugetlb_page(vma) && (addr &
62828 ~(huge_page_mask(hstate_vma(vma)))))
62831 +#ifdef CONFIG_PAX_SEGMEXEC
62832 + vma_m = pax_find_mirror_vma(vma);
62835 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62839 +#ifdef CONFIG_PAX_SEGMEXEC
62841 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62843 + kmem_cache_free(vm_area_cachep, new);
62849 /* most fields are the same, copy all, and then fixup */
62852 @@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62853 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62856 +#ifdef CONFIG_PAX_SEGMEXEC
62859 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62860 + new_m->vm_mirror = new;
62861 + new->vm_mirror = new_m;
62864 + new_m->vm_end = addr_m;
62866 + new_m->vm_start = addr_m;
62867 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62872 pol = mpol_dup(vma_policy(vma));
62874 err = PTR_ERR(pol);
62875 @@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62877 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62879 +#ifdef CONFIG_PAX_SEGMEXEC
62880 + if (!err && vma_m) {
62881 + if (anon_vma_clone(new_m, vma_m))
62882 + goto out_free_mpol;
62885 + vma_set_policy(new_m, pol);
62887 + if (new_m->vm_file) {
62888 + get_file(new_m->vm_file);
62889 + if (vma_m->vm_flags & VM_EXECUTABLE)
62890 + added_exe_file_vma(mm);
62893 + if (new_m->vm_ops && new_m->vm_ops->open)
62894 + new_m->vm_ops->open(new_m);
62897 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62898 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62900 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62903 + if (new_m->vm_ops && new_m->vm_ops->close)
62904 + new_m->vm_ops->close(new_m);
62905 + if (new_m->vm_file) {
62906 + if (vma_m->vm_flags & VM_EXECUTABLE)
62907 + removed_exe_file_vma(mm);
62908 + fput(new_m->vm_file);
62918 @@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62919 removed_exe_file_vma(mm);
62920 fput(new->vm_file);
62922 - unlink_anon_vmas(new);
62927 +#ifdef CONFIG_PAX_SEGMEXEC
62929 + unlink_anon_vmas(new_m);
62930 + kmem_cache_free(vm_area_cachep, new_m);
62934 + unlink_anon_vmas(new);
62935 kmem_cache_free(vm_area_cachep, new);
62938 @@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62939 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62940 unsigned long addr, int new_below)
62943 +#ifdef CONFIG_PAX_SEGMEXEC
62944 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62945 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62946 + if (mm->map_count >= sysctl_max_map_count-1)
62951 if (mm->map_count >= sysctl_max_map_count)
62954 @@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62955 * work. This now handles partial unmappings.
62956 * Jeremy Fitzhardinge <jeremy@goop.org>
62958 +#ifdef CONFIG_PAX_SEGMEXEC
62959 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62961 + int ret = __do_munmap(mm, start, len);
62962 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62965 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62968 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62970 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62974 struct vm_area_struct *vma, *prev, *last;
62977 + * mm->mmap_sem is required to protect against another thread
62978 + * changing the mappings in case we sleep.
62980 + verify_mm_writelocked(mm);
62982 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62985 @@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
62986 /* Fix up all other VM information */
62987 remove_vma_list(mm, vma);
62989 + track_exec_limit(mm, start, end, 0UL);
62994 @@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62996 profile_munmap(addr);
62998 +#ifdef CONFIG_PAX_SEGMEXEC
62999 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
63000 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
63004 down_write(&mm->mmap_sem);
63005 ret = do_munmap(mm, addr, len);
63006 up_write(&mm->mmap_sem);
63010 -static inline void verify_mm_writelocked(struct mm_struct *mm)
63012 -#ifdef CONFIG_DEBUG_VM
63013 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63015 - up_read(&mm->mmap_sem);
63021 * this is really a simplified "do_mmap". it only handles
63022 * anonymous maps. eventually we may be able to do some
63023 @@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
63024 struct rb_node ** rb_link, * rb_parent;
63025 pgoff_t pgoff = addr >> PAGE_SHIFT;
63027 + unsigned long charged;
63029 len = PAGE_ALIGN(len);
63031 @@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
63033 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
63035 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
63036 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
63037 + flags &= ~VM_EXEC;
63039 +#ifdef CONFIG_PAX_MPROTECT
63040 + if (mm->pax_flags & MF_PAX_MPROTECT)
63041 + flags &= ~VM_MAYEXEC;
63047 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
63048 if (error & ~PAGE_MASK)
63051 + charged = len >> PAGE_SHIFT;
63054 * mlock MCL_FUTURE?
63056 if (mm->def_flags & VM_LOCKED) {
63057 unsigned long locked, lock_limit;
63058 - locked = len >> PAGE_SHIFT;
63059 + locked = charged;
63060 locked += mm->locked_vm;
63061 lock_limit = rlimit(RLIMIT_MEMLOCK);
63062 lock_limit >>= PAGE_SHIFT;
63063 @@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
63065 * Clear old maps. this also does some error checking for us
63068 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63069 if (vma && vma->vm_start < addr + len) {
63070 if (do_munmap(mm, addr, len))
63072 - goto munmap_back;
63073 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63074 + BUG_ON(vma && vma->vm_start < addr + len);
63077 /* Check against address space limits *after* clearing old maps... */
63078 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63079 + if (!may_expand_vm(mm, charged))
63082 if (mm->map_count > sysctl_max_map_count)
63085 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
63086 + if (security_vm_enough_memory(charged))
63089 /* Can we just expand an old private anonymous mapping? */
63090 @@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63092 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63094 - vm_unacct_memory(len >> PAGE_SHIFT);
63095 + vm_unacct_memory(charged);
63099 @@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63100 vma_link(mm, vma, prev, rb_link, rb_parent);
63102 perf_event_mmap(vma);
63103 - mm->total_vm += len >> PAGE_SHIFT;
63104 + mm->total_vm += charged;
63105 if (flags & VM_LOCKED) {
63106 if (!mlock_vma_pages_range(vma, addr, addr + len))
63107 - mm->locked_vm += (len >> PAGE_SHIFT);
63108 + mm->locked_vm += charged;
63110 + track_exec_limit(mm, addr, addr + len, flags);
63114 @@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63115 * Walk the list again, actually closing and freeing it,
63116 * with preemption enabled, without holding any MM locks.
63120 + vma->vm_mirror = NULL;
63121 vma = remove_vma(vma);
63124 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63126 @@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63127 struct vm_area_struct * __vma, * prev;
63128 struct rb_node ** rb_link, * rb_parent;
63130 +#ifdef CONFIG_PAX_SEGMEXEC
63131 + struct vm_area_struct *vma_m = NULL;
63134 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63138 * The vm_pgoff of a purely anonymous vma should be irrelevant
63139 * until its first write fault, when page's anon_vma and index
63140 @@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63141 if ((vma->vm_flags & VM_ACCOUNT) &&
63142 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63145 +#ifdef CONFIG_PAX_SEGMEXEC
63146 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63147 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63153 vma_link(mm, vma, prev, rb_link, rb_parent);
63155 +#ifdef CONFIG_PAX_SEGMEXEC
63157 + BUG_ON(pax_mirror_vma(vma_m, vma));
63163 @@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63164 struct rb_node **rb_link, *rb_parent;
63165 struct mempolicy *pol;
63167 + BUG_ON(vma->vm_mirror);
63170 * If anonymous vma has not yet been faulted, update new pgoff
63171 * to match new location, to increase its chance of merging.
63172 @@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63176 +#ifdef CONFIG_PAX_SEGMEXEC
63177 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63179 + struct vm_area_struct *prev_m;
63180 + struct rb_node **rb_link_m, *rb_parent_m;
63181 + struct mempolicy *pol_m;
63183 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63184 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63185 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63187 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63188 + if (anon_vma_clone(vma_m, vma))
63190 + pol_m = vma_policy(vma_m);
63192 + vma_set_policy(vma_m, pol_m);
63193 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63194 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63195 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63196 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63197 + if (vma_m->vm_file)
63198 + get_file(vma_m->vm_file);
63199 + if (vma_m->vm_ops && vma_m->vm_ops->open)
63200 + vma_m->vm_ops->open(vma_m);
63201 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63202 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63203 + vma_m->vm_mirror = vma;
63204 + vma->vm_mirror = vma_m;
63210 * Return true if the calling process may expand its vm space by the passed
63212 @@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63215 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63217 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63218 if (cur + npages > lim)
63221 @@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63222 vma->vm_start = addr;
63223 vma->vm_end = addr + len;
63225 +#ifdef CONFIG_PAX_MPROTECT
63226 + if (mm->pax_flags & MF_PAX_MPROTECT) {
63227 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
63228 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63230 + if (!(vm_flags & VM_EXEC))
63231 + vm_flags &= ~VM_MAYEXEC;
63233 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63234 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63237 + vm_flags &= ~VM_MAYWRITE;
63241 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63242 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63244 diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63245 --- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63246 +++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63247 @@ -23,10 +23,16 @@
63248 #include <linux/mmu_notifier.h>
63249 #include <linux/migrate.h>
63250 #include <linux/perf_event.h>
63252 +#ifdef CONFIG_PAX_MPROTECT
63253 +#include <linux/elf.h>
63256 #include <asm/uaccess.h>
63257 #include <asm/pgtable.h>
63258 #include <asm/cacheflush.h>
63259 #include <asm/tlbflush.h>
63260 +#include <asm/mmu_context.h>
63262 #ifndef pgprot_modify
63263 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63264 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
63265 flush_tlb_range(vma, start, end);
63268 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63270 +/* called while holding the mmap semaphore for writing except stack expansion */
63270 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63272 + unsigned long oldlimit, newlimit = 0UL;
63274 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63277 + spin_lock(&mm->page_table_lock);
63278 + oldlimit = mm->context.user_cs_limit;
63279 + if ((prot & VM_EXEC) && oldlimit < end)
63280 + /* USER_CS limit moved up */
63282 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63283 + /* USER_CS limit moved down */
63284 + newlimit = start;
63287 + mm->context.user_cs_limit = newlimit;
63291 + cpus_clear(mm->context.cpu_user_cs_mask);
63292 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63295 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63297 + spin_unlock(&mm->page_table_lock);
63298 + if (newlimit == end) {
63299 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
63301 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
63302 + if (is_vm_hugetlb_page(vma))
63303 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63305 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63311 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63312 unsigned long start, unsigned long end, unsigned long newflags)
63313 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63315 int dirty_accountable = 0;
63317 +#ifdef CONFIG_PAX_SEGMEXEC
63318 + struct vm_area_struct *vma_m = NULL;
63319 + unsigned long start_m, end_m;
63321 + start_m = start + SEGMEXEC_TASK_SIZE;
63322 + end_m = end + SEGMEXEC_TASK_SIZE;
63325 if (newflags == oldflags) {
63330 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63331 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63333 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63336 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63341 * If we make a private mapping writable we increase our commit;
63342 * but (without finer accounting) cannot reduce our commit if we
63343 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63347 +#ifdef CONFIG_PAX_SEGMEXEC
63348 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63349 + if (start != vma->vm_start) {
63350 + error = split_vma(mm, vma, start, 1);
63353 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63354 + *pprev = (*pprev)->vm_next;
63357 + if (end != vma->vm_end) {
63358 + error = split_vma(mm, vma, end, 0);
63363 + if (pax_find_mirror_vma(vma)) {
63364 + error = __do_munmap(mm, start_m, end_m - start_m);
63368 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63373 + vma->vm_flags = newflags;
63374 + error = pax_mirror_vma(vma_m, vma);
63376 + vma->vm_flags = oldflags;
63384 * First try to merge with previous and/or next vma.
63386 @@ -204,9 +306,21 @@ success:
63387 * vm_flags and vm_page_prot are protected by the mmap_sem
63388 * held in write mode.
63391 +#ifdef CONFIG_PAX_SEGMEXEC
63392 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63393 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63396 vma->vm_flags = newflags;
63398 +#ifdef CONFIG_PAX_MPROTECT
63399 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63400 + mm->binfmt->handle_mprotect(vma, newflags);
63403 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63404 - vm_get_page_prot(newflags));
63405 + vm_get_page_prot(vma->vm_flags));
63407 if (vma_wants_writenotify(vma)) {
63408 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63409 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63414 +#ifdef CONFIG_PAX_SEGMEXEC
63415 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63416 + if (end > SEGMEXEC_TASK_SIZE)
63421 + if (end > TASK_SIZE)
63424 if (!arch_validate_prot(prot))
63427 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63429 * Does the application expect PROT_READ to imply PROT_EXEC:
63431 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63432 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63435 vm_flags = calc_vm_prot_bits(prot);
63436 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63437 if (start > vma->vm_start)
63440 +#ifdef CONFIG_PAX_MPROTECT
63441 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63442 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63445 for (nstart = start ; ; ) {
63446 unsigned long newflags;
63448 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63450 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63451 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63452 + if (prot & (PROT_WRITE | PROT_EXEC))
63453 + gr_log_rwxmprotect(vma->vm_file);
63459 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63463 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63464 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63468 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63472 if (nstart < prev->vm_end)
63473 diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63474 --- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63475 +++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63476 @@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63478 pte = ptep_clear_flush(vma, old_addr, old_pte);
63479 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63481 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63482 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63483 + pte = pte_exprotect(pte);
63486 set_pte_at(mm, new_addr, new_pte, pte);
63489 @@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63490 if (is_vm_hugetlb_page(vma))
63493 +#ifdef CONFIG_PAX_SEGMEXEC
63494 + if (pax_find_mirror_vma(vma))
63498 /* We can't remap across vm area boundaries */
63499 if (old_len > vma->vm_end - addr)
63501 @@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63502 unsigned long ret = -EINVAL;
63503 unsigned long charged = 0;
63504 unsigned long map_flags;
63505 + unsigned long pax_task_size = TASK_SIZE;
63507 if (new_addr & ~PAGE_MASK)
63510 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63511 +#ifdef CONFIG_PAX_SEGMEXEC
63512 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63513 + pax_task_size = SEGMEXEC_TASK_SIZE;
63516 + pax_task_size -= PAGE_SIZE;
63518 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63521 /* Check if the location we're moving into overlaps the
63522 * old location at all, and fail if it does.
63524 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63527 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63528 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63531 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63532 @@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63533 struct vm_area_struct *vma;
63534 unsigned long ret = -EINVAL;
63535 unsigned long charged = 0;
63536 + unsigned long pax_task_size = TASK_SIZE;
63538 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63540 @@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63544 +#ifdef CONFIG_PAX_SEGMEXEC
63545 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63546 + pax_task_size = SEGMEXEC_TASK_SIZE;
63549 + pax_task_size -= PAGE_SIZE;
63551 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63552 + old_len > pax_task_size || addr > pax_task_size-old_len)
63555 if (flags & MREMAP_FIXED) {
63556 if (flags & MREMAP_MAYMOVE)
63557 ret = mremap_to(addr, old_len, new_addr, new_len);
63558 @@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63562 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63566 @@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63567 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63571 + map_flags = vma->vm_flags;
63572 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63573 + if (!(ret & ~PAGE_MASK)) {
63574 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63575 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63579 if (ret & ~PAGE_MASK)
63580 diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63581 --- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63582 +++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63583 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63584 unsigned long __init free_all_memory_core_early(int nodeid)
63588 + u64 start, end, startrange, endrange;
63589 unsigned long count = 0;
63590 - struct range *range = NULL;
63591 + struct range *range = NULL, rangerange = { 0, 0 };
63594 nr_range = get_free_all_memory_range(&range, nodeid);
63595 + startrange = __pa(range) >> PAGE_SHIFT;
63596 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63598 for (i = 0; i < nr_range; i++) {
63599 start = range[i].start;
63600 end = range[i].end;
63601 + if (start <= endrange && startrange < end) {
63602 + BUG_ON(rangerange.start | rangerange.end);
63603 + rangerange = range[i];
63606 count += end - start;
63607 __free_pages_memory(start, end);
63609 + start = rangerange.start;
63610 + end = rangerange.end;
63611 + count += end - start;
63612 + __free_pages_memory(start, end);
63616 diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63617 --- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63618 +++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63619 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63620 int sysctl_overcommit_ratio = 50; /* default is 50% */
63621 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63622 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63623 -int heap_stack_gap = 0;
63625 atomic_long_t mmap_pages_allocated;
63627 @@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63628 EXPORT_SYMBOL(find_vma);
63632 - * - we don't extend stack VMAs under NOMMU conditions
63634 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63636 - return find_vma(mm, addr);
63640 * expand a stack to a given address
63641 * - not supported under NOMMU conditions
63643 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63645 /* most fields are the same, copy all, and then fixup */
63647 + INIT_LIST_HEAD(&new->anon_vma_chain);
63648 *region = *vma->vm_region;
63649 new->vm_region = region;
63651 diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63652 --- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63653 +++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63654 @@ -337,7 +337,7 @@ out:
63655 * This usage means that zero-order pages may not be compound.
63658 -static void free_compound_page(struct page *page)
63659 +void free_compound_page(struct page *page)
63661 __free_pages_ok(page, compound_order(page));
63663 @@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63667 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63668 + unsigned long index = 1UL << order;
63671 trace_mm_page_free_direct(page, order);
63672 kmemcheck_free_shadow(page, order);
63674 @@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63675 debug_check_no_obj_freed(page_address(page),
63676 PAGE_SIZE << order);
63679 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63680 + for (; index; --index)
63681 + sanitize_highpage(page + index - 1);
63684 arch_free_page(page, order);
63685 kernel_map_pages(page, 1 << order, 0);
63687 @@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63688 arch_alloc_page(page, order);
63689 kernel_map_pages(page, 1 << order, 1);
63691 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63692 if (gfp_flags & __GFP_ZERO)
63693 prep_zero_page(page, order, gfp_flags);
63696 if (order && (gfp_flags & __GFP_COMP))
63697 prep_compound_page(page, order);
63698 @@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63702 + pax_track_stack();
63704 for_each_populated_zone(zone) {
63705 if (skip_free_areas_zone(filter, zone))
63707 diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63708 --- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63709 +++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63710 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63711 static unsigned int pcpu_last_unit_cpu __read_mostly;
63713 /* the address of the first chunk which starts with the kernel static area */
63714 -void *pcpu_base_addr __read_mostly;
63715 +void *pcpu_base_addr __read_only;
63716 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63718 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63719 diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63720 --- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63721 +++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63722 @@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63723 struct anon_vma *anon_vma = vma->anon_vma;
63724 struct anon_vma_chain *avc;
63726 +#ifdef CONFIG_PAX_SEGMEXEC
63727 + struct anon_vma_chain *avc_m = NULL;
63731 if (unlikely(!anon_vma)) {
63732 struct mm_struct *mm = vma->vm_mm;
63733 @@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63737 +#ifdef CONFIG_PAX_SEGMEXEC
63738 + avc_m = anon_vma_chain_alloc();
63740 + goto out_enomem_free_avc;
63743 anon_vma = find_mergeable_anon_vma(vma);
63746 @@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63747 /* page_table_lock to protect against threads */
63748 spin_lock(&mm->page_table_lock);
63749 if (likely(!vma->anon_vma)) {
63751 +#ifdef CONFIG_PAX_SEGMEXEC
63752 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63755 + BUG_ON(vma_m->anon_vma);
63756 + vma_m->anon_vma = anon_vma;
63757 + avc_m->anon_vma = anon_vma;
63758 + avc_m->vma = vma;
63759 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63760 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63765 vma->anon_vma = anon_vma;
63766 avc->anon_vma = anon_vma;
63768 @@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63770 if (unlikely(allocated))
63771 put_anon_vma(allocated);
63773 +#ifdef CONFIG_PAX_SEGMEXEC
63774 + if (unlikely(avc_m))
63775 + anon_vma_chain_free(avc_m);
63779 anon_vma_chain_free(avc);
63783 out_enomem_free_avc:
63785 +#ifdef CONFIG_PAX_SEGMEXEC
63787 + anon_vma_chain_free(avc_m);
63790 anon_vma_chain_free(avc);
63793 @@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63794 * Attach the anon_vmas from src to dst.
63795 * Returns 0 on success, -ENOMEM on failure.
63797 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63798 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63800 struct anon_vma_chain *avc, *pavc;
63802 @@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63803 * the corresponding VMA in the parent process is attached to.
63804 * Returns 0 on success, non-zero on failure.
63806 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63807 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63809 struct anon_vma_chain *avc;
63810 struct anon_vma *anon_vma;
63811 diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63812 --- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63813 +++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63815 #include <linux/percpu_counter.h>
63816 #include <linux/swap.h>
63818 -static struct vfsmount *shm_mnt;
63819 +struct vfsmount *shm_mnt;
63821 #ifdef CONFIG_SHMEM
63823 @@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63826 entry = shmem_swp_entry(info, index, NULL);
63831 * The more uptodate page coming down from a stacked
63832 @@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63833 struct vm_area_struct pvma;
63836 + pax_track_stack();
63838 spol = mpol_cond_copy(&mpol,
63839 mpol_shared_policy_lookup(&info->policy, idx));
63841 @@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63843 info = SHMEM_I(inode);
63844 inode->i_size = len-1;
63845 - if (len <= (char *)inode - (char *)info) {
63846 + if (len <= (char *)inode - (char *)info && len <= 64) {
63848 memcpy(info, symname, len);
63849 inode->i_op = &shmem_symlink_inline_operations;
63850 @@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63853 /* Round up to L1_CACHE_BYTES to resist false sharing */
63854 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63855 - L1_CACHE_BYTES), GFP_KERNEL);
63856 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63860 diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63861 --- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63862 +++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63863 @@ -150,7 +150,7 @@
63865 /* Legal flag mask for kmem_cache_create(). */
63867 -# define CREATE_MASK (SLAB_RED_ZONE | \
63868 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63869 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63871 SLAB_STORE_USER | \
63872 @@ -158,7 +158,7 @@
63873 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63874 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63876 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63877 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63879 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63880 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63881 @@ -287,7 +287,7 @@ struct kmem_list3 {
63882 * Need this for bootstrapping a per node allocator.
63884 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63885 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63886 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63887 #define CACHE_CACHE 0
63888 #define SIZE_AC MAX_NUMNODES
63889 #define SIZE_L3 (2 * MAX_NUMNODES)
63890 @@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63891 if ((x)->max_freeable < i) \
63892 (x)->max_freeable = i; \
63894 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63895 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63896 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63897 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63898 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63899 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63900 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63901 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63903 #define STATS_INC_ACTIVE(x) do { } while (0)
63904 #define STATS_DEC_ACTIVE(x) do { } while (0)
63905 @@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63906 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63908 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63909 - const struct slab *slab, void *obj)
63910 + const struct slab *slab, const void *obj)
63912 u32 offset = (obj - slab->s_mem);
63913 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63914 @@ -563,7 +563,7 @@ struct cache_names {
63915 static struct cache_names __initdata cache_names[] = {
63916 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63917 #include <linux/kmalloc_sizes.h>
63923 @@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63924 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63925 sizes[INDEX_AC].cs_size,
63926 ARCH_KMALLOC_MINALIGN,
63927 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63928 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63931 if (INDEX_AC != INDEX_L3) {
63932 @@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63933 kmem_cache_create(names[INDEX_L3].name,
63934 sizes[INDEX_L3].cs_size,
63935 ARCH_KMALLOC_MINALIGN,
63936 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63937 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63941 @@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63942 sizes->cs_cachep = kmem_cache_create(names->name,
63944 ARCH_KMALLOC_MINALIGN,
63945 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63946 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63949 #ifdef CONFIG_ZONE_DMA
63950 @@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63954 - unsigned long allochit = atomic_read(&cachep->allochit);
63955 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63956 - unsigned long freehit = atomic_read(&cachep->freehit);
63957 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63958 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63959 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63960 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63961 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63963 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63964 allochit, allocmiss, freehit, freemiss);
63965 @@ -4530,15 +4530,66 @@ static const struct file_operations proc
63967 static int __init slab_proc_init(void)
63969 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63970 + mode_t gr_mode = S_IRUGO;
63972 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63973 + gr_mode = S_IRUSR;
63976 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63977 #ifdef CONFIG_DEBUG_SLAB_LEAK
63978 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63979 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63983 module_init(slab_proc_init);
63986 +void check_object_size(const void *ptr, unsigned long n, bool to)
63989 +#ifdef CONFIG_PAX_USERCOPY
63990 + struct page *page;
63991 + struct kmem_cache *cachep = NULL;
63992 + struct slab *slabp;
63993 + unsigned int objnr;
63994 + unsigned long offset;
63999 + if (ZERO_OR_NULL_PTR(ptr))
64002 + if (!virt_addr_valid(ptr))
64005 + page = virt_to_head_page(ptr);
64007 + if (!PageSlab(page)) {
64008 + if (object_is_on_stack(ptr, n) == -1)
64013 + cachep = page_get_cache(page);
64014 + if (!(cachep->flags & SLAB_USERCOPY))
64017 + slabp = page_get_slab(page);
64018 + objnr = obj_to_index(cachep, slabp, ptr);
64019 + BUG_ON(objnr >= cachep->num);
64020 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
64021 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
64025 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
64029 +EXPORT_SYMBOL(check_object_size);
64032 * ksize - get the actual amount of memory allocated for a given object
64033 * @objp: Pointer to the object
64034 diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
64035 --- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
64036 +++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
64038 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
64039 * alloc_pages() directly, allocating compound pages so the page order
64040 * does not have to be separately tracked, and also stores the exact
64041 - * allocation size in page->private so that it can be used to accurately
64042 + * allocation size in slob_page->size so that it can be used to accurately
64043 * provide ksize(). These objects are detected in kfree() because slob_page()
64044 * is false for them.
64049 #include <linux/kernel.h>
64050 +#include <linux/sched.h>
64051 #include <linux/slab.h>
64052 #include <linux/mm.h>
64053 #include <linux/swap.h> /* struct reclaim_state */
64054 @@ -102,7 +103,8 @@ struct slob_page {
64055 unsigned long flags; /* mandatory */
64056 atomic_t _count; /* mandatory */
64057 slobidx_t units; /* free units left in page */
64058 - unsigned long pad[2];
64059 + unsigned long pad[1];
64060 + unsigned long size; /* size when >=PAGE_SIZE */
64061 slob_t *free; /* first free slob_t in page */
64062 struct list_head list; /* linked list of free pages */
64064 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
64066 static inline int is_slob_page(struct slob_page *sp)
64068 - return PageSlab((struct page *)sp);
64069 + return PageSlab((struct page *)sp) && !sp->size;
64072 static inline void set_slob_page(struct slob_page *sp)
64073 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64075 static inline struct slob_page *slob_page(const void *addr)
64077 - return (struct slob_page *)virt_to_page(addr);
64078 + return (struct slob_page *)virt_to_head_page(addr);
64082 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64084 * Return the size of a slob block.
64086 -static slobidx_t slob_units(slob_t *s)
64087 +static slobidx_t slob_units(const slob_t *s)
64091 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64093 * Return the next free slob block pointer after this one.
64095 -static slob_t *slob_next(slob_t *s)
64096 +static slob_t *slob_next(const slob_t *s)
64098 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64100 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64102 * Returns true if s is the last free block in its page.
64104 -static int slob_last(slob_t *s)
64105 +static int slob_last(const slob_t *s)
64107 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64109 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64113 + set_slob_page(page);
64114 return page_address(page);
64117 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64121 - set_slob_page(sp);
64123 spin_lock_irqsave(&slob_lock, flags);
64124 sp->units = SLOB_UNITS(PAGE_SIZE);
64127 INIT_LIST_HEAD(&sp->list);
64128 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64129 set_slob_page_free(sp, slob_list);
64130 @@ -476,10 +479,9 @@ out:
64131 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64134 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64135 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64138 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64142 lockdep_trace_alloc(gfp);
64143 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64148 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64149 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64150 + m[0].units = size;
64151 + m[1].units = align;
64152 ret = (void *)m + align;
64154 trace_kmalloc_node(_RET_IP_, ret,
64155 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64157 ret = slob_new_pages(gfp, order, node);
64159 - struct page *page;
64160 - page = virt_to_page(ret);
64161 - page->private = size;
64162 + struct slob_page *sp;
64163 + sp = slob_page(ret);
64167 trace_kmalloc_node(_RET_IP_, ret,
64168 size, PAGE_SIZE << order, gfp, node);
64171 - kmemleak_alloc(ret, size, 1, gfp);
64175 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64177 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64178 + void *ret = __kmalloc_node_align(size, gfp, node, align);
64180 + if (!ZERO_OR_NULL_PTR(ret))
64181 + kmemleak_alloc(ret, size, 1, gfp);
64184 EXPORT_SYMBOL(__kmalloc_node);
64185 @@ -531,13 +545,88 @@ void kfree(const void *block)
64186 sp = slob_page(block);
64187 if (is_slob_page(sp)) {
64188 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64189 - unsigned int *m = (unsigned int *)(block - align);
64190 - slob_free(m, *m + align);
64192 + slob_t *m = (slob_t *)(block - align);
64193 + slob_free(m, m[0].units + align);
64195 + clear_slob_page(sp);
64196 + free_slob_page(sp);
64198 put_page(&sp->page);
64201 EXPORT_SYMBOL(kfree);
64203 +void check_object_size(const void *ptr, unsigned long n, bool to)
64206 +#ifdef CONFIG_PAX_USERCOPY
64207 + struct slob_page *sp;
64208 + const slob_t *free;
64209 + const void *base;
64210 + unsigned long flags;
64215 + if (ZERO_OR_NULL_PTR(ptr))
64218 + if (!virt_addr_valid(ptr))
64221 + sp = slob_page(ptr);
64222 + if (!PageSlab((struct page*)sp)) {
64223 + if (object_is_on_stack(ptr, n) == -1)
64229 + base = page_address(&sp->page);
64230 + if (base <= ptr && n <= sp->size - (ptr - base))
64235 + /* some tricky double walking to find the chunk */
64236 + spin_lock_irqsave(&slob_lock, flags);
64237 + base = (void *)((unsigned long)ptr & PAGE_MASK);
64240 + while (!slob_last(free) && (void *)free <= ptr) {
64241 + base = free + slob_units(free);
64242 + free = slob_next(free);
64245 + while (base < (void *)free) {
64246 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64247 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
64250 + if (ptr < base + align)
64253 + offset = ptr - base - align;
64254 + if (offset >= m) {
64259 + if (n > m - offset)
64262 + spin_unlock_irqrestore(&slob_lock, flags);
64266 + spin_unlock_irqrestore(&slob_lock, flags);
64268 + pax_report_usercopy(ptr, n, to, NULL);
64272 +EXPORT_SYMBOL(check_object_size);
64274 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64275 size_t ksize(const void *block)
64277 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
64278 sp = slob_page(block);
64279 if (is_slob_page(sp)) {
64280 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64281 - unsigned int *m = (unsigned int *)(block - align);
64282 - return SLOB_UNITS(*m) * SLOB_UNIT;
64283 + slob_t *m = (slob_t *)(block - align);
64284 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64286 - return sp->page.private;
64289 EXPORT_SYMBOL(ksize);
64291 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64293 struct kmem_cache *c;
64295 +#ifdef CONFIG_PAX_USERCOPY
64296 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
64297 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64299 c = slob_alloc(sizeof(struct kmem_cache),
64300 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64305 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64309 +#ifdef CONFIG_PAX_USERCOPY
64310 + b = __kmalloc_node_align(c->size, flags, node, c->align);
64312 if (c->size < PAGE_SIZE) {
64313 b = slob_alloc(c->size, flags, c->align, node);
64314 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64315 SLOB_UNITS(c->size) * SLOB_UNIT,
64318 + struct slob_page *sp;
64320 b = slob_new_pages(flags, get_order(c->size), node);
64321 + sp = slob_page(b);
64322 + sp->size = c->size;
64323 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64324 PAGE_SIZE << get_order(c->size),
64331 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64333 static void __kmem_cache_free(void *b, int size)
64335 - if (size < PAGE_SIZE)
64336 + struct slob_page *sp = slob_page(b);
64338 + if (is_slob_page(sp))
64339 slob_free(b, size);
64342 + clear_slob_page(sp);
64343 + free_slob_page(sp);
64345 slob_free_pages(b, get_order(size));
64349 static void kmem_rcu_free(struct rcu_head *head)
64350 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64352 void kmem_cache_free(struct kmem_cache *c, void *b)
64354 + int size = c->size;
64356 +#ifdef CONFIG_PAX_USERCOPY
64357 + if (size + c->align < PAGE_SIZE) {
64358 + size += c->align;
64363 kmemleak_free_recursive(b, c->flags);
64364 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64365 struct slob_rcu *slob_rcu;
64366 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64367 - slob_rcu->size = c->size;
64368 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64369 + slob_rcu->size = size;
64370 call_rcu(&slob_rcu->head, kmem_rcu_free);
64372 - __kmem_cache_free(b, c->size);
64373 + __kmem_cache_free(b, size);
64376 +#ifdef CONFIG_PAX_USERCOPY
64377 + trace_kfree(_RET_IP_, b);
64379 trace_kmem_cache_free(_RET_IP_, b);
64383 EXPORT_SYMBOL(kmem_cache_free);
64385 diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64386 --- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64387 +++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64388 @@ -431,7 +431,7 @@ static void print_track(const char *s, s
64392 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64393 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64394 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64397 @@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64399 page = virt_to_head_page(x);
64401 + BUG_ON(!PageSlab(page));
64403 slab_free(s, page, x, _RET_IP_);
64405 trace_kmem_cache_free(_RET_IP_, x);
64406 @@ -2216,7 +2218,7 @@ static int slub_min_objects;
64407 * Merge control. If this is set then no merging of slab caches will occur.
64408 * (Could be removed. This was introduced to pacify the merge skeptics.)
64410 -static int slub_nomerge;
64411 +static int slub_nomerge = 1;
64414 * Calculate the order of allocation given an slab object size.
64415 @@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64416 * list to avoid pounding the page allocator excessively.
64418 set_min_partial(s, ilog2(s->size));
64420 + atomic_set(&s->refcount, 1);
64422 s->remote_node_defrag_ratio = 1000;
64424 @@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64425 void kmem_cache_destroy(struct kmem_cache *s)
64427 down_write(&slub_lock);
64429 - if (!s->refcount) {
64430 + if (atomic_dec_and_test(&s->refcount)) {
64431 list_del(&s->list);
64432 if (kmem_cache_close(s)) {
64433 printk(KERN_ERR "SLUB %s: %s called for cache that "
64434 @@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64435 EXPORT_SYMBOL(__kmalloc_node);
64438 +void check_object_size(const void *ptr, unsigned long n, bool to)
64441 +#ifdef CONFIG_PAX_USERCOPY
64442 + struct page *page;
64443 + struct kmem_cache *s = NULL;
64444 + unsigned long offset;
64449 + if (ZERO_OR_NULL_PTR(ptr))
64452 + if (!virt_addr_valid(ptr))
64455 + page = virt_to_head_page(ptr);
64457 + if (!PageSlab(page)) {
64458 + if (object_is_on_stack(ptr, n) == -1)
64464 + if (!(s->flags & SLAB_USERCOPY))
64467 + offset = (ptr - page_address(page)) % s->size;
64468 + if (offset <= s->objsize && n <= s->objsize - offset)
64472 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64476 +EXPORT_SYMBOL(check_object_size);
64478 size_t ksize(const void *object)
64481 @@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64484 list_add(&s->list, &slab_caches);
64485 - s->refcount = -1;
64486 + atomic_set(&s->refcount, -1);
64488 for_each_node_state(node, N_NORMAL_MEMORY) {
64489 struct kmem_cache_node *n = get_node(s, node);
64490 @@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64492 /* Caches that are not of the two-to-the-power-of size */
64493 if (KMALLOC_MIN_SIZE <= 32) {
64494 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64495 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64499 if (KMALLOC_MIN_SIZE <= 64) {
64500 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64501 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64505 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64506 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64507 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64511 @@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64513 * We may have set a slab to be unmergeable during bootstrap.
64515 - if (s->refcount < 0)
64516 + if (atomic_read(&s->refcount) < 0)
64520 @@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64521 down_write(&slub_lock);
64522 s = find_mergeable(size, align, flags, name, ctor);
64525 + atomic_inc(&s->refcount);
64527 * Adjust the object sizes so that we clear
64528 * the complete object on kzalloc.
64529 @@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64530 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64532 if (sysfs_slab_alias(s, name)) {
64534 + atomic_dec(&s->refcount);
64537 up_write(&slub_lock);
64538 @@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64540 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64542 - return sprintf(buf, "%d\n", s->refcount - 1);
64543 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64545 SLAB_ATTR_RO(aliases);
64547 @@ -4945,7 +4986,13 @@ static const struct file_operations proc
64549 static int __init slab_proc_init(void)
64551 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64552 + mode_t gr_mode = S_IRUGO;
64554 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64555 + gr_mode = S_IRUSR;
64558 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64561 module_init(slab_proc_init);
64562 diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64563 --- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64564 +++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64566 #include <linux/backing-dev.h>
64567 #include <linux/memcontrol.h>
64568 #include <linux/gfp.h>
64569 +#include <linux/hugetlb.h>
64571 #include "internal.h"
64573 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64575 __page_cache_release(page);
64576 dtor = get_compound_page_dtor(page);
64577 + if (!PageHuge(page))
64578 + BUG_ON(dtor != free_compound_page);
64582 diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64583 --- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64584 +++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64585 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64587 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64588 /* Activity counter to indicate that a swapon or swapoff has occurred */
64589 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64590 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64592 static inline unsigned char swap_count(unsigned char ent)
64594 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64596 filp_close(swap_file, NULL);
64598 - atomic_inc(&proc_poll_event);
64599 + atomic_inc_unchecked(&proc_poll_event);
64600 wake_up_interruptible(&proc_poll_wait);
64603 @@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64605 poll_wait(file, &proc_poll_wait, wait);
64607 - if (s->event != atomic_read(&proc_poll_event)) {
64608 - s->event = atomic_read(&proc_poll_event);
64609 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64610 + s->event = atomic_read_unchecked(&proc_poll_event);
64611 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64614 @@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64617 s->seq.private = s;
64618 - s->event = atomic_read(&proc_poll_event);
64619 + s->event = atomic_read_unchecked(&proc_poll_event);
64623 @@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64624 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64626 mutex_unlock(&swapon_mutex);
64627 - atomic_inc(&proc_poll_event);
64628 + atomic_inc_unchecked(&proc_poll_event);
64629 wake_up_interruptible(&proc_poll_wait);
64631 if (S_ISREG(inode->i_mode))
64632 diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64633 --- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64634 +++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64635 @@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64636 * allocated buffer. Use this if you don't want to free the buffer immediately
64637 * like, for example, with RCU.
64640 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64643 @@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64644 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64645 * %NULL pointer, the object pointed to is freed.
64648 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64651 @@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64652 void arch_pick_mmap_layout(struct mm_struct *mm)
64654 mm->mmap_base = TASK_UNMAPPED_BASE;
64656 +#ifdef CONFIG_PAX_RANDMMAP
64657 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64658 + mm->mmap_base += mm->delta_mmap;
64661 mm->get_unmapped_area = arch_get_unmapped_area;
64662 mm->unmap_area = arch_unmap_area;
64664 diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64665 --- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64666 +++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64667 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64669 pte = pte_offset_kernel(pmd, addr);
64671 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64672 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64674 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64675 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64676 + BUG_ON(!pte_exec(*pte));
64677 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64683 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64684 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64686 } while (pte++, addr += PAGE_SIZE, addr != end);
64689 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64690 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64693 + int ret = -ENOMEM;
64696 * nr is a running index into the array which helps higher level
64697 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64698 pte = pte_alloc_kernel(pmd, addr);
64702 + pax_open_kernel();
64704 struct page *page = pages[*nr];
64706 - if (WARN_ON(!pte_none(*pte)))
64708 - if (WARN_ON(!page))
64710 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64711 + if (pgprot_val(prot) & _PAGE_NX)
64714 + if (WARN_ON(!pte_none(*pte))) {
64718 + if (WARN_ON(!page)) {
64722 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64724 } while (pte++, addr += PAGE_SIZE, addr != end);
64728 + pax_close_kernel();
64732 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64733 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64734 * and fall back on vmalloc() if that fails. Others
64735 * just put it in the vmalloc space.
64737 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64738 +#ifdef CONFIG_MODULES
64739 +#ifdef MODULES_VADDR
64740 unsigned long addr = (unsigned long)x;
64741 if (addr >= MODULES_VADDR && addr < MODULES_END)
64745 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64746 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64752 return is_vmalloc_addr(x);
64755 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64757 if (!pgd_none(*pgd)) {
64758 pud_t *pud = pud_offset(pgd, addr);
64760 + if (!pud_large(*pud))
64762 if (!pud_none(*pud)) {
64763 pmd_t *pmd = pmd_offset(pud, addr);
64765 + if (!pmd_large(*pmd))
64767 if (!pmd_none(*pmd)) {
64770 @@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64771 struct vm_struct *area;
64773 BUG_ON(in_interrupt());
64775 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64776 + if (flags & VM_KERNEXEC) {
64777 + if (start != VMALLOC_START || end != VMALLOC_END)
64779 + start = (unsigned long)MODULES_EXEC_VADDR;
64780 + end = (unsigned long)MODULES_EXEC_END;
64784 if (flags & VM_IOREMAP) {
64785 int bit = fls(size);
64787 @@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64788 if (count > totalram_pages)
64791 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64792 + if (!(pgprot_val(prot) & _PAGE_NX))
64793 + flags |= VM_KERNEXEC;
64796 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64797 __builtin_return_address(0));
64799 @@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64800 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64803 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64804 + if (!(pgprot_val(prot) & _PAGE_NX))
64805 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64806 + node, gfp_mask, caller);
64810 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64813 @@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64814 gfp_mask, prot, node, caller);
64818 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64820 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64821 @@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64822 * For tight control over page level allocator and protection flags
64823 * use __vmalloc() instead.
64826 void *vmalloc(unsigned long size)
64828 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64829 @@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64830 * For tight control over page level allocator and protection flags
64831 * use __vmalloc() instead.
64834 void *vzalloc(unsigned long size)
64836 return __vmalloc_node_flags(size, -1,
64837 @@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64838 * The resulting memory area is zeroed so it can be mapped to userspace
64839 * without leaking data.
64841 +#undef vmalloc_user
64842 void *vmalloc_user(unsigned long size)
64844 struct vm_struct *area;
64845 @@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64846 * For tight control over page level allocator and protection flags
64847 * use __vmalloc() instead.
64849 +#undef vmalloc_node
64850 void *vmalloc_node(unsigned long size, int node)
64852 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64853 @@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64854 * For tight control over page level allocator and protection flags
64855 * use __vmalloc_node() instead.
64857 +#undef vzalloc_node
64858 void *vzalloc_node(unsigned long size, int node)
64860 return __vmalloc_node_flags(size, node,
64861 @@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64862 * For tight control over page level allocator and protection flags
64863 * use __vmalloc() instead.
64866 +#undef vmalloc_exec
64867 void *vmalloc_exec(unsigned long size)
64869 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64870 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64871 -1, __builtin_return_address(0));
64874 @@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64875 * Allocate enough 32bit PA addressable pages to cover @size from the
64876 * page level allocator and map them into contiguous kernel virtual space.
64879 void *vmalloc_32(unsigned long size)
64881 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64882 @@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64883 * The resulting memory area is 32bit addressable and zeroed so it can be
64884 * mapped to userspace without leaking data.
64886 +#undef vmalloc_32_user
64887 void *vmalloc_32_user(unsigned long size)
64889 struct vm_struct *area;
64890 @@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64891 unsigned long uaddr = vma->vm_start;
64892 unsigned long usize = vma->vm_end - vma->vm_start;
64894 + BUG_ON(vma->vm_mirror);
64896 if ((PAGE_SIZE-1) & (unsigned long)addr)
64899 diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64900 --- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64901 +++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64902 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64904 * vm_stat contains the global counters
64906 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64907 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64908 EXPORT_SYMBOL(vm_stat);
64911 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64912 v = p->vm_stat_diff[i];
64913 p->vm_stat_diff[i] = 0;
64914 local_irq_restore(flags);
64915 - atomic_long_add(v, &zone->vm_stat[i]);
64916 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64917 global_diff[i] += v;
64919 /* 3 seconds idle till flush */
64920 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64922 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64923 if (global_diff[i])
64924 - atomic_long_add(global_diff[i], &vm_stat[i]);
64925 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64929 @@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64930 start_cpu_timer(cpu);
64932 #ifdef CONFIG_PROC_FS
64933 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64934 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64935 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64936 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64938 + mode_t gr_mode = S_IRUGO;
64939 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64940 + gr_mode = S_IRUSR;
64942 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64943 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64944 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64945 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64947 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64949 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64954 diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64955 --- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64956 +++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64957 @@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64959 if (!capable(CAP_NET_ADMIN))
64961 - if ((args.u.name_type >= 0) &&
64962 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64963 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64964 struct vlan_net *vn;
64966 vn = net_generic(net, vlan_net_id);
64967 diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64968 --- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64969 +++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64970 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64971 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64973 atm_return(vcc, truesize);
64974 - atomic_inc(&vcc->stats->rx_drop);
64975 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64978 EXPORT_SYMBOL(atm_charge);
64979 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64982 atm_return(vcc, guess);
64983 - atomic_inc(&vcc->stats->rx_drop);
64984 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64987 EXPORT_SYMBOL(atm_alloc_charge);
64988 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64990 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64992 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64993 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64995 #undef __HANDLE_ITEM
64997 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64999 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65001 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65002 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
65004 #undef __HANDLE_ITEM
65006 diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
65007 --- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
65008 +++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
65009 @@ -48,7 +48,7 @@ struct lane2_ops {
65010 const u8 *tlvs, u32 sizeoftlvs);
65011 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
65012 const u8 *tlvs, u32 sizeoftlvs);
65017 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
65018 diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
65019 --- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
65020 +++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
65021 @@ -33,7 +33,7 @@ struct mpoa_client {
65022 struct mpc_parameters parameters; /* parameters for this client */
65024 const struct net_device_ops *old_ops;
65025 - struct net_device_ops new_ops;
65026 + net_device_ops_no_const new_ops;
65030 diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
65031 --- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
65032 +++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
65033 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
65034 struct timeval now;
65035 struct k_message msg;
65037 + pax_track_stack();
65039 do_gettimeofday(&now);
65041 read_lock_bh(&client->ingress_lock);
65042 diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
65043 --- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
65044 +++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
65045 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
65046 const struct k_atm_aal_stats *stats)
65048 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
65049 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
65050 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
65051 - atomic_read(&stats->rx_drop));
65052 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
65053 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
65054 + atomic_read_unchecked(&stats->rx_drop));
65057 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
65058 @@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
65060 struct sock *sk = sk_atm(vcc);
65062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65063 + seq_printf(seq, "%p ", NULL);
65065 seq_printf(seq, "%p ", vcc);
65069 seq_printf(seq, "Unassigned ");
65071 @@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65074 seq_printf(seq, sizeof(void *) == 4 ?
65075 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65076 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65078 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65081 seq_printf(seq, "%3d %3d %5d ",
65082 vcc->dev->number, vcc->vpi, vcc->vci);
65083 diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65084 --- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65085 +++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65086 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65087 static void copy_aal_stats(struct k_atm_aal_stats *from,
65088 struct atm_aal_stats *to)
65090 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65091 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65093 #undef __HANDLE_ITEM
65095 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65096 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65097 struct atm_aal_stats *to)
65099 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65100 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65102 #undef __HANDLE_ITEM
65104 diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65105 --- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65106 +++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65107 @@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65108 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65109 dev_add_pack(&hard_iface->batman_adv_ptype);
65111 - atomic_set(&hard_iface->seqno, 1);
65112 - atomic_set(&hard_iface->frag_seqno, 1);
65113 + atomic_set_unchecked(&hard_iface->seqno, 1);
65114 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65115 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65116 hard_iface->net_dev->name);
65118 diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65119 --- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65120 +++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65121 @@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65124 /* could be changed by schedule_own_packet() */
65125 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
65126 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65128 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65130 diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65131 --- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65132 +++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65133 @@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65135 /* change sequence number to network order */
65136 batman_packet->seqno =
65137 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
65138 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65140 if (vis_server == VIS_TYPE_SERVER_SYNC)
65141 batman_packet->flags |= VIS_SERVER;
65142 @@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65144 batman_packet->gw_flags = 0;
65146 - atomic_inc(&hard_iface->seqno);
65147 + atomic_inc_unchecked(&hard_iface->seqno);
65149 slide_own_bcast_window(hard_iface);
65150 send_time = own_send_time(bat_priv);
65151 diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65152 --- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65153 +++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65154 @@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65156 /* set broadcast sequence number */
65157 bcast_packet->seqno =
65158 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65159 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65161 add_bcast_packet_to_list(bat_priv, skb);
65163 @@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65164 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65166 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65167 - atomic_set(&bat_priv->bcast_seqno, 1);
65168 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65169 atomic_set(&bat_priv->hna_local_changed, 0);
65171 bat_priv->primary_if = NULL;
65172 diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65173 --- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65174 +++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65175 @@ -38,8 +38,8 @@ struct hard_iface {
65178 struct net_device *net_dev;
65180 - atomic_t frag_seqno;
65181 + atomic_unchecked_t seqno;
65182 + atomic_unchecked_t frag_seqno;
65183 unsigned char *packet_buff;
65185 struct kobject *hardif_obj;
65186 @@ -141,7 +141,7 @@ struct bat_priv {
65187 atomic_t orig_interval; /* uint */
65188 atomic_t hop_penalty; /* uint */
65189 atomic_t log_level; /* uint */
65190 - atomic_t bcast_seqno;
65191 + atomic_unchecked_t bcast_seqno;
65192 atomic_t bcast_queue_left;
65193 atomic_t batman_queue_left;
65195 diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65196 --- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65197 +++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65198 @@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65199 frag1->flags = UNI_FRAG_HEAD | large_tail;
65200 frag2->flags = large_tail;
65202 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65203 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65204 frag1->seqno = htons(seqno - 1);
65205 frag2->seqno = htons(seqno);
65207 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65208 --- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65209 +++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65210 @@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65212 /* Reject if config buffer is too small. */
65213 len = cmd_len - sizeof(*req);
65214 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65215 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65216 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65217 l2cap_build_conf_rsp(sk, rsp,
65218 L2CAP_CONF_REJECT, flags), rsp);
65219 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65220 --- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65221 +++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65222 @@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65226 + memset(&cinfo, 0, sizeof(cinfo));
65227 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65228 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65230 diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65231 --- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65232 +++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65233 @@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65235 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65237 + memset(&cinfo, 0, sizeof(cinfo));
65238 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65239 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65241 diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65242 --- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65243 +++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65244 @@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65245 nexthdr = ip6h->nexthdr;
65246 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65248 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65249 + if (nexthdr != IPPROTO_ICMPV6)
65252 /* Okay, we found ICMPv6 header */
65253 diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65254 --- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65255 +++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65256 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65257 tmp.valid_hooks = t->table->valid_hooks;
65259 mutex_unlock(&ebt_mutex);
65260 - if (copy_to_user(user, &tmp, *len) != 0){
65261 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65262 BUGPRINT("c2u Didn't work\n");
65265 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65269 + pax_track_stack();
65271 memset(&tinfo, 0, sizeof(tinfo));
65273 if (cmd == EBT_SO_GET_ENTRIES) {
65274 diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65275 --- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65276 +++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65277 @@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65278 #ifdef CONFIG_DEBUG_FS
65279 struct debug_fs_counter {
65280 atomic_t caif_nr_socks;
65281 - atomic_t num_connect_req;
65282 - atomic_t num_connect_resp;
65283 - atomic_t num_connect_fail_resp;
65284 - atomic_t num_disconnect;
65285 - atomic_t num_remote_shutdown_ind;
65286 - atomic_t num_tx_flow_off_ind;
65287 - atomic_t num_tx_flow_on_ind;
65288 - atomic_t num_rx_flow_off;
65289 - atomic_t num_rx_flow_on;
65290 + atomic_unchecked_t num_connect_req;
65291 + atomic_unchecked_t num_connect_resp;
65292 + atomic_unchecked_t num_connect_fail_resp;
65293 + atomic_unchecked_t num_disconnect;
65294 + atomic_unchecked_t num_remote_shutdown_ind;
65295 + atomic_unchecked_t num_tx_flow_off_ind;
65296 + atomic_unchecked_t num_tx_flow_on_ind;
65297 + atomic_unchecked_t num_rx_flow_off;
65298 + atomic_unchecked_t num_rx_flow_on;
65300 static struct debug_fs_counter cnt;
65301 #define dbfs_atomic_inc(v) atomic_inc(v)
65302 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65303 #define dbfs_atomic_dec(v) atomic_dec(v)
65305 #define dbfs_atomic_inc(v)
65306 @@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65307 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65308 sk_rcvbuf_lowwater(cf_sk));
65309 set_rx_flow_off(cf_sk);
65310 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65311 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65312 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65315 @@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65316 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65317 set_rx_flow_off(cf_sk);
65318 pr_debug("sending flow OFF due to rmem_schedule\n");
65319 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65320 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65321 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65324 @@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65326 case CAIF_CTRLCMD_FLOW_ON_IND:
65327 /* OK from modem to start sending again */
65328 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65329 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65330 set_tx_flow_on(cf_sk);
65331 cf_sk->sk.sk_state_change(&cf_sk->sk);
65334 case CAIF_CTRLCMD_FLOW_OFF_IND:
65335 /* Modem asks us to shut up */
65336 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65337 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65338 set_tx_flow_off(cf_sk);
65339 cf_sk->sk.sk_state_change(&cf_sk->sk);
65342 case CAIF_CTRLCMD_INIT_RSP:
65343 /* We're now connected */
65344 - dbfs_atomic_inc(&cnt.num_connect_resp);
65345 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65346 cf_sk->sk.sk_state = CAIF_CONNECTED;
65347 set_tx_flow_on(cf_sk);
65348 cf_sk->sk.sk_state_change(&cf_sk->sk);
65349 @@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65351 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65352 /* Connect request failed */
65353 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65354 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65355 cf_sk->sk.sk_err = ECONNREFUSED;
65356 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65357 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65358 @@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65360 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65361 /* Modem has closed this connection, or device is down. */
65362 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65363 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65364 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65365 cf_sk->sk.sk_err = ECONNRESET;
65366 set_rx_flow_on(cf_sk);
65367 @@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65370 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65371 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
65372 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65373 set_rx_flow_on(cf_sk);
65374 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65376 @@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65377 /*ifindex = id of the interface.*/
65378 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65380 - dbfs_atomic_inc(&cnt.num_connect_req);
65381 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65382 cf_sk->layer.receive = caif_sktrecv_cb;
65383 err = caif_connect_client(&cf_sk->conn_req,
65384 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65385 @@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65386 spin_unlock(&sk->sk_receive_queue.lock);
65389 - dbfs_atomic_inc(&cnt.num_disconnect);
65390 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65392 if (cf_sk->debugfs_socket_dir != NULL)
65393 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65394 diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65395 --- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65396 +++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65398 #include <linux/stddef.h>
65399 #include <linux/spinlock.h>
65400 #include <linux/slab.h>
65401 +#include <linux/sched.h>
65402 #include <net/caif/caif_layer.h>
65403 #include <net/caif/cfpkt.h>
65404 #include <net/caif/cfctrl.h>
65405 @@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65406 dev_info.id = 0xff;
65407 memset(this, 0, sizeof(*this));
65408 cfsrvl_init(&this->serv, 0, &dev_info, false);
65409 - atomic_set(&this->req_seq_no, 1);
65410 - atomic_set(&this->rsp_seq_no, 1);
65411 + atomic_set_unchecked(&this->req_seq_no, 1);
65412 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65413 this->serv.layer.receive = cfctrl_recv;
65414 sprintf(this->serv.layer.name, "ctrl");
65415 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65416 @@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65417 struct cfctrl_request_info *req)
65419 spin_lock(&ctrl->info_list_lock);
65420 - atomic_inc(&ctrl->req_seq_no);
65421 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65422 + atomic_inc_unchecked(&ctrl->req_seq_no);
65423 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65424 list_add_tail(&req->list, &ctrl->list);
65425 spin_unlock(&ctrl->info_list_lock);
65427 @@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65429 pr_warn("Requests are not received in order\n");
65431 - atomic_set(&ctrl->rsp_seq_no,
65432 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65434 list_del(&p->list);
65436 @@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65437 struct cfctrl *cfctrl = container_obj(layer);
65438 struct cfctrl_request_info rsp, *req;
65440 + pax_track_stack();
65442 cfpkt_extr_head(pkt, &cmdrsp, 1);
65443 cmd = cmdrsp & CFCTRL_CMD_MASK;
65444 diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65445 --- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65446 +++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65447 @@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65448 struct bcm_sock *bo = bcm_sk(sk);
65451 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65452 + seq_printf(m, ">>> socket %p", NULL);
65453 + seq_printf(m, " / sk %p", NULL);
65454 + seq_printf(m, " / bo %p", NULL);
65456 seq_printf(m, ">>> socket %p", sk->sk_socket);
65457 seq_printf(m, " / sk %p", sk);
65458 seq_printf(m, " / bo %p", bo);
65460 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65461 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65462 seq_printf(m, " <<<\n");
65463 diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65464 --- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65465 +++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65466 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65470 - atomic_inc(&sk->sk_drops);
65471 + atomic_inc_unchecked(&sk->sk_drops);
65472 sk_mem_reclaim_partial(sk);
65475 diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65476 --- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65477 +++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65478 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65479 if (no_module && capable(CAP_NET_ADMIN))
65480 no_module = request_module("netdev-%s", name);
65481 if (no_module && capable(CAP_SYS_MODULE)) {
65482 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65483 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65485 if (!request_module("%s", name))
65486 pr_err("Loading kernel module for a network device "
65487 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65488 "instead\n", name);
65492 EXPORT_SYMBOL(dev_load);
65493 @@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65495 struct dev_gso_cb {
65496 void (*destructor)(struct sk_buff *skb);
65500 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65502 @@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65504 EXPORT_SYMBOL(netif_rx_ni);
65506 -static void net_tx_action(struct softirq_action *h)
65507 +static void net_tx_action(void)
65509 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65511 @@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65513 EXPORT_SYMBOL(netif_napi_del);
65515 -static void net_rx_action(struct softirq_action *h)
65516 +static void net_rx_action(void)
65518 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65519 unsigned long time_limit = jiffies + 2;
65520 diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65521 --- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65522 +++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65523 @@ -60,7 +60,7 @@ struct flow_cache {
65524 struct timer_list rnd_timer;
65527 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65528 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65529 EXPORT_SYMBOL(flow_cache_genid);
65530 static struct flow_cache flow_cache_global;
65531 static struct kmem_cache *flow_cachep __read_mostly;
65532 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65534 static int flow_entry_valid(struct flow_cache_entry *fle)
65536 - if (atomic_read(&flow_cache_genid) != fle->genid)
65537 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65539 if (fle->object && !fle->object->ops->check(fle->object))
65541 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65542 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65545 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65546 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65550 @@ -274,7 +274,7 @@ nocache:
65552 flo = resolver(net, key, family, dir, flo, ctx);
65554 - fle->genid = atomic_read(&flow_cache_genid);
65555 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65559 diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65560 --- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65561 +++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65564 rtnl_doit_func doit;
65565 rtnl_dumpit_func dumpit;
65569 static DEFINE_MUTEX(rtnl_mutex);
65571 diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65572 --- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65573 +++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65574 @@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65575 struct sock *sk = skb->sk;
65578 + pax_track_stack();
65580 if (splice_grow_spd(pipe, &spd))
65583 diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65584 --- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65585 +++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65586 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65588 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65589 (unsigned)sk->sk_rcvbuf) {
65590 - atomic_inc(&sk->sk_drops);
65591 + atomic_inc_unchecked(&sk->sk_drops);
65595 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65598 if (!sk_rmem_schedule(sk, skb->truesize)) {
65599 - atomic_inc(&sk->sk_drops);
65600 + atomic_inc_unchecked(&sk->sk_drops);
65604 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65605 skb_dst_force(skb);
65607 spin_lock_irqsave(&list->lock, flags);
65608 - skb->dropcount = atomic_read(&sk->sk_drops);
65609 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65610 __skb_queue_tail(list, skb);
65611 spin_unlock_irqrestore(&list->lock, flags);
65613 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65616 if (sk_rcvqueues_full(sk, skb)) {
65617 - atomic_inc(&sk->sk_drops);
65618 + atomic_inc_unchecked(&sk->sk_drops);
65619 goto discard_and_relse;
65622 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65623 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65624 } else if (sk_add_backlog(sk, skb)) {
65625 bh_unlock_sock(sk);
65626 - atomic_inc(&sk->sk_drops);
65627 + atomic_inc_unchecked(&sk->sk_drops);
65628 goto discard_and_relse;
65631 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65635 - if (copy_to_user(optval, address, len))
65636 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65640 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65644 - if (copy_to_user(optval, &v, len))
65645 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65648 if (put_user(len, optlen))
65649 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65652 atomic_set(&sk->sk_refcnt, 1);
65653 - atomic_set(&sk->sk_drops, 0);
65654 + atomic_set_unchecked(&sk->sk_drops, 0);
65656 EXPORT_SYMBOL(sock_init_data);
65658 diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65659 --- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65660 +++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65661 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65663 if (len > *lenp) len = *lenp;
65665 - if (copy_to_user(buffer, addr, len))
65666 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65670 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65672 if (len > *lenp) len = *lenp;
65674 - if (copy_to_user(buffer, devname, len))
65675 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65679 diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65680 --- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65681 +++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65685 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65686 - depends on EXPERIMENTAL && INET
65687 + depends on EXPERIMENTAL && INET && BROKEN
65689 Econet is a fairly old and slow networking protocol mainly used by
65690 Acorn computers to access file and print servers. It uses native
65691 diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65692 --- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65693 +++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65694 @@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65695 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65698 - atomic_inc(&net->ipv4.dev_addr_genid);
65699 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65700 rt_cache_flush(dev_net(dev), -1);
65703 fib_del_ifaddr(ifa, NULL);
65704 - atomic_inc(&net->ipv4.dev_addr_genid);
65705 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65706 if (ifa->ifa_dev->ifa_list == NULL) {
65707 /* Last address was deleted from this interface.
65709 @@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65710 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65713 - atomic_inc(&net->ipv4.dev_addr_genid);
65714 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65715 rt_cache_flush(dev_net(dev), -1);
65718 diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65719 --- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65720 +++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65721 @@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65722 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65724 nh->nh_parent->fib_scope);
65725 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65726 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65728 return nh->nh_saddr;
65730 diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65731 --- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65732 +++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65733 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65734 r->idiag_retrans = 0;
65736 r->id.idiag_if = sk->sk_bound_dev_if;
65738 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65739 + r->id.idiag_cookie[0] = 0;
65740 + r->id.idiag_cookie[1] = 0;
65742 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65743 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65746 r->id.idiag_sport = inet->inet_sport;
65747 r->id.idiag_dport = inet->inet_dport;
65748 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65749 r->idiag_family = tw->tw_family;
65750 r->idiag_retrans = 0;
65751 r->id.idiag_if = tw->tw_bound_dev_if;
65753 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65754 + r->id.idiag_cookie[0] = 0;
65755 + r->id.idiag_cookie[1] = 0;
65757 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65758 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65761 r->id.idiag_sport = tw->tw_sport;
65762 r->id.idiag_dport = tw->tw_dport;
65763 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65764 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65768 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65770 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65771 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65772 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65773 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65778 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65779 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65780 r->idiag_retrans = req->retrans;
65782 r->id.idiag_if = sk->sk_bound_dev_if;
65784 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65785 + r->id.idiag_cookie[0] = 0;
65786 + r->id.idiag_cookie[1] = 0;
65788 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65789 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65792 tmo = req->expires - jiffies;
65794 diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65795 --- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65796 +++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65797 @@ -18,11 +18,14 @@
65798 #include <linux/sched.h>
65799 #include <linux/slab.h>
65800 #include <linux/wait.h>
65801 +#include <linux/security.h>
65803 #include <net/inet_connection_sock.h>
65804 #include <net/inet_hashtables.h>
65805 #include <net/ip.h>
65807 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65810 * Allocate and initialize a new local port bind bucket.
65811 * The bindhash mutex for snum's hash chain must be held here.
65812 @@ -529,6 +532,8 @@ ok:
65813 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65814 spin_unlock(&head->lock);
65816 + gr_update_task_in_ip_table(current, inet_sk(sk));
65819 inet_twsk_deschedule(tw, death_row);
65821 diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65822 --- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65823 +++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65824 @@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65825 unsigned int sequence;
65826 int invalidated, newrefcnt = 0;
65828 + pax_track_stack();
65830 /* Look up for the address quickly, lockless.
65831 * Because of a concurrent writer, we might not find an existing entry.
65833 @@ -516,8 +518,8 @@ found: /* The existing node has been fo
65836 atomic_set(&p->refcnt, 1);
65837 - atomic_set(&p->rid, 0);
65838 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65839 + atomic_set_unchecked(&p->rid, 0);
65840 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65841 p->tcp_ts_stamp = 0;
65842 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65843 p->rate_tokens = 0;
65844 diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65845 --- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65846 +++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65847 @@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65851 - end = atomic_inc_return(&peer->rid);
65852 + end = atomic_inc_return_unchecked(&peer->rid);
65855 rc = qp->q.fragments && (end - start) > max;
65856 diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65857 --- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65858 +++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65859 @@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65863 + pax_track_stack();
65865 if (level != SOL_IP)
65866 return -EOPNOTSUPP;
65868 diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65869 --- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65870 +++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65871 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65875 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65876 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65877 if (*octets == NULL) {
65878 if (net_ratelimit())
65879 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65880 diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65881 --- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65882 +++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-14 11:22:59.000000000 -0400
65883 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65884 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65886 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65887 - atomic_inc(&sk->sk_drops);
65888 + atomic_inc_unchecked(&sk->sk_drops);
65890 return NET_RX_DROP;
65892 @@ -730,16 +730,20 @@ static int raw_init(struct sock *sk)
65894 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65896 + struct icmp_filter filter;
65898 if (optlen > sizeof(struct icmp_filter))
65899 optlen = sizeof(struct icmp_filter);
65900 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65901 + if (copy_from_user(&filter, optval, optlen))
65903 + raw_sk(sk)->filter = filter;
65907 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65909 int len, ret = -EFAULT;
65910 + struct icmp_filter filter;
65912 if (get_user(len, optlen))
65914 @@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65915 if (len > sizeof(struct icmp_filter))
65916 len = sizeof(struct icmp_filter);
65918 - if (put_user(len, optlen) ||
65919 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65920 + filter = raw_sk(sk)->filter;
65921 + if (put_user(len, optlen) || len > sizeof filter ||
65922 + copy_to_user(optval, &filter, len))
65926 @@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65927 sk_wmem_alloc_get(sp),
65928 sk_rmem_alloc_get(sp),
65929 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65930 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65931 + atomic_read(&sp->sk_refcnt),
65932 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65937 + atomic_read_unchecked(&sp->sk_drops));
65940 static int raw_seq_show(struct seq_file *seq, void *v)
65941 diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65942 --- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65943 +++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65944 @@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65946 static inline int rt_genid(struct net *net)
65948 - return atomic_read(&net->ipv4.rt_genid);
65949 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65952 #ifdef CONFIG_PROC_FS
65953 @@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65954 unsigned char shuffle;
65956 get_random_bytes(&shuffle, sizeof(shuffle));
65957 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65958 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65962 @@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65963 rt->peer->pmtu_expires - jiffies : 0;
65965 inet_peer_refcheck(rt->peer);
65966 - id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65967 + id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65968 if (rt->peer->tcp_ts_stamp) {
65969 ts = rt->peer->tcp_ts;
65970 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65971 diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65972 --- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65973 +++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65974 @@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
65978 + pax_track_stack();
65980 /* These are data/string values, all the others are ints */
65982 case TCP_CONGESTION: {
65983 @@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
65984 struct tcp_sock *tp = tcp_sk(sk);
65987 + pax_track_stack();
65989 if (get_user(len, optlen))
65992 diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
65993 --- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
65994 +++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
65995 @@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65996 int sysctl_tcp_low_latency __read_mostly;
65997 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65999 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66000 +extern int grsec_enable_blackhole;
66003 #ifdef CONFIG_TCP_MD5SIG
66004 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
66005 @@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
66009 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66010 + if (!grsec_enable_blackhole)
66012 tcp_v4_send_reset(rsk, skb);
66015 @@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
66016 TCP_SKB_CB(skb)->sacked = 0;
66018 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66021 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66024 goto no_tcp_socket;
66028 - if (sk->sk_state == TCP_TIME_WAIT)
66029 + if (sk->sk_state == TCP_TIME_WAIT) {
66030 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66036 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
66037 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66038 @@ -1711,6 +1724,10 @@ no_tcp_socket:
66040 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66042 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66043 + if (!grsec_enable_blackhole || (ret == 1 &&
66044 + (skb->dev->flags & IFF_LOOPBACK)))
66046 tcp_v4_send_reset(NULL, skb);
66049 @@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
66050 0, /* non standard timer */
66051 0, /* open_requests have no inode */
66052 atomic_read(&sk->sk_refcnt),
66053 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66061 @@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
66063 icsk->icsk_probes_out,
66065 - atomic_read(&sk->sk_refcnt), sk,
66066 + atomic_read(&sk->sk_refcnt),
66067 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66072 jiffies_to_clock_t(icsk->icsk_rto),
66073 jiffies_to_clock_t(icsk->icsk_ack.ato),
66074 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66075 @@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66076 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66077 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66078 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66079 - atomic_read(&tw->tw_refcnt), tw, len);
66080 + atomic_read(&tw->tw_refcnt),
66081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66090 diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66091 --- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66092 +++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66094 #include <net/inet_common.h>
66095 #include <net/xfrm.h>
66097 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66098 +extern int grsec_enable_blackhole;
66101 int sysctl_tcp_syncookies __read_mostly = 1;
66102 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66104 @@ -745,6 +749,10 @@ listen_overflow:
66107 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66109 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66110 + if (!grsec_enable_blackhole)
66112 if (!(flg & TCP_FLAG_RST))
66113 req->rsk_ops->send_reset(sk, skb);
66115 diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66116 --- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66117 +++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66118 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66120 int s_data_desired = 0;
66122 + pax_track_stack();
66124 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66125 s_data_desired = cvp->s_data_desired;
66126 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66127 diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66128 --- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66129 +++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66130 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66131 if (cnt + width >= len)
66134 - if (copy_to_user(buf + cnt, tbuf, width))
66135 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66139 diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66140 --- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66141 +++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66143 #include <linux/gfp.h>
66144 #include <net/tcp.h>
66146 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66147 +extern int grsec_lastack_retries;
66150 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66151 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66152 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66153 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66157 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66158 + if ((sk->sk_state == TCP_LAST_ACK) &&
66159 + (grsec_lastack_retries > 0) &&
66160 + (grsec_lastack_retries < retry_until))
66161 + retry_until = grsec_lastack_retries;
66164 if (retransmits_timed_out(sk, retry_until,
66165 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66166 /* Has it gone just too far? */
66167 diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66168 --- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66169 +++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66171 #include <linux/types.h>
66172 #include <linux/fcntl.h>
66173 #include <linux/module.h>
66174 +#include <linux/security.h>
66175 #include <linux/socket.h>
66176 #include <linux/sockios.h>
66177 #include <linux/igmp.h>
66178 @@ -107,6 +108,10 @@
66179 #include <net/xfrm.h>
66180 #include "udp_impl.h"
66182 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66183 +extern int grsec_enable_blackhole;
66186 struct udp_table udp_table __read_mostly;
66187 EXPORT_SYMBOL(udp_table);
66189 @@ -564,6 +569,9 @@ found:
66193 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66194 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66197 * This routine is called by the ICMP module when it gets some
66198 * sort of error condition. If err < 0 then the socket should
66199 @@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66200 dport = usin->sin_port;
66204 + err = gr_search_udp_sendmsg(sk, usin);
66208 if (sk->sk_state != TCP_ESTABLISHED)
66209 return -EDESTADDRREQ;
66211 + err = gr_search_udp_sendmsg(sk, NULL);
66215 daddr = inet->inet_daddr;
66216 dport = inet->inet_dport;
66217 /* Open fast path for connected socket.
66218 @@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66219 udp_lib_checksum_complete(skb)) {
66220 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66222 - atomic_inc(&sk->sk_drops);
66223 + atomic_inc_unchecked(&sk->sk_drops);
66224 __skb_unlink(skb, rcvq);
66225 __skb_queue_tail(&list_kill, skb);
66227 @@ -1176,6 +1193,10 @@ try_again:
66231 + err = gr_search_udp_recvmsg(sk, skb);
66235 ulen = skb->len - sizeof(struct udphdr);
66238 @@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66241 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66242 - atomic_inc(&sk->sk_drops);
66243 + atomic_inc_unchecked(&sk->sk_drops);
66247 @@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66248 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66251 - atomic_inc(&sk->sk_drops);
66252 + atomic_inc_unchecked(&sk->sk_drops);
66253 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66255 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66256 @@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66259 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66260 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66261 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66263 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66266 @@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66267 sk_wmem_alloc_get(sp),
66268 sk_rmem_alloc_get(sp),
66269 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66270 - atomic_read(&sp->sk_refcnt), sp,
66271 - atomic_read(&sp->sk_drops), len);
66272 + atomic_read(&sp->sk_refcnt),
66273 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66278 + atomic_read_unchecked(&sp->sk_drops), len);
66281 int udp4_seq_show(struct seq_file *seq, void *v)
66282 diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66283 --- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66284 +++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66285 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66288 struct rt6_info *rt = (struct rt6_info *)dst;
66289 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66290 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66294 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66297 struct rt6_info *rt = (struct rt6_info *)dst;
66298 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66299 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66300 __sk_dst_reset(sk);
66303 diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66304 --- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66305 +++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66306 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66308 int retv = -ENOPROTOOPT;
66310 + pax_track_stack();
66312 if (optval == NULL)
66315 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66319 + pax_track_stack();
66321 if (ip6_mroute_opt(optname))
66322 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66324 diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66325 --- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66326 +++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-14 11:25:44.000000000 -0400
66327 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66329 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66330 skb_checksum_complete(skb)) {
66331 - atomic_inc(&sk->sk_drops);
66332 + atomic_inc_unchecked(&sk->sk_drops);
66334 return NET_RX_DROP;
66336 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66337 struct raw6_sock *rp = raw6_sk(sk);
66339 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66340 - atomic_inc(&sk->sk_drops);
66341 + atomic_inc_unchecked(&sk->sk_drops);
66343 return NET_RX_DROP;
66345 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66347 if (inet->hdrincl) {
66348 if (skb_checksum_complete(skb)) {
66349 - atomic_inc(&sk->sk_drops);
66350 + atomic_inc_unchecked(&sk->sk_drops);
66352 return NET_RX_DROP;
66354 @@ -601,7 +601,7 @@ out:
66358 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66359 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66360 struct flowi6 *fl6, struct dst_entry **dstp,
66361 unsigned int flags)
66363 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66367 + pax_track_stack();
66369 /* Rough check on arithmetic overflow,
66370 better check is made in ip6_append_data().
66372 @@ -909,12 +911,15 @@ do_confirm:
66373 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66374 char __user *optval, int optlen)
66376 + struct icmp6_filter filter;
66379 case ICMPV6_FILTER:
66380 if (optlen > sizeof(struct icmp6_filter))
66381 optlen = sizeof(struct icmp6_filter);
66382 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66383 + if (copy_from_user(&filter, optval, optlen))
66385 + raw6_sk(sk)->filter = filter;
66388 return -ENOPROTOOPT;
66389 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66390 char __user *optval, int __user *optlen)
66393 + struct icmp6_filter filter;
66396 case ICMPV6_FILTER:
66397 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66398 len = sizeof(struct icmp6_filter);
66399 if (put_user(len, optlen))
66401 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66402 + filter = raw6_sk(sk)->filter;
66403 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66407 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66411 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66412 + atomic_read(&sp->sk_refcnt),
66413 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66418 + atomic_read_unchecked(&sp->sk_drops));
66421 static int raw6_seq_show(struct seq_file *seq, void *v)
66422 diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66423 --- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66424 +++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66425 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66429 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66430 +extern int grsec_enable_blackhole;
66433 static void tcp_v6_hash(struct sock *sk)
66435 if (sk->sk_state != TCP_CLOSE) {
66436 @@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66440 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66441 + if (!grsec_enable_blackhole)
66443 tcp_v6_send_reset(sk, skb);
66446 @@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66447 TCP_SKB_CB(skb)->sacked = 0;
66449 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66452 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66455 goto no_tcp_socket;
66459 - if (sk->sk_state == TCP_TIME_WAIT)
66460 + if (sk->sk_state == TCP_TIME_WAIT) {
66461 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66467 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66468 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66469 @@ -1792,6 +1807,10 @@ no_tcp_socket:
66471 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66473 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66474 + if (!grsec_enable_blackhole || (ret == 1 &&
66475 + (skb->dev->flags & IFF_LOOPBACK)))
66477 tcp_v6_send_reset(NULL, skb);
66480 @@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66482 0, /* non standard timer */
66483 0, /* open_requests have no inode */
66486 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66494 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66495 @@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66497 icsk->icsk_probes_out,
66499 - atomic_read(&sp->sk_refcnt), sp,
66500 + atomic_read(&sp->sk_refcnt),
66501 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66506 jiffies_to_clock_t(icsk->icsk_rto),
66507 jiffies_to_clock_t(icsk->icsk_ack.ato),
66508 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66509 @@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66510 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66511 tw->tw_substate, 0, 0,
66512 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66513 - atomic_read(&tw->tw_refcnt), tw);
66514 + atomic_read(&tw->tw_refcnt),
66515 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66523 static int tcp6_seq_show(struct seq_file *seq, void *v)
66524 diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66525 --- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66526 +++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66528 #include <linux/seq_file.h>
66529 #include "udp_impl.h"
66531 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66532 +extern int grsec_enable_blackhole;
66535 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66537 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66538 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66542 - atomic_inc(&sk->sk_drops);
66543 + atomic_inc_unchecked(&sk->sk_drops);
66544 drop_no_sk_drops_inc:
66545 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66547 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66551 - atomic_inc(&sk->sk_drops);
66552 + atomic_inc_unchecked(&sk->sk_drops);
66553 UDP6_INC_STATS_BH(sock_net(sk),
66554 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66555 UDP6_INC_STATS_BH(sock_net(sk),
66556 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66557 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66558 proto == IPPROTO_UDPLITE);
66560 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66561 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66563 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66566 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66567 if (!sock_owned_by_user(sk))
66568 udpv6_queue_rcv_skb(sk, skb);
66569 else if (sk_add_backlog(sk, skb)) {
66570 - atomic_inc(&sk->sk_drops);
66571 + atomic_inc_unchecked(&sk->sk_drops);
66572 bh_unlock_sock(sk);
66575 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66579 - atomic_read(&sp->sk_refcnt), sp,
66580 - atomic_read(&sp->sk_drops));
66581 + atomic_read(&sp->sk_refcnt),
66582 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66587 + atomic_read_unchecked(&sp->sk_drops));
66590 int udp6_seq_show(struct seq_file *seq, void *v)
66591 diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66592 --- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66593 +++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66594 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66595 add_wait_queue(&self->open_wait, &wait);
66597 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66598 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66599 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66601 /* As far as I can see, we protect open_count - Jean II */
66602 spin_lock_irqsave(&self->spinlock, flags);
66603 if (!tty_hung_up_p(filp)) {
66605 - self->open_count--;
66606 + local_dec(&self->open_count);
66608 spin_unlock_irqrestore(&self->spinlock, flags);
66609 - self->blocked_open++;
66610 + local_inc(&self->blocked_open);
66613 if (tty->termios->c_cflag & CBAUD) {
66614 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66617 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66618 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66619 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66623 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66625 /* ++ is not atomic, so this should be protected - Jean II */
66626 spin_lock_irqsave(&self->spinlock, flags);
66627 - self->open_count++;
66628 + local_inc(&self->open_count);
66629 spin_unlock_irqrestore(&self->spinlock, flags);
66631 - self->blocked_open--;
66632 + local_dec(&self->blocked_open);
66634 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66635 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66636 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66639 self->flags |= ASYNC_NORMAL_ACTIVE;
66640 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66642 /* ++ is not atomic, so this should be protected - Jean II */
66643 spin_lock_irqsave(&self->spinlock, flags);
66644 - self->open_count++;
66645 + local_inc(&self->open_count);
66647 tty->driver_data = self;
66649 spin_unlock_irqrestore(&self->spinlock, flags);
66651 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66652 - self->line, self->open_count);
66653 + self->line, local_read(&self->open_count));
66655 /* Not really used by us, but lets do it anyway */
66656 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66657 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66661 - if ((tty->count == 1) && (self->open_count != 1)) {
66662 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66664 * Uh, oh. tty->count is 1, which means that the tty
66665 * structure will be freed. state->count should always
66666 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66668 IRDA_DEBUG(0, "%s(), bad serial port count; "
66669 "tty->count is 1, state->count is %d\n", __func__ ,
66670 - self->open_count);
66671 - self->open_count = 1;
66672 + local_read(&self->open_count));
66673 + local_set(&self->open_count, 1);
66676 - if (--self->open_count < 0) {
66677 + if (local_dec_return(&self->open_count) < 0) {
66678 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66679 - __func__, self->line, self->open_count);
66680 - self->open_count = 0;
66681 + __func__, self->line, local_read(&self->open_count));
66682 + local_set(&self->open_count, 0);
66684 - if (self->open_count) {
66685 + if (local_read(&self->open_count)) {
66686 spin_unlock_irqrestore(&self->spinlock, flags);
66688 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66689 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66693 - if (self->blocked_open) {
66694 + if (local_read(&self->blocked_open)) {
66695 if (self->close_delay)
66696 schedule_timeout_interruptible(self->close_delay);
66697 wake_up_interruptible(&self->open_wait);
66698 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66699 spin_lock_irqsave(&self->spinlock, flags);
66700 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66702 - self->open_count = 0;
66703 + local_set(&self->open_count, 0);
66704 spin_unlock_irqrestore(&self->spinlock, flags);
66706 wake_up_interruptible(&self->open_wait);
66707 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66710 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66711 - seq_printf(m, "Open count: %d\n", self->open_count);
66712 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66713 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66714 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66716 diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66717 --- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66718 +++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66719 @@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66721 write_lock_bh(&iucv_sk_list.lock);
66723 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66724 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66725 while (__iucv_get_sock_by_name(name)) {
66726 sprintf(name, "%08x",
66727 - atomic_inc_return(&iucv_sk_list.autobind_name));
66728 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66731 write_unlock_bh(&iucv_sk_list.lock);
66732 diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66733 --- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66734 +++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66735 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66736 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66737 struct xfrm_kmaddress k;
66739 + pax_track_stack();
66741 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66742 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66743 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66744 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66745 static u32 get_acqseq(void)
66748 - static atomic_t acqseq;
66749 + static atomic_unchecked_t acqseq;
66752 - res = atomic_inc_return(&acqseq);
66753 + res = atomic_inc_return_unchecked(&acqseq);
66757 @@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66758 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66760 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66761 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66766 atomic_read(&s->sk_refcnt),
66767 sk_rmem_alloc_get(s),
66768 sk_wmem_alloc_get(s),
66769 diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66770 --- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66771 +++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66772 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66776 - lapb->callbacks = *callbacks;
66777 + lapb->callbacks = callbacks;
66779 __lapb_insert_cb(lapb);
66781 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66783 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66785 - if (lapb->callbacks.connect_confirmation)
66786 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66787 + if (lapb->callbacks->connect_confirmation)
66788 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66791 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66793 - if (lapb->callbacks.connect_indication)
66794 - lapb->callbacks.connect_indication(lapb->dev, reason);
66795 + if (lapb->callbacks->connect_indication)
66796 + lapb->callbacks->connect_indication(lapb->dev, reason);
66799 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66801 - if (lapb->callbacks.disconnect_confirmation)
66802 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66803 + if (lapb->callbacks->disconnect_confirmation)
66804 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66807 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66809 - if (lapb->callbacks.disconnect_indication)
66810 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66811 + if (lapb->callbacks->disconnect_indication)
66812 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66815 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66817 - if (lapb->callbacks.data_indication)
66818 - return lapb->callbacks.data_indication(lapb->dev, skb);
66819 + if (lapb->callbacks->data_indication)
66820 + return lapb->callbacks->data_indication(lapb->dev, skb);
66823 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66824 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66828 - if (lapb->callbacks.data_transmit) {
66829 - lapb->callbacks.data_transmit(lapb->dev, skb);
66830 + if (lapb->callbacks->data_transmit) {
66831 + lapb->callbacks->data_transmit(lapb->dev, skb);
66835 diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66836 --- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66837 +++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66838 @@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66839 struct tid_ampdu_rx *tid_rx;
66840 struct tid_ampdu_tx *tid_tx;
66842 + pax_track_stack();
66846 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66847 @@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66848 struct sta_info *sta = file->private_data;
66849 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66851 + pax_track_stack();
66853 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66854 htc->ht_supported ? "" : "not ");
66855 if (htc->ht_supported) {
66856 diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66857 --- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66858 +++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66860 #include <net/ieee80211_radiotap.h>
66861 #include <net/cfg80211.h>
66862 #include <net/mac80211.h>
66863 +#include <asm/local.h>
66865 #include "sta_info.h"
66867 @@ -714,7 +715,7 @@ struct ieee80211_local {
66868 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66869 spinlock_t queue_stop_reason_lock;
66872 + local_t open_count;
66873 int monitors, cooked_mntrs;
66874 /* number of interfaces with corresponding FIF_ flags */
66875 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66876 diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66877 --- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66878 +++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66879 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66883 - if (local->open_count == 0) {
66884 + if (local_read(&local->open_count) == 0) {
66885 res = drv_start(local);
66888 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66889 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66891 if (!is_valid_ether_addr(dev->dev_addr)) {
66892 - if (!local->open_count)
66893 + if (!local_read(&local->open_count))
66895 return -EADDRNOTAVAIL;
66897 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66898 mutex_unlock(&local->mtx);
66901 - local->open_count++;
66902 + local_inc(&local->open_count);
66904 if (hw_reconf_flags) {
66905 ieee80211_hw_config(local, hw_reconf_flags);
66906 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66908 drv_remove_interface(local, &sdata->vif);
66910 - if (!local->open_count)
66911 + if (!local_read(&local->open_count))
66915 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66919 - local->open_count--;
66920 + local_dec(&local->open_count);
66922 switch (sdata->vif.type) {
66923 case NL80211_IFTYPE_AP_VLAN:
66924 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66926 ieee80211_recalc_ps(local, -1);
66928 - if (local->open_count == 0) {
66929 + if (local_read(&local->open_count) == 0) {
66930 if (local->ops->napi_poll)
66931 napi_disable(&local->napi);
66932 ieee80211_clear_tx_pending(local);
66933 diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66934 --- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66935 +++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66936 @@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66937 local->hw.conf.power_level = power;
66940 - if (changed && local->open_count) {
66941 + if (changed && local_read(&local->open_count)) {
66942 ret = drv_config(local, changed);
66945 diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66946 --- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66947 +++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66948 @@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66949 bool have_higher_than_11mbit = false;
66950 u16 ap_ht_cap_flags;
66952 + pax_track_stack();
66954 /* AssocResp and ReassocResp have identical structure */
66956 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66957 diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66958 --- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66959 +++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66960 @@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66963 /* stop hardware - this must stop RX */
66964 - if (local->open_count)
66965 + if (local_read(&local->open_count))
66966 ieee80211_stop_device(local);
66968 local->suspended = true;
66969 diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66970 --- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66971 +++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66972 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66976 - if (local->open_count)
66977 + if (local_read(&local->open_count))
66980 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66981 diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
66982 --- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
66983 +++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
66984 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66986 spin_unlock_irqrestore(&events->lock, status);
66988 - if (copy_to_user(buf, pb, p))
66989 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66993 diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
66994 --- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
66995 +++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
66996 @@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
66997 local->resuming = true;
66999 /* restart hardware */
67000 - if (local->open_count) {
67001 + if (local_read(&local->open_count)) {
67003 * Upon resume hardware can sometimes be goofy due to
67004 * various platform / driver / bus issues, so restarting
67005 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
67006 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
67007 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
67008 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
67009 /* Increase the refcnt counter of the dest */
67010 atomic_inc(&dest->refcnt);
67012 - conn_flags = atomic_read(&dest->conn_flags);
67013 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
67014 if (cp->protocol != IPPROTO_UDP)
67015 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
67016 /* Bind with the destination and its corresponding transmitter */
67017 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
67018 atomic_set(&cp->refcnt, 1);
67020 atomic_set(&cp->n_control, 0);
67021 - atomic_set(&cp->in_pkts, 0);
67022 + atomic_set_unchecked(&cp->in_pkts, 0);
67024 atomic_inc(&ipvs->conn_count);
67025 if (flags & IP_VS_CONN_F_NO_CPORT)
67026 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
67028 /* Don't drop the entry if its number of incoming packets is not
67029 located in [0, 8] */
67030 - i = atomic_read(&cp->in_pkts);
67031 + i = atomic_read_unchecked(&cp->in_pkts);
67032 if (i > 8 || i < 0) return 0;
67034 if (!todrop_rate[i]) return 0;
67035 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
67036 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
67037 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
67038 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
67039 ret = cp->packet_xmit(skb, cp, pd->pp);
67040 /* do not touch skb anymore */
67042 - atomic_inc(&cp->in_pkts);
67043 + atomic_inc_unchecked(&cp->in_pkts);
67044 ip_vs_conn_put(cp);
67047 @@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
67048 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
67049 pkts = sysctl_sync_threshold(ipvs);
67051 - pkts = atomic_add_return(1, &cp->in_pkts);
67052 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67054 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
67055 cp->protocol == IPPROTO_SCTP) {
67056 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
67057 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
67058 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
67059 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
67060 ip_vs_rs_hash(ipvs, dest);
67061 write_unlock_bh(&ipvs->rs_lock);
67063 - atomic_set(&dest->conn_flags, conn_flags);
67064 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
67066 /* bind the service */
67068 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67069 " %-7s %-6d %-10d %-10d\n",
67072 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67073 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67074 atomic_read(&dest->weight),
67075 atomic_read(&dest->activeconns),
67076 atomic_read(&dest->inactconns));
67077 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67078 "%-7s %-6d %-10d %-10d\n",
67079 ntohl(dest->addr.ip),
67081 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67082 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67083 atomic_read(&dest->weight),
67084 atomic_read(&dest->activeconns),
67085 atomic_read(&dest->inactconns));
67086 @@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67087 struct ip_vs_dest_user *udest_compat;
67088 struct ip_vs_dest_user_kern udest;
67090 + pax_track_stack();
67092 if (!capable(CAP_NET_ADMIN))
67095 @@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67097 entry.addr = dest->addr.ip;
67098 entry.port = dest->port;
67099 - entry.conn_flags = atomic_read(&dest->conn_flags);
67100 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67101 entry.weight = atomic_read(&dest->weight);
67102 entry.u_threshold = dest->u_threshold;
67103 entry.l_threshold = dest->l_threshold;
67104 @@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67105 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67107 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67108 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67109 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67110 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67111 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67112 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67113 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67114 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67115 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67116 @@ -648,7 +648,7 @@ control:
67117 * i.e only increment in_pkts for Templates.
67119 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67120 - int pkts = atomic_add_return(1, &cp->in_pkts);
67121 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67123 if (pkts % sysctl_sync_period(ipvs) != 1)
67125 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67128 memcpy(&cp->in_seq, opt, sizeof(*opt));
67129 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67130 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67132 cp->old_state = cp->state;
67134 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67135 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67136 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67137 @@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67140 /* do not touch skb anymore */
67141 - atomic_inc(&cp->in_pkts);
67142 + atomic_inc_unchecked(&cp->in_pkts);
67146 @@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67149 /* do not touch skb anymore */
67150 - atomic_inc(&cp->in_pkts);
67151 + atomic_inc_unchecked(&cp->in_pkts);
67155 diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67156 --- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67157 +++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67158 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67160 To compile it as a module, choose M here. If unsure, say N.
67162 +config NETFILTER_XT_MATCH_GRADM
67163 + tristate '"gradm" match support'
67164 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67165 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67167 + The gradm match allows to match on grsecurity RBAC being enabled.
67168 + It is useful when iptables rules are applied early on bootup to
67169 + prevent connections to the machine (except from a trusted host)
67170 + while the RBAC system is disabled.
67172 config NETFILTER_XT_MATCH_HASHLIMIT
67173 tristate '"hashlimit" match support'
67174 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67175 diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67176 --- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67177 +++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67178 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67179 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67180 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67181 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67182 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67183 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67184 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67185 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67186 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67187 --- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67188 +++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67189 @@ -70,7 +70,7 @@ struct nfulnl_instance {
67192 static DEFINE_SPINLOCK(instances_lock);
67193 -static atomic_t global_seq;
67194 +static atomic_unchecked_t global_seq;
67196 #define INSTANCE_BUCKETS 16
67197 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67198 @@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67199 /* global sequence number */
67200 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67201 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67202 - htonl(atomic_inc_return(&global_seq)));
67203 + htonl(atomic_inc_return_unchecked(&global_seq)));
67206 struct nlattr *nla;
67207 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67208 --- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67209 +++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67210 @@ -58,7 +58,7 @@ struct nfqnl_instance {
67213 unsigned int queue_total;
67214 - atomic_t id_sequence; /* 'sequence' of pkt ids */
67215 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67216 struct list_head queue_list; /* packets in queue */
67219 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67220 nfmsg->version = NFNETLINK_V0;
67221 nfmsg->res_id = htons(queue->queue_num);
67223 - entry->id = atomic_inc_return(&queue->id_sequence);
67224 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67225 pmsg.packet_id = htonl(entry->id);
67226 pmsg.hw_protocol = entskb->protocol;
67227 pmsg.hook = entry->hook;
67228 @@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67229 inst->peer_pid, inst->queue_total,
67230 inst->copy_mode, inst->copy_range,
67231 inst->queue_dropped, inst->queue_user_dropped,
67232 - atomic_read(&inst->id_sequence), 1);
67233 + atomic_read_unchecked(&inst->id_sequence), 1);
67236 static const struct seq_operations nfqnl_seq_ops = {
67237 diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67238 --- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67239 +++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67242 + * gradm match for netfilter
67243 + * Copyright © Zbigniew Krzystolik, 2010
67245 + * This program is free software; you can redistribute it and/or modify
67246 + * it under the terms of the GNU General Public License; either version
67247 + * 2 or 3 as published by the Free Software Foundation.
67249 +#include <linux/module.h>
67250 +#include <linux/moduleparam.h>
67251 +#include <linux/skbuff.h>
67252 +#include <linux/netfilter/x_tables.h>
67253 +#include <linux/grsecurity.h>
67254 +#include <linux/netfilter/xt_gradm.h>
67257 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
67259 + const struct xt_gradm_mtinfo *info = par->matchinfo;
67260 + bool retval = false;
67261 + if (gr_acl_is_enabled())
67263 + return retval ^ info->invflags;
67266 +static struct xt_match gradm_mt_reg __read_mostly = {
67269 + .family = NFPROTO_UNSPEC,
67270 + .match = gradm_mt,
67271 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
67272 + .me = THIS_MODULE,
67275 +static int __init gradm_mt_init(void)
67277 + return xt_register_match(&gradm_mt_reg);
67280 +static void __exit gradm_mt_exit(void)
67282 + xt_unregister_match(&gradm_mt_reg);
67285 +module_init(gradm_mt_init);
67286 +module_exit(gradm_mt_exit);
67287 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
67288 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
67289 +MODULE_LICENSE("GPL");
67290 +MODULE_ALIAS("ipt_gradm");
67291 +MODULE_ALIAS("ip6t_gradm");
67292 diff -urNp linux-2.6.39.4/net/netfilter/xt_statistic.c linux-2.6.39.4/net/netfilter/xt_statistic.c
67293 --- linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
67294 +++ linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-08-05 19:44:37.000000000 -0400
67296 #include <linux/netfilter/x_tables.h>
67298 struct xt_statistic_priv {
67300 + atomic_unchecked_t count;
67301 } ____cacheline_aligned_in_smp;
67303 MODULE_LICENSE("GPL");
67304 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
67306 case XT_STATISTIC_MODE_NTH:
67308 - oval = atomic_read(&info->master->count);
67309 + oval = atomic_read_unchecked(&info->master->count);
67310 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
67311 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
67312 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
67316 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
67317 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
67318 if (info->master == NULL)
67320 - atomic_set(&info->master->count, info->u.nth.count);
67321 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
67325 diff -urNp linux-2.6.39.4/net/netlink/af_netlink.c linux-2.6.39.4/net/netlink/af_netlink.c
67326 --- linux-2.6.39.4/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
67327 +++ linux-2.6.39.4/net/netlink/af_netlink.c 2011-08-05 19:44:37.000000000 -0400
67328 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
67329 sk->sk_error_report(sk);
67332 - atomic_inc(&sk->sk_drops);
67333 + atomic_inc_unchecked(&sk->sk_drops);
67336 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
67337 @@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
67338 struct netlink_sock *nlk = nlk_sk(s);
67340 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
67341 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67348 nlk->groups ? (u32)nlk->groups[0] : 0,
67349 sk_rmem_alloc_get(s),
67350 sk_wmem_alloc_get(s),
67351 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67356 atomic_read(&s->sk_refcnt),
67357 - atomic_read(&s->sk_drops),
67358 + atomic_read_unchecked(&s->sk_drops),
67362 diff -urNp linux-2.6.39.4/net/netrom/af_netrom.c linux-2.6.39.4/net/netrom/af_netrom.c
67363 --- linux-2.6.39.4/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
67364 +++ linux-2.6.39.4/net/netrom/af_netrom.c 2011-08-05 19:44:37.000000000 -0400
67365 @@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
67366 struct sock *sk = sock->sk;
67367 struct nr_sock *nr = nr_sk(sk);
67369 + memset(sax, 0, sizeof(*sax));
67372 if (sk->sk_state != TCP_ESTABLISHED) {
67373 @@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
67374 *uaddr_len = sizeof(struct full_sockaddr_ax25);
67376 sax->fsa_ax25.sax25_family = AF_NETROM;
67377 - sax->fsa_ax25.sax25_ndigis = 0;
67378 sax->fsa_ax25.sax25_call = nr->source_addr;
67379 *uaddr_len = sizeof(struct sockaddr_ax25);
67381 diff -urNp linux-2.6.39.4/net/packet/af_packet.c linux-2.6.39.4/net/packet/af_packet.c
67382 --- linux-2.6.39.4/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
67383 +++ linux-2.6.39.4/net/packet/af_packet.c 2011-08-05 19:44:37.000000000 -0400
67384 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67386 spin_lock(&sk->sk_receive_queue.lock);
67387 po->stats.tp_packets++;
67388 - skb->dropcount = atomic_read(&sk->sk_drops);
67389 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67390 __skb_queue_tail(&sk->sk_receive_queue, skb);
67391 spin_unlock(&sk->sk_receive_queue.lock);
67392 sk->sk_data_ready(sk, skb->len);
67396 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67397 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67400 if (skb_head != skb->data && skb_shared(skb)) {
67401 @@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
67402 case PACKET_HDRLEN:
67403 if (len > sizeof(int))
67405 - if (copy_from_user(&val, optval, len))
67406 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67410 @@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
67412 if (put_user(len, optlen))
67414 - if (copy_to_user(optval, data, len))
67415 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67419 @@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
67422 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
67423 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67428 atomic_read(&s->sk_refcnt),
67431 diff -urNp linux-2.6.39.4/net/phonet/af_phonet.c linux-2.6.39.4/net/phonet/af_phonet.c
67432 --- linux-2.6.39.4/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
67433 +++ linux-2.6.39.4/net/phonet/af_phonet.c 2011-08-05 20:34:06.000000000 -0400
67434 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67436 struct phonet_protocol *pp;
67438 - if (protocol >= PHONET_NPROTO)
67439 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67443 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67447 - if (protocol >= PHONET_NPROTO)
67448 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67451 err = proto_register(pp->prot, 1);
67452 diff -urNp linux-2.6.39.4/net/phonet/pep.c linux-2.6.39.4/net/phonet/pep.c
67453 --- linux-2.6.39.4/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
67454 +++ linux-2.6.39.4/net/phonet/pep.c 2011-08-05 19:44:37.000000000 -0400
67455 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67457 case PNS_PEP_CTRL_REQ:
67458 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67459 - atomic_inc(&sk->sk_drops);
67460 + atomic_inc_unchecked(&sk->sk_drops);
67463 __skb_pull(skb, 4);
67464 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67467 if (pn->rx_credits == 0) {
67468 - atomic_inc(&sk->sk_drops);
67469 + atomic_inc_unchecked(&sk->sk_drops);
67473 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67476 if (pn->rx_credits == 0) {
67477 - atomic_inc(&sk->sk_drops);
67478 + atomic_inc_unchecked(&sk->sk_drops);
67482 diff -urNp linux-2.6.39.4/net/phonet/socket.c linux-2.6.39.4/net/phonet/socket.c
67483 --- linux-2.6.39.4/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
67484 +++ linux-2.6.39.4/net/phonet/socket.c 2011-08-05 19:44:37.000000000 -0400
67485 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
67486 pn->resource, sk->sk_state,
67487 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67488 sock_i_uid(sk), sock_i_ino(sk),
67489 - atomic_read(&sk->sk_refcnt), sk,
67490 - atomic_read(&sk->sk_drops), &len);
67491 + atomic_read(&sk->sk_refcnt),
67492 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67497 + atomic_read_unchecked(&sk->sk_drops), &len);
67499 seq_printf(seq, "%*s\n", 127 - len, "");
67501 diff -urNp linux-2.6.39.4/net/rds/cong.c linux-2.6.39.4/net/rds/cong.c
67502 --- linux-2.6.39.4/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
67503 +++ linux-2.6.39.4/net/rds/cong.c 2011-08-05 19:44:37.000000000 -0400
67505 * finds that the saved generation number is smaller than the global generation
67506 * number, it wakes up the process.
67508 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67509 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67512 * Congestion monitoring
67513 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67514 rdsdebug("waking map %p for %pI4\n",
67515 map, &map->m_addr);
67516 rds_stats_inc(s_cong_update_received);
67517 - atomic_inc(&rds_cong_generation);
67518 + atomic_inc_unchecked(&rds_cong_generation);
67519 if (waitqueue_active(&map->m_waitq))
67520 wake_up(&map->m_waitq);
67521 if (waitqueue_active(&rds_poll_waitq))
67522 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67524 int rds_cong_updated_since(unsigned long *recent)
67526 - unsigned long gen = atomic_read(&rds_cong_generation);
67527 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67529 if (likely(*recent == gen))
67531 diff -urNp linux-2.6.39.4/net/rds/ib_cm.c linux-2.6.39.4/net/rds/ib_cm.c
67532 --- linux-2.6.39.4/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
67533 +++ linux-2.6.39.4/net/rds/ib_cm.c 2011-08-05 19:44:37.000000000 -0400
67534 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67535 /* Clear the ACK state */
67536 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67537 #ifdef KERNEL_HAS_ATOMIC64
67538 - atomic64_set(&ic->i_ack_next, 0);
67539 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67541 ic->i_ack_next = 0;
67543 diff -urNp linux-2.6.39.4/net/rds/ib.h linux-2.6.39.4/net/rds/ib.h
67544 --- linux-2.6.39.4/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
67545 +++ linux-2.6.39.4/net/rds/ib.h 2011-08-05 19:44:37.000000000 -0400
67546 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67548 unsigned long i_ack_flags;
67549 #ifdef KERNEL_HAS_ATOMIC64
67550 - atomic64_t i_ack_next; /* next ACK to send */
67551 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67553 spinlock_t i_ack_lock; /* protect i_ack_next */
67554 u64 i_ack_next; /* next ACK to send */
67555 diff -urNp linux-2.6.39.4/net/rds/ib_recv.c linux-2.6.39.4/net/rds/ib_recv.c
67556 --- linux-2.6.39.4/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
67557 +++ linux-2.6.39.4/net/rds/ib_recv.c 2011-08-05 19:44:37.000000000 -0400
67558 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67559 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67562 - atomic64_set(&ic->i_ack_next, seq);
67563 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67564 if (ack_required) {
67565 smp_mb__before_clear_bit();
67566 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67567 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67568 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67569 smp_mb__after_clear_bit();
67571 - return atomic64_read(&ic->i_ack_next);
67572 + return atomic64_read_unchecked(&ic->i_ack_next);
67576 diff -urNp linux-2.6.39.4/net/rds/iw_cm.c linux-2.6.39.4/net/rds/iw_cm.c
67577 --- linux-2.6.39.4/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
67578 +++ linux-2.6.39.4/net/rds/iw_cm.c 2011-08-05 19:44:37.000000000 -0400
67579 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67580 /* Clear the ACK state */
67581 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67582 #ifdef KERNEL_HAS_ATOMIC64
67583 - atomic64_set(&ic->i_ack_next, 0);
67584 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67586 ic->i_ack_next = 0;
67588 diff -urNp linux-2.6.39.4/net/rds/iw.h linux-2.6.39.4/net/rds/iw.h
67589 --- linux-2.6.39.4/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
67590 +++ linux-2.6.39.4/net/rds/iw.h 2011-08-05 19:44:37.000000000 -0400
67591 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67593 unsigned long i_ack_flags;
67594 #ifdef KERNEL_HAS_ATOMIC64
67595 - atomic64_t i_ack_next; /* next ACK to send */
67596 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67598 spinlock_t i_ack_lock; /* protect i_ack_next */
67599 u64 i_ack_next; /* next ACK to send */
67600 diff -urNp linux-2.6.39.4/net/rds/iw_rdma.c linux-2.6.39.4/net/rds/iw_rdma.c
67601 --- linux-2.6.39.4/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
67602 +++ linux-2.6.39.4/net/rds/iw_rdma.c 2011-08-05 19:44:37.000000000 -0400
67603 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67604 struct rdma_cm_id *pcm_id;
67607 + pax_track_stack();
67609 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67610 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67612 diff -urNp linux-2.6.39.4/net/rds/iw_recv.c linux-2.6.39.4/net/rds/iw_recv.c
67613 --- linux-2.6.39.4/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
67614 +++ linux-2.6.39.4/net/rds/iw_recv.c 2011-08-05 19:44:37.000000000 -0400
67615 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67616 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67619 - atomic64_set(&ic->i_ack_next, seq);
67620 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67621 if (ack_required) {
67622 smp_mb__before_clear_bit();
67623 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67624 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67625 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67626 smp_mb__after_clear_bit();
67628 - return atomic64_read(&ic->i_ack_next);
67629 + return atomic64_read_unchecked(&ic->i_ack_next);
67633 diff -urNp linux-2.6.39.4/net/rxrpc/af_rxrpc.c linux-2.6.39.4/net/rxrpc/af_rxrpc.c
67634 --- linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
67635 +++ linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-08-05 19:44:37.000000000 -0400
67636 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67637 __be32 rxrpc_epoch;
67639 /* current debugging ID */
67640 -atomic_t rxrpc_debug_id;
67641 +atomic_unchecked_t rxrpc_debug_id;
67643 /* count of skbs currently in use */
67644 atomic_t rxrpc_n_skbs;
67645 diff -urNp linux-2.6.39.4/net/rxrpc/ar-ack.c linux-2.6.39.4/net/rxrpc/ar-ack.c
67646 --- linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
67647 +++ linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-08-05 19:44:37.000000000 -0400
67648 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67650 _enter("{%d,%d,%d,%d},",
67651 call->acks_hard, call->acks_unacked,
67652 - atomic_read(&call->sequence),
67653 + atomic_read_unchecked(&call->sequence),
67654 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67657 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67659 /* each Tx packet has a new serial number */
67661 - htonl(atomic_inc_return(&call->conn->serial));
67662 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67664 hdr = (struct rxrpc_header *) txb->head;
67665 hdr->serial = sp->hdr.serial;
67666 @@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
67668 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67670 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67671 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67675 @@ -631,7 +631,7 @@ process_further:
67677 latest = ntohl(sp->hdr.serial);
67678 hard = ntohl(ack.firstPacket);
67679 - tx = atomic_read(&call->sequence);
67680 + tx = atomic_read_unchecked(&call->sequence);
67682 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67684 @@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
67685 u32 abort_code = RX_PROTOCOL_ERROR;
67688 + pax_track_stack();
67690 //printk("\n--------------------\n");
67691 _enter("{%d,%s,%lx} [%lu]",
67692 call->debug_id, rxrpc_call_states[call->state], call->events,
67693 @@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
67694 goto maybe_reschedule;
67696 send_ACK_with_skew:
67697 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67698 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67699 ntohl(ack.serial));
67701 mtu = call->conn->trans->peer->if_mtu;
67702 @@ -1175,7 +1177,7 @@ send_ACK:
67703 ackinfo.rxMTU = htonl(5692);
67704 ackinfo.jumbo_max = htonl(4);
67706 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67707 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67708 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67710 ntohs(ack.maxSkew),
67711 @@ -1193,7 +1195,7 @@ send_ACK:
67713 _debug("send message");
67715 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67716 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67717 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67720 diff -urNp linux-2.6.39.4/net/rxrpc/ar-call.c linux-2.6.39.4/net/rxrpc/ar-call.c
67721 --- linux-2.6.39.4/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
67722 +++ linux-2.6.39.4/net/rxrpc/ar-call.c 2011-08-05 19:44:37.000000000 -0400
67723 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67724 spin_lock_init(&call->lock);
67725 rwlock_init(&call->state_lock);
67726 atomic_set(&call->usage, 1);
67727 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67728 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67729 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67731 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67732 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connection.c linux-2.6.39.4/net/rxrpc/ar-connection.c
67733 --- linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
67734 +++ linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-08-05 19:44:37.000000000 -0400
67735 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67736 rwlock_init(&conn->lock);
67737 spin_lock_init(&conn->state_lock);
67738 atomic_set(&conn->usage, 1);
67739 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67740 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67741 conn->avail_calls = RXRPC_MAXCALLS;
67742 conn->size_align = 4;
67743 conn->header_size = sizeof(struct rxrpc_header);
67744 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connevent.c linux-2.6.39.4/net/rxrpc/ar-connevent.c
67745 --- linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
67746 +++ linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-08-05 19:44:37.000000000 -0400
67747 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67749 len = iov[0].iov_len + iov[1].iov_len;
67751 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67752 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67753 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67755 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67756 diff -urNp linux-2.6.39.4/net/rxrpc/ar-input.c linux-2.6.39.4/net/rxrpc/ar-input.c
67757 --- linux-2.6.39.4/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
67758 +++ linux-2.6.39.4/net/rxrpc/ar-input.c 2011-08-05 19:44:37.000000000 -0400
67759 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67760 /* track the latest serial number on this connection for ACK packet
67762 serial = ntohl(sp->hdr.serial);
67763 - hi_serial = atomic_read(&call->conn->hi_serial);
67764 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67765 while (serial > hi_serial)
67766 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67767 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67770 /* request ACK generation for any ACK or DATA packet that requests
67771 diff -urNp linux-2.6.39.4/net/rxrpc/ar-internal.h linux-2.6.39.4/net/rxrpc/ar-internal.h
67772 --- linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
67773 +++ linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-08-05 19:44:37.000000000 -0400
67774 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67775 int error; /* error code for local abort */
67776 int debug_id; /* debug ID for printks */
67777 unsigned call_counter; /* call ID counter */
67778 - atomic_t serial; /* packet serial number counter */
67779 - atomic_t hi_serial; /* highest serial number received */
67780 + atomic_unchecked_t serial; /* packet serial number counter */
67781 + atomic_unchecked_t hi_serial; /* highest serial number received */
67782 u8 avail_calls; /* number of calls available */
67783 u8 size_align; /* data size alignment (for security) */
67784 u8 header_size; /* rxrpc + security header size */
67785 @@ -346,7 +346,7 @@ struct rxrpc_call {
67787 rwlock_t state_lock; /* lock for state transition */
67789 - atomic_t sequence; /* Tx data packet sequence counter */
67790 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67791 u32 abort_code; /* local/remote abort code */
67792 enum { /* current state of call */
67793 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67794 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67796 extern atomic_t rxrpc_n_skbs;
67797 extern __be32 rxrpc_epoch;
67798 -extern atomic_t rxrpc_debug_id;
67799 +extern atomic_unchecked_t rxrpc_debug_id;
67800 extern struct workqueue_struct *rxrpc_workqueue;
67803 diff -urNp linux-2.6.39.4/net/rxrpc/ar-local.c linux-2.6.39.4/net/rxrpc/ar-local.c
67804 --- linux-2.6.39.4/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
67805 +++ linux-2.6.39.4/net/rxrpc/ar-local.c 2011-08-05 19:44:37.000000000 -0400
67806 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67807 spin_lock_init(&local->lock);
67808 rwlock_init(&local->services_lock);
67809 atomic_set(&local->usage, 1);
67810 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67811 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67812 memcpy(&local->srx, srx, sizeof(*srx));
67815 diff -urNp linux-2.6.39.4/net/rxrpc/ar-output.c linux-2.6.39.4/net/rxrpc/ar-output.c
67816 --- linux-2.6.39.4/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
67817 +++ linux-2.6.39.4/net/rxrpc/ar-output.c 2011-08-05 19:44:37.000000000 -0400
67818 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67819 sp->hdr.cid = call->cid;
67820 sp->hdr.callNumber = call->call_id;
67822 - htonl(atomic_inc_return(&call->sequence));
67823 + htonl(atomic_inc_return_unchecked(&call->sequence));
67825 - htonl(atomic_inc_return(&conn->serial));
67826 + htonl(atomic_inc_return_unchecked(&conn->serial));
67827 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67828 sp->hdr.userStatus = 0;
67829 sp->hdr.securityIndex = conn->security_ix;
67830 diff -urNp linux-2.6.39.4/net/rxrpc/ar-peer.c linux-2.6.39.4/net/rxrpc/ar-peer.c
67831 --- linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
67832 +++ linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-08-05 19:44:37.000000000 -0400
67833 @@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67834 INIT_LIST_HEAD(&peer->error_targets);
67835 spin_lock_init(&peer->lock);
67836 atomic_set(&peer->usage, 1);
67837 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67838 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67839 memcpy(&peer->srx, srx, sizeof(*srx));
67841 rxrpc_assess_MTU_size(peer);
67842 diff -urNp linux-2.6.39.4/net/rxrpc/ar-proc.c linux-2.6.39.4/net/rxrpc/ar-proc.c
67843 --- linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
67844 +++ linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-08-05 19:44:37.000000000 -0400
67845 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67846 atomic_read(&conn->usage),
67847 rxrpc_conn_states[conn->state],
67848 key_serial(conn->key),
67849 - atomic_read(&conn->serial),
67850 - atomic_read(&conn->hi_serial));
67851 + atomic_read_unchecked(&conn->serial),
67852 + atomic_read_unchecked(&conn->hi_serial));
67856 diff -urNp linux-2.6.39.4/net/rxrpc/ar-transport.c linux-2.6.39.4/net/rxrpc/ar-transport.c
67857 --- linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
67858 +++ linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-08-05 19:44:37.000000000 -0400
67859 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67860 spin_lock_init(&trans->client_lock);
67861 rwlock_init(&trans->conn_lock);
67862 atomic_set(&trans->usage, 1);
67863 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67864 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67866 if (peer->srx.transport.family == AF_INET) {
67867 switch (peer->srx.transport_type) {
67868 diff -urNp linux-2.6.39.4/net/rxrpc/rxkad.c linux-2.6.39.4/net/rxrpc/rxkad.c
67869 --- linux-2.6.39.4/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
67870 +++ linux-2.6.39.4/net/rxrpc/rxkad.c 2011-08-05 19:44:37.000000000 -0400
67871 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67875 + pax_track_stack();
67877 sp = rxrpc_skb(skb);
67880 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67884 + pax_track_stack();
67888 sp = rxrpc_skb(skb);
67889 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67891 len = iov[0].iov_len + iov[1].iov_len;
67893 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67894 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67895 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67897 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67898 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67900 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67902 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67903 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67904 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67906 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67907 diff -urNp linux-2.6.39.4/net/sctp/proc.c linux-2.6.39.4/net/sctp/proc.c
67908 --- linux-2.6.39.4/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
67909 +++ linux-2.6.39.4/net/sctp/proc.c 2011-08-05 19:44:37.000000000 -0400
67910 @@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
67911 sctp_for_each_hentry(epb, node, &head->chain) {
67914 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
67915 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
67916 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67921 sctp_sk(sk)->type, sk->sk_state, hash,
67922 epb->bind_addr.port,
67923 sock_i_uid(sk), sock_i_ino(sk));
67924 @@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
67926 "%8p %8p %-3d %-3d %-2d %-4d "
67927 "%4d %8d %8d %7d %5lu %-5d %5d ",
67928 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67929 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67934 + sctp_sk(sk)->type, sk->sk_state,
67935 assoc->state, hash,
67937 assoc->sndbuf_used,
67938 diff -urNp linux-2.6.39.4/net/sctp/socket.c linux-2.6.39.4/net/sctp/socket.c
67939 --- linux-2.6.39.4/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
67940 +++ linux-2.6.39.4/net/sctp/socket.c 2011-08-05 19:44:37.000000000 -0400
67941 @@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
67942 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67943 if (space_left < addrlen)
67945 - if (copy_to_user(to, &temp, addrlen))
67946 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67950 diff -urNp linux-2.6.39.4/net/socket.c linux-2.6.39.4/net/socket.c
67951 --- linux-2.6.39.4/net/socket.c 2011-06-03 00:04:14.000000000 -0400
67952 +++ linux-2.6.39.4/net/socket.c 2011-08-05 19:44:37.000000000 -0400
67954 #include <linux/nsproxy.h>
67955 #include <linux/magic.h>
67956 #include <linux/slab.h>
67957 +#include <linux/in.h>
67959 #include <asm/uaccess.h>
67960 #include <asm/unistd.h>
67961 @@ -105,6 +106,8 @@
67962 #include <linux/sockios.h>
67963 #include <linux/atalk.h>
67965 +#include <linux/grsock.h>
67967 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67968 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67969 unsigned long nr_segs, loff_t pos);
67970 @@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
67971 &sockfs_dentry_operations, SOCKFS_MAGIC);
67974 -static struct vfsmount *sock_mnt __read_mostly;
67975 +struct vfsmount *sock_mnt __read_mostly;
67977 static struct file_system_type sock_fs_type = {
67979 @@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
67980 return -EAFNOSUPPORT;
67981 if (type < 0 || type >= SOCK_MAX)
67983 + if (protocol < 0)
67988 @@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67989 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67990 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67992 + if(!gr_search_socket(family, type, protocol)) {
67993 + retval = -EACCES;
67997 + if (gr_handle_sock_all(family, type, protocol)) {
67998 + retval = -EACCES;
68002 retval = sock_create(family, type, protocol, &sock);
68005 @@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68007 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
68009 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
68013 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
68017 err = security_socket_bind(sock,
68018 (struct sockaddr *)&address,
68020 @@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68021 (struct sockaddr *)
68022 &address, addrlen);
68025 fput_light(sock->file, fput_needed);
68028 @@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
68029 if ((unsigned)backlog > somaxconn)
68030 backlog = somaxconn;
68032 + if (gr_handle_sock_server_other(sock->sk)) {
68037 + err = gr_search_listen(sock);
68041 err = security_socket_listen(sock, backlog);
68043 err = sock->ops->listen(sock, backlog);
68046 fput_light(sock->file, fput_needed);
68049 @@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68050 newsock->type = sock->type;
68051 newsock->ops = sock->ops;
68053 + if (gr_handle_sock_server_other(sock->sk)) {
68055 + sock_release(newsock);
68059 + err = gr_search_accept(sock);
68061 + sock_release(newsock);
68066 * We don't need try_module_get here, as the listening socket (sock)
68067 * has the protocol module (sock->ops->owner) held.
68068 @@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68069 fd_install(newfd, newfile);
68072 + gr_attach_curr_ip(newsock->sk);
68075 fput_light(sock->file, fput_needed);
68077 @@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68080 struct socket *sock;
68081 + struct sockaddr *sck;
68082 struct sockaddr_storage address;
68083 int err, fput_needed;
68085 @@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68089 + sck = (struct sockaddr *)&address;
68091 + if (gr_handle_sock_client(sck)) {
68096 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
68101 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
68103 @@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
68104 int err, ctl_len, iov_size, total_len;
68107 + pax_track_stack();
68110 if (MSG_CMSG_COMPAT & flags) {
68111 if (get_compat_msghdr(&msg_sys, msg_compat))
68112 diff -urNp linux-2.6.39.4/net/sunrpc/sched.c linux-2.6.39.4/net/sunrpc/sched.c
68113 --- linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:11:51.000000000 -0400
68114 +++ linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:12:20.000000000 -0400
68115 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
68117 static void rpc_task_set_debuginfo(struct rpc_task *task)
68119 - static atomic_t rpc_pid;
68120 + static atomic_unchecked_t rpc_pid;
68122 - task->tk_pid = atomic_inc_return(&rpc_pid);
68123 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
68126 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
68127 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c
68128 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
68129 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-05 19:44:37.000000000 -0400
68130 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
68131 static unsigned int min_max_inline = 4096;
68132 static unsigned int max_max_inline = 65536;
68134 -atomic_t rdma_stat_recv;
68135 -atomic_t rdma_stat_read;
68136 -atomic_t rdma_stat_write;
68137 -atomic_t rdma_stat_sq_starve;
68138 -atomic_t rdma_stat_rq_starve;
68139 -atomic_t rdma_stat_rq_poll;
68140 -atomic_t rdma_stat_rq_prod;
68141 -atomic_t rdma_stat_sq_poll;
68142 -atomic_t rdma_stat_sq_prod;
68143 +atomic_unchecked_t rdma_stat_recv;
68144 +atomic_unchecked_t rdma_stat_read;
68145 +atomic_unchecked_t rdma_stat_write;
68146 +atomic_unchecked_t rdma_stat_sq_starve;
68147 +atomic_unchecked_t rdma_stat_rq_starve;
68148 +atomic_unchecked_t rdma_stat_rq_poll;
68149 +atomic_unchecked_t rdma_stat_rq_prod;
68150 +atomic_unchecked_t rdma_stat_sq_poll;
68151 +atomic_unchecked_t rdma_stat_sq_prod;
68153 /* Temporary NFS request map and context caches */
68154 struct kmem_cache *svc_rdma_map_cachep;
68155 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
68159 - if (len && copy_to_user(buffer, str_buf, len))
68160 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
68164 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
68166 .procname = "rdma_stat_read",
68167 .data = &rdma_stat_read,
68168 - .maxlen = sizeof(atomic_t),
68169 + .maxlen = sizeof(atomic_unchecked_t),
68171 .proc_handler = read_reset_stat,
68174 .procname = "rdma_stat_recv",
68175 .data = &rdma_stat_recv,
68176 - .maxlen = sizeof(atomic_t),
68177 + .maxlen = sizeof(atomic_unchecked_t),
68179 .proc_handler = read_reset_stat,
68182 .procname = "rdma_stat_write",
68183 .data = &rdma_stat_write,
68184 - .maxlen = sizeof(atomic_t),
68185 + .maxlen = sizeof(atomic_unchecked_t),
68187 .proc_handler = read_reset_stat,
68190 .procname = "rdma_stat_sq_starve",
68191 .data = &rdma_stat_sq_starve,
68192 - .maxlen = sizeof(atomic_t),
68193 + .maxlen = sizeof(atomic_unchecked_t),
68195 .proc_handler = read_reset_stat,
68198 .procname = "rdma_stat_rq_starve",
68199 .data = &rdma_stat_rq_starve,
68200 - .maxlen = sizeof(atomic_t),
68201 + .maxlen = sizeof(atomic_unchecked_t),
68203 .proc_handler = read_reset_stat,
68206 .procname = "rdma_stat_rq_poll",
68207 .data = &rdma_stat_rq_poll,
68208 - .maxlen = sizeof(atomic_t),
68209 + .maxlen = sizeof(atomic_unchecked_t),
68211 .proc_handler = read_reset_stat,
68214 .procname = "rdma_stat_rq_prod",
68215 .data = &rdma_stat_rq_prod,
68216 - .maxlen = sizeof(atomic_t),
68217 + .maxlen = sizeof(atomic_unchecked_t),
68219 .proc_handler = read_reset_stat,
68222 .procname = "rdma_stat_sq_poll",
68223 .data = &rdma_stat_sq_poll,
68224 - .maxlen = sizeof(atomic_t),
68225 + .maxlen = sizeof(atomic_unchecked_t),
68227 .proc_handler = read_reset_stat,
68230 .procname = "rdma_stat_sq_prod",
68231 .data = &rdma_stat_sq_prod,
68232 - .maxlen = sizeof(atomic_t),
68233 + .maxlen = sizeof(atomic_unchecked_t),
68235 .proc_handler = read_reset_stat,
68237 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
68238 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
68239 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-05 19:44:37.000000000 -0400
68240 @@ -499,7 +499,7 @@ next_sge:
68241 svc_rdma_put_context(ctxt, 0);
68244 - atomic_inc(&rdma_stat_read);
68245 + atomic_inc_unchecked(&rdma_stat_read);
68247 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
68248 chl_map->ch[ch_no].count -= read_wr.num_sge;
68249 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68251 list_del_init(&ctxt->dto_q);
68253 - atomic_inc(&rdma_stat_rq_starve);
68254 + atomic_inc_unchecked(&rdma_stat_rq_starve);
68255 clear_bit(XPT_DATA, &xprt->xpt_flags);
68258 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68259 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
68260 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
68261 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
68262 - atomic_inc(&rdma_stat_recv);
68263 + atomic_inc_unchecked(&rdma_stat_recv);
68265 /* Build up the XDR from the receive buffers. */
68266 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
68267 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
68268 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
68269 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-05 19:44:37.000000000 -0400
68270 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
68271 write_wr.wr.rdma.remote_addr = to;
68274 - atomic_inc(&rdma_stat_write);
68275 + atomic_inc_unchecked(&rdma_stat_write);
68276 if (svc_rdma_send(xprt, &write_wr))
68279 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
68280 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
68281 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-05 19:44:37.000000000 -0400
68282 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
68285 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
68286 - atomic_inc(&rdma_stat_rq_poll);
68287 + atomic_inc_unchecked(&rdma_stat_rq_poll);
68289 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
68290 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
68291 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
68295 - atomic_inc(&rdma_stat_rq_prod);
68296 + atomic_inc_unchecked(&rdma_stat_rq_prod);
68298 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
68300 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
68303 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
68304 - atomic_inc(&rdma_stat_sq_poll);
68305 + atomic_inc_unchecked(&rdma_stat_sq_poll);
68306 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
68307 if (wc.status != IB_WC_SUCCESS)
68308 /* Close the transport */
68309 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
68313 - atomic_inc(&rdma_stat_sq_prod);
68314 + atomic_inc_unchecked(&rdma_stat_sq_prod);
68317 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
68318 @@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
68319 spin_lock_bh(&xprt->sc_lock);
68320 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
68321 spin_unlock_bh(&xprt->sc_lock);
68322 - atomic_inc(&rdma_stat_sq_starve);
68323 + atomic_inc_unchecked(&rdma_stat_sq_starve);
68325 /* See if we can opportunistically reap SQ WR to make room */
68327 diff -urNp linux-2.6.39.4/net/sysctl_net.c linux-2.6.39.4/net/sysctl_net.c
68328 --- linux-2.6.39.4/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
68329 +++ linux-2.6.39.4/net/sysctl_net.c 2011-08-05 19:44:37.000000000 -0400
68330 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
68331 struct ctl_table *table)
68333 /* Allow network administrator to have same access as root. */
68334 - if (capable(CAP_NET_ADMIN)) {
68335 + if (capable_nolog(CAP_NET_ADMIN)) {
68336 int mode = (table->mode >> 6) & 7;
68337 return (mode << 6) | (mode << 3) | mode;
68339 diff -urNp linux-2.6.39.4/net/unix/af_unix.c linux-2.6.39.4/net/unix/af_unix.c
68340 --- linux-2.6.39.4/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
68341 +++ linux-2.6.39.4/net/unix/af_unix.c 2011-08-05 19:44:37.000000000 -0400
68342 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
68343 err = -ECONNREFUSED;
68344 if (!S_ISSOCK(inode->i_mode))
68347 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
68352 u = unix_find_socket_byinode(inode);
68355 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
68357 struct dentry *dentry;
68358 dentry = unix_sk(u)->dentry;
68360 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
68367 touch_atime(unix_sk(u)->mnt, dentry);
68369 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
68370 err = security_path_mknod(&nd.path, dentry, mode, 0);
68372 goto out_mknod_drop_write;
68373 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
68375 + goto out_mknod_drop_write;
68377 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
68378 out_mknod_drop_write:
68379 mnt_drop_write(nd.path.mnt);
68381 goto out_mknod_dput;
68383 + gr_handle_create(dentry, nd.path.mnt);
68385 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
68386 dput(nd.path.dentry);
68387 nd.path.dentry = dentry;
68388 @@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
68389 unix_state_lock(s);
68391 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
68392 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68397 atomic_read(&s->sk_refcnt),
68399 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
68400 diff -urNp linux-2.6.39.4/net/wireless/core.h linux-2.6.39.4/net/wireless/core.h
68401 --- linux-2.6.39.4/net/wireless/core.h 2011-05-19 00:06:34.000000000 -0400
68402 +++ linux-2.6.39.4/net/wireless/core.h 2011-08-05 20:34:06.000000000 -0400
68403 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
68406 /* rfkill support */
68407 - struct rfkill_ops rfkill_ops;
68408 + rfkill_ops_no_const rfkill_ops;
68409 struct rfkill *rfkill;
68410 struct work_struct rfkill_sync;
68412 diff -urNp linux-2.6.39.4/net/wireless/wext-core.c linux-2.6.39.4/net/wireless/wext-core.c
68413 --- linux-2.6.39.4/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
68414 +++ linux-2.6.39.4/net/wireless/wext-core.c 2011-08-05 19:44:37.000000000 -0400
68415 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
68418 /* Support for very large requests */
68419 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
68420 - (user_length > descr->max_tokens)) {
68421 + if (user_length > descr->max_tokens) {
68422 /* Allow userspace to GET more than max so
68423 * we can support any size GET requests.
68424 * There is still a limit : -ENOMEM.
68425 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68429 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68431 - * If this is a GET, but not NOMAX, it means that the extra
68432 - * data is not bounded by userspace, but by max_tokens. Thus
68433 - * set the length to max_tokens. This matches the extra data
68435 - * The driver should fill it with the number of tokens it
68436 - * provided, and it may check iwp->length rather than having
68437 - * knowledge of max_tokens. If the driver doesn't change the
68438 - * iwp->length, this ioctl just copies back max_token tokens
68439 - * filled with zeroes. Hopefully the driver isn't claiming
68440 - * them to be valid data.
68442 - iwp->length = descr->max_tokens;
68445 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68447 iwp->length += essid_compat;
68448 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_policy.c linux-2.6.39.4/net/xfrm/xfrm_policy.c
68449 --- linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
68450 +++ linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-08-05 19:44:37.000000000 -0400
68451 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68453 policy->walk.dead = 1;
68455 - atomic_inc(&policy->genid);
68456 + atomic_inc_unchecked(&policy->genid);
68458 if (del_timer(&policy->timer))
68459 xfrm_pol_put(policy);
68460 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68461 hlist_add_head(&policy->bydst, chain);
68462 xfrm_pol_hold(policy);
68463 net->xfrm.policy_count[dir]++;
68464 - atomic_inc(&flow_cache_genid);
68465 + atomic_inc_unchecked(&flow_cache_genid);
68467 __xfrm_policy_unlink(delpol, dir);
68468 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68469 @@ -1527,7 +1527,7 @@ free_dst:
68475 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68478 @@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
68484 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68486 #ifdef CONFIG_XFRM_SUB_POLICY
68487 @@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
68493 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68495 #ifdef CONFIG_XFRM_SUB_POLICY
68496 @@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
68498 xdst->num_pols = num_pols;
68499 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68500 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68501 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68505 @@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68506 if (xdst->xfrm_genid != dst->xfrm->genid)
68508 if (xdst->num_pols > 0 &&
68509 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68510 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68513 mtu = dst_mtu(dst->child);
68514 @@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
68515 sizeof(pol->xfrm_vec[i].saddr));
68516 pol->xfrm_vec[i].encap_family = mp->new_family;
68517 /* flush bundles */
68518 - atomic_inc(&pol->genid);
68519 + atomic_inc_unchecked(&pol->genid);
68523 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_user.c linux-2.6.39.4/net/xfrm/xfrm_user.c
68524 --- linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
68525 +++ linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-08-05 19:44:37.000000000 -0400
68526 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68527 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68530 + pax_track_stack();
68532 if (xp->xfrm_nr == 0)
68535 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68539 + pax_track_stack();
68541 if (attrs[XFRMA_MIGRATE] == NULL)
68544 diff -urNp linux-2.6.39.4/scripts/basic/fixdep.c linux-2.6.39.4/scripts/basic/fixdep.c
68545 --- linux-2.6.39.4/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
68546 +++ linux-2.6.39.4/scripts/basic/fixdep.c 2011-08-05 19:44:37.000000000 -0400
68547 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68549 static void parse_config_file(const char *map, size_t len)
68551 - const int *end = (const int *) (map + len);
68552 + const unsigned int *end = (const unsigned int *) (map + len);
68553 /* start at +1, so that p can never be < map */
68554 - const int *m = (const int *) map + 1;
68555 + const unsigned int *m = (const unsigned int *) map + 1;
68558 for (; m < end; m++) {
68559 @@ -405,7 +405,7 @@ static void print_deps(void)
68560 static void traps(void)
68562 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68563 - int *p = (int *)test;
68564 + unsigned int *p = (unsigned int *)test;
68566 if (*p != INT_CONF) {
68567 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68568 diff -urNp linux-2.6.39.4/scripts/gcc-plugin.sh linux-2.6.39.4/scripts/gcc-plugin.sh
68569 --- linux-2.6.39.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68570 +++ linux-2.6.39.4/scripts/gcc-plugin.sh 2011-08-05 20:34:06.000000000 -0400
68574 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
68575 diff -urNp linux-2.6.39.4/scripts/Makefile.build linux-2.6.39.4/scripts/Makefile.build
68576 --- linux-2.6.39.4/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
68577 +++ linux-2.6.39.4/scripts/Makefile.build 2011-08-05 19:44:37.000000000 -0400
68578 @@ -93,7 +93,7 @@ endif
68581 # Do not include host rules unless needed
68582 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68583 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68584 include scripts/Makefile.host
68587 diff -urNp linux-2.6.39.4/scripts/Makefile.clean linux-2.6.39.4/scripts/Makefile.clean
68588 --- linux-2.6.39.4/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
68589 +++ linux-2.6.39.4/scripts/Makefile.clean 2011-08-05 19:44:37.000000000 -0400
68590 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68591 __clean-files := $(extra-y) $(always) \
68592 $(targets) $(clean-files) \
68594 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68595 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68596 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68598 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68600 diff -urNp linux-2.6.39.4/scripts/Makefile.host linux-2.6.39.4/scripts/Makefile.host
68601 --- linux-2.6.39.4/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
68602 +++ linux-2.6.39.4/scripts/Makefile.host 2011-08-05 19:44:37.000000000 -0400
68604 # Note: Shared libraries consisting of C++ files are not supported
68606 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68607 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68610 # Executables compiled from a single .c file
68611 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68612 # Shared libaries (only .c supported)
68613 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68614 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68615 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68616 # Remove .so files from "xxx-objs"
68617 host-cobjs := $(filter-out %.so,$(host-cobjs))
68619 diff -urNp linux-2.6.39.4/scripts/mod/file2alias.c linux-2.6.39.4/scripts/mod/file2alias.c
68620 --- linux-2.6.39.4/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
68621 +++ linux-2.6.39.4/scripts/mod/file2alias.c 2011-08-05 19:44:37.000000000 -0400
68622 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68623 unsigned long size, unsigned long id_size,
68629 if (size % id_size || size < id_size) {
68630 if (cross_build != 0)
68631 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68632 /* USB is special because the bcdDevice can be matched against a numeric range */
68633 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68634 static void do_usb_entry(struct usb_device_id *id,
68635 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68636 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68637 unsigned char range_lo, unsigned char range_hi,
68638 unsigned char max, struct module *mod)
68640 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68641 for (i = 0; i < count; i++) {
68642 const char *id = (char *)devs[i].id;
68643 char acpi_id[sizeof(devs[0].id)];
68647 buf_printf(&mod->dev_table_buf,
68648 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68649 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68651 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68652 const char *id = (char *)card->devs[j].id;
68654 + unsigned int i2, j2;
68658 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68659 /* add an individual alias for every device entry */
68661 char acpi_id[sizeof(card->devs[0].id)];
68665 buf_printf(&mod->dev_table_buf,
68666 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68667 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
68668 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68672 + unsigned int i, j;
68674 sprintf(alias, "dmi*");
68676 diff -urNp linux-2.6.39.4/scripts/mod/modpost.c linux-2.6.39.4/scripts/mod/modpost.c
68677 --- linux-2.6.39.4/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
68678 +++ linux-2.6.39.4/scripts/mod/modpost.c 2011-08-05 19:44:37.000000000 -0400
68679 @@ -896,6 +896,7 @@ enum mismatch {
68680 ANY_INIT_TO_ANY_EXIT,
68681 ANY_EXIT_TO_ANY_INIT,
68682 EXPORT_TO_INIT_EXIT,
68686 struct sectioncheck {
68687 @@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
68688 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68689 .mismatch = EXPORT_TO_INIT_EXIT,
68690 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68692 +/* Do not reference code from writable data */
68694 + .fromsec = { DATA_SECTIONS, NULL },
68695 + .tosec = { TEXT_SECTIONS, NULL },
68696 + .mismatch = DATA_TO_TEXT
68700 @@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
68702 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68704 - if (sym->st_value == addr)
68706 /* Find a symbol nearby - addr are maybe negative */
68707 d = sym->st_value - addr;
68711 d = addr - sym->st_value;
68712 if (d < distance) {
68713 @@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
68714 tosym, prl_to, prl_to, tosym);
68717 + case DATA_TO_TEXT:
68720 + "The variable %s references\n"
68721 + "the %s %s%s%s\n",
68722 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68726 fprintf(stderr, "\n");
68728 @@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
68729 static void check_sec_ref(struct module *mod, const char *modname,
68730 struct elf_info *elf)
68734 Elf_Shdr *sechdrs = elf->sechdrs;
68736 /* Walk through all sections */
68737 @@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
68741 -void buf_write(struct buffer *buf, const char *s, int len)
68742 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68744 if (buf->size - buf->pos < len) {
68745 buf->size += len + SZ;
68746 @@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
68747 if (fstat(fileno(file), &st) < 0)
68750 - if (st.st_size != b->pos)
68751 + if (st.st_size != (off_t)b->pos)
68754 tmp = NOFAIL(malloc(b->pos));
68755 diff -urNp linux-2.6.39.4/scripts/mod/modpost.h linux-2.6.39.4/scripts/mod/modpost.h
68756 --- linux-2.6.39.4/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
68757 +++ linux-2.6.39.4/scripts/mod/modpost.h 2011-08-05 19:44:37.000000000 -0400
68758 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68764 + unsigned int pos;
68765 + unsigned int size;
68768 void __attribute__((format(printf, 2, 3)))
68769 buf_printf(struct buffer *buf, const char *fmt, ...);
68772 -buf_write(struct buffer *buf, const char *s, int len);
68773 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68776 struct module *next;
68777 diff -urNp linux-2.6.39.4/scripts/mod/sumversion.c linux-2.6.39.4/scripts/mod/sumversion.c
68778 --- linux-2.6.39.4/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
68779 +++ linux-2.6.39.4/scripts/mod/sumversion.c 2011-08-05 19:44:37.000000000 -0400
68780 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68784 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68785 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68786 warn("writing sum in %s failed: %s\n",
68787 filename, strerror(errno));
68789 diff -urNp linux-2.6.39.4/scripts/pnmtologo.c linux-2.6.39.4/scripts/pnmtologo.c
68790 --- linux-2.6.39.4/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
68791 +++ linux-2.6.39.4/scripts/pnmtologo.c 2011-08-05 19:44:37.000000000 -0400
68792 @@ -237,14 +237,14 @@ static void write_header(void)
68793 fprintf(out, " * Linux logo %s\n", logoname);
68794 fputs(" */\n\n", out);
68795 fputs("#include <linux/linux_logo.h>\n\n", out);
68796 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68797 + fprintf(out, "static unsigned char %s_data[] = {\n",
68801 static void write_footer(void)
68803 fputs("\n};\n\n", out);
68804 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68805 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68806 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68807 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68808 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68809 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68810 fputs("\n};\n\n", out);
68812 /* write logo clut */
68813 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68814 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68817 for (i = 0; i < logo_clutsize; i++) {
68818 diff -urNp linux-2.6.39.4/security/apparmor/lsm.c linux-2.6.39.4/security/apparmor/lsm.c
68819 --- linux-2.6.39.4/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
68820 +++ linux-2.6.39.4/security/apparmor/lsm.c 2011-08-05 20:34:06.000000000 -0400
68821 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68825 -static struct security_operations apparmor_ops = {
68826 +static struct security_operations apparmor_ops __read_only = {
68827 .name = "apparmor",
68829 .ptrace_access_check = apparmor_ptrace_access_check,
68830 diff -urNp linux-2.6.39.4/security/commoncap.c linux-2.6.39.4/security/commoncap.c
68831 --- linux-2.6.39.4/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
68832 +++ linux-2.6.39.4/security/commoncap.c 2011-08-05 19:44:37.000000000 -0400
68834 #include <linux/prctl.h>
68835 #include <linux/securebits.h>
68836 #include <linux/user_namespace.h>
68837 +#include <net/sock.h>
68840 * If a non-root user executes a setuid-root binary in
68841 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68843 int cap_netlink_recv(struct sk_buff *skb, int cap)
68845 - if (!cap_raised(current_cap(), cap))
68846 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68850 @@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
68852 const struct cred *cred = current_cred();
68854 + if (gr_acl_enable_at_secure())
68857 if (cred->uid != 0) {
68858 if (bprm->cap_effective)
68860 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_api.c linux-2.6.39.4/security/integrity/ima/ima_api.c
68861 --- linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
68862 +++ linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-08-05 19:44:37.000000000 -0400
68863 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68866 /* can overflow, only indicator */
68867 - atomic_long_inc(&ima_htable.violations);
68868 + atomic_long_inc_unchecked(&ima_htable.violations);
68870 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68872 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_fs.c linux-2.6.39.4/security/integrity/ima/ima_fs.c
68873 --- linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
68874 +++ linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-08-05 19:44:37.000000000 -0400
68875 @@ -28,12 +28,12 @@
68876 static int valid_policy = 1;
68877 #define TMPBUFLEN 12
68878 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68879 - loff_t *ppos, atomic_long_t *val)
68880 + loff_t *ppos, atomic_long_unchecked_t *val)
68882 char tmpbuf[TMPBUFLEN];
68885 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68886 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68887 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68890 diff -urNp linux-2.6.39.4/security/integrity/ima/ima.h linux-2.6.39.4/security/integrity/ima/ima.h
68891 --- linux-2.6.39.4/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
68892 +++ linux-2.6.39.4/security/integrity/ima/ima.h 2011-08-05 19:44:37.000000000 -0400
68893 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68894 extern spinlock_t ima_queue_lock;
68896 struct ima_h_table {
68897 - atomic_long_t len; /* number of stored measurements in the list */
68898 - atomic_long_t violations;
68899 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68900 + atomic_long_unchecked_t violations;
68901 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68903 extern struct ima_h_table ima_htable;
68904 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_queue.c linux-2.6.39.4/security/integrity/ima/ima_queue.c
68905 --- linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
68906 +++ linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-08-05 19:44:37.000000000 -0400
68907 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68908 INIT_LIST_HEAD(&qe->later);
68909 list_add_tail_rcu(&qe->later, &ima_measurements);
68911 - atomic_long_inc(&ima_htable.len);
68912 + atomic_long_inc_unchecked(&ima_htable.len);
68913 key = ima_hash_key(entry->digest);
68914 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68916 diff -urNp linux-2.6.39.4/security/Kconfig linux-2.6.39.4/security/Kconfig
68917 --- linux-2.6.39.4/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
68918 +++ linux-2.6.39.4/security/Kconfig 2011-08-05 19:44:37.000000000 -0400
68921 menu "Security options"
68923 +source grsecurity/Kconfig
68927 + config ARCH_TRACK_EXEC_LIMIT
68930 + config PAX_PER_CPU_PGD
68933 + config TASK_SIZE_MAX_SHIFT
68935 + depends on X86_64
68936 + default 47 if !PAX_PER_CPU_PGD
68937 + default 42 if PAX_PER_CPU_PGD
68939 + config PAX_ENABLE_PAE
68941 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68944 + bool "Enable various PaX features"
68945 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68947 + This allows you to enable various PaX features. PaX adds
68948 + intrusion prevention mechanisms to the kernel that reduce
68949 + the risks posed by exploitable memory corruption bugs.
68951 +menu "PaX Control"
68954 +config PAX_SOFTMODE
68955 + bool 'Support soft mode'
68956 + select PAX_PT_PAX_FLAGS
68958 + Enabling this option will allow you to run PaX in soft mode, that
68959 + is, PaX features will not be enforced by default, only on executables
68960 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68961 + is the only way to mark executables for soft mode use.
68963 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68964 + line option on boot. Furthermore you can control various PaX features
68965 + at runtime via the entries in /proc/sys/kernel/pax.
68968 + bool 'Use legacy ELF header marking'
68970 + Enabling this option will allow you to control PaX features on
68971 + a per executable basis via the 'chpax' utility available at
68972 + http://pax.grsecurity.net/. The control flags will be read from
68973 + an otherwise reserved part of the ELF header. This marking has
68974 + numerous drawbacks (no support for soft-mode, toolchain does not
68975 + know about the non-standard use of the ELF header) therefore it
68976 + has been deprecated in favour of PT_PAX_FLAGS support.
68978 + Note that if you enable PT_PAX_FLAGS marking support as well,
68979 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68981 +config PAX_PT_PAX_FLAGS
68982 + bool 'Use ELF program header marking'
68984 + Enabling this option will allow you to control PaX features on
68985 + a per executable basis via the 'paxctl' utility available at
68986 + http://pax.grsecurity.net/. The control flags will be read from
68987 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68988 + has the benefits of supporting both soft mode and being fully
68989 + integrated into the toolchain (the binutils patch is available
68990 + from http://pax.grsecurity.net).
68992 + If your toolchain does not support PT_PAX_FLAGS markings,
68993 + you can create one in most cases with 'paxctl -C'.
68995 + Note that if you enable the legacy EI_PAX marking support as well,
68996 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68999 + prompt 'MAC system integration'
69000 + default PAX_HAVE_ACL_FLAGS
69002 + Mandatory Access Control systems have the option of controlling
69003 + PaX flags on a per executable basis, choose the method supported
69004 + by your particular system.
69006 + - "none": if your MAC system does not interact with PaX,
69007 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
69008 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
69010 + NOTE: this option is for developers/integrators only.
69012 + config PAX_NO_ACL_FLAGS
69015 + config PAX_HAVE_ACL_FLAGS
69018 + config PAX_HOOK_ACL_FLAGS
69024 +menu "Non-executable pages"
69028 + bool "Enforce non-executable pages"
69029 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
69031 + By design some architectures do not allow for protecting memory
69032 + pages against execution or even if they do, Linux does not make
69033 + use of this feature. In practice this means that if a page is
69034 + readable (such as the stack or heap) it is also executable.
69036 + There is a well known exploit technique that makes use of this
69037 + fact and a common programming mistake where an attacker can
69038 + introduce code of his choice somewhere in the attacked program's
69039 + memory (typically the stack or the heap) and then execute it.
69041 + If the attacked program was running with different (typically
69042 + higher) privileges than that of the attacker, then he can elevate
69043 + his own privilege level (e.g. get a root shell, write to files for
69044 + which he does not have write access to, etc).
69046 + Enabling this option will let you choose from various features
69047 + that prevent the injection and execution of 'foreign' code in
69050 + This will also break programs that rely on the old behaviour and
69051 + expect that dynamically allocated memory via the malloc() family
69052 + of functions is executable (which it is not). Notable examples
69053 + are the XFree86 4.x server, the java runtime and wine.
69055 +config PAX_PAGEEXEC
69056 + bool "Paging based non-executable pages"
69057 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
69058 + select S390_SWITCH_AMODE if S390
69059 + select S390_EXEC_PROTECT if S390
69060 + select ARCH_TRACK_EXEC_LIMIT if X86_32
69062 + This implementation is based on the paging feature of the CPU.
69063 + On i386 without hardware non-executable bit support there is a
69064 + variable but usually low performance impact, however on Intel's
69065 + P4 core based CPUs it is very high so you should not enable this
69066 + for kernels meant to be used on such CPUs.
69068 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
69069 + with hardware non-executable bit support there is no performance
69070 + impact, on ppc the impact is negligible.
69072 + Note that several architectures require various emulations due to
69073 + badly designed userland ABIs, this will cause a performance impact
69074 + but will disappear as soon as userland is fixed. For example, ppc
69075 + userland MUST have been built with secure-plt by a recent toolchain.
69077 +config PAX_SEGMEXEC
69078 + bool "Segmentation based non-executable pages"
69079 + depends on PAX_NOEXEC && X86_32
69081 + This implementation is based on the segmentation feature of the
69082 + CPU and has a very small performance impact, however applications
69083 + will be limited to a 1.5 GB address space instead of the normal
69086 +config PAX_EMUTRAMP
69087 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
69088 + default y if PARISC
69090 + There are some programs and libraries that for one reason or
69091 + another attempt to execute special small code snippets from
69092 + non-executable memory pages. Most notable examples are the
69093 + signal handler return code generated by the kernel itself and
69094 + the GCC trampolines.
69096 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
69097 + such programs will no longer work under your kernel.
69099 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
69100 + utilities to enable trampoline emulation for the affected programs
69101 + yet still have the protection provided by the non-executable pages.
69103 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
69104 + your system will not even boot.
69106 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
69107 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
69108 + for the affected files.
69110 + NOTE: enabling this feature *may* open up a loophole in the
69111 + protection provided by non-executable pages that an attacker
69112 + could abuse. Therefore the best solution is to not have any
69113 + files on your system that would require this option. This can
69114 + be achieved by not using libc5 (which relies on the kernel
69115 + signal handler return code) and not using or rewriting programs
69116 + that make use of the nested function implementation of GCC.
69117 + Skilled users can just fix GCC itself so that it implements
69118 + nested function calls in a way that does not interfere with PaX.
69120 +config PAX_EMUSIGRT
69121 + bool "Automatically emulate sigreturn trampolines"
69122 + depends on PAX_EMUTRAMP && PARISC
69125 + Enabling this option will have the kernel automatically detect
69126 + and emulate signal return trampolines executing on the stack
69127 + that would otherwise lead to task termination.
69129 + This solution is intended as a temporary one for users with
69130 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
69131 + Modula-3 runtime, etc) or executables linked to such, basically
69132 + everything that does not specify its own SA_RESTORER function in
69133 + normal executable memory like glibc 2.1+ does.
69135 + On parisc you MUST enable this option, otherwise your system will
69138 + NOTE: this feature cannot be disabled on a per executable basis
69139 + and since it *does* open up a loophole in the protection provided
69140 + by non-executable pages, the best solution is to not have any
69141 + files on your system that would require this option.
69143 +config PAX_MPROTECT
69144 + bool "Restrict mprotect()"
69145 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
69147 + Enabling this option will prevent programs from
69148 + - changing the executable status of memory pages that were
69149 + not originally created as executable,
69150 + - making read-only executable pages writable again,
69151 + - creating executable pages from anonymous memory,
69152 + - making read-only-after-relocations (RELRO) data pages writable again.
69154 + You should say Y here to complete the protection provided by
69155 + the enforcement of non-executable pages.
69157 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69158 + this feature on a per file basis.
69160 +config PAX_MPROTECT_COMPAT
69161 + bool "Use legacy/compat protection demoting (read help)"
69162 + depends on PAX_MPROTECT
69165 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
69166 + by sending the proper error code to the application. For some broken
69167 + userland, this can cause problems with Python or other applications. The
69168 + current implementation however allows for applications like clamav to
69169 + detect if JIT compilation/execution is allowed and to fall back gracefully
69170 + to an interpreter-based mode if it does not. While we encourage everyone
69171 + to use the current implementation as-is and push upstream to fix broken
69172 + userland (note that the RWX logging option can assist with this), in some
69173 + environments this may not be possible. Having to disable MPROTECT
69174 + completely on certain binaries reduces the security benefit of PaX,
69175 + so this option is provided for those environments to revert to the old
69178 +config PAX_ELFRELOCS
69179 + bool "Allow ELF text relocations (read help)"
69180 + depends on PAX_MPROTECT
69183 + Non-executable pages and mprotect() restrictions are effective
69184 + in preventing the introduction of new executable code into an
69185 + attacked task's address space. There remain only two venues
69186 + for this kind of attack: if the attacker can execute already
69187 + existing code in the attacked task then he can either have it
69188 + create and mmap() a file containing his code or have it mmap()
69189 + an already existing ELF library that does not have position
69190 + independent code in it and use mprotect() on it to make it
69191 + writable and copy his code there. While protecting against
69192 + the former approach is beyond PaX, the latter can be prevented
69193 + by having only PIC ELF libraries on one's system (which do not
69194 + need to relocate their code). If you are sure this is your case,
69195 + as is the case with all modern Linux distributions, then leave
69196 + this option disabled. You should say 'n' here.
69198 +config PAX_ETEXECRELOCS
69199 + bool "Allow ELF ET_EXEC text relocations"
69200 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
69201 + select PAX_ELFRELOCS
69204 + On some architectures there are incorrectly created applications
69205 + that require text relocations and would not work without enabling
69206 + this option. If you are an alpha, ia64 or parisc user, you should
69207 + enable this option and disable it once you have made sure that
69208 + none of your applications need it.
69211 + bool "Automatically emulate ELF PLT"
69212 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
69215 + Enabling this option will have the kernel automatically detect
69216 + and emulate the Procedure Linkage Table entries in ELF files.
69217 + On some architectures such entries are in writable memory, and
69218 + become non-executable leading to task termination. Therefore
69219 + it is mandatory that you enable this option on alpha, parisc,
69220 + sparc and sparc64, otherwise your system would not even boot.
69222 + NOTE: this feature *does* open up a loophole in the protection
69223 + provided by the non-executable pages, therefore the proper
69224 + solution is to modify the toolchain to produce a PLT that does
69225 + not need to be writable.
69227 +config PAX_DLRESOLVE
69228 + bool 'Emulate old glibc resolver stub'
69229 + depends on PAX_EMUPLT && SPARC
69232 + This option is needed if userland has an old glibc (before 2.4)
69233 + that puts a 'save' instruction into the runtime generated resolver
69234 + stub that needs special emulation.
69236 +config PAX_KERNEXEC
69237 + bool "Enforce non-executable kernel pages"
69238 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
69239 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
69241 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
69242 + that is, enabling this option will make it harder to inject
69243 + and execute 'foreign' code in kernel memory itself.
69245 + Note that on x86_64 kernels there is a known regression when
69246 + this feature and KVM/VMX are both enabled in the host kernel.
69248 +config PAX_KERNEXEC_MODULE_TEXT
69249 + int "Minimum amount of memory reserved for module code"
69251 + depends on PAX_KERNEXEC && X86_32 && MODULES
69253 + Due to implementation details the kernel must reserve a fixed
69254 + amount of memory for module code at compile time that cannot be
69255 + changed at runtime. Here you can specify the minimum amount
69256 + in MB that will be reserved. Due to the same implementation
69257 + details this size will always be rounded up to the next 2/4 MB
69258 + boundary (depends on PAE) so the actually available memory for
69259 + module code will usually be more than this minimum.
69261 + The default 4 MB should be enough for most users but if you have
69262 + an excessive number of modules (e.g., most distribution configs
69263 + compile many drivers as modules) or use huge modules such as
69264 + nvidia's kernel driver, you will need to adjust this amount.
69265 + A good rule of thumb is to look at your currently loaded kernel
69266 + modules and add up their sizes.
69270 +menu "Address Space Layout Randomization"
69274 + bool "Address Space Layout Randomization"
69275 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
69277 + Many if not most exploit techniques rely on the knowledge of
69278 + certain addresses in the attacked program. The following options
69279 + will allow the kernel to apply a certain amount of randomization
69280 + to specific parts of the program thereby forcing an attacker to
69281 + guess them in most cases. Any failed guess will most likely crash
69282 + the attacked program which allows the kernel to detect such attempts
69283 + and react on them. PaX itself provides no reaction mechanisms,
69284 + instead it is strongly encouraged that you make use of Nergal's
69285 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
69286 + (http://www.grsecurity.net/) built-in crash detection features or
69287 + develop one yourself.
69289 + By saying Y here you can choose to randomize the following areas:
69290 + - top of the task's kernel stack
69291 + - top of the task's userland stack
69292 + - base address for mmap() requests that do not specify one
69293 + (this includes all libraries)
69294 + - base address of the main executable
69296 + It is strongly recommended to say Y here as address space layout
69297 + randomization has negligible impact on performance yet it provides
69298 + a very effective protection.
69300 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69301 + this feature on a per file basis.
69303 +config PAX_RANDKSTACK
69304 + bool "Randomize kernel stack base"
69305 + depends on PAX_ASLR && X86_TSC && X86
69307 + By saying Y here the kernel will randomize every task's kernel
69308 + stack on every system call. This will not only force an attacker
69309 + to guess it but also prevent him from making use of possible
69310 + leaked information about it.
69312 + Since the kernel stack is a rather scarce resource, randomization
69313 + may cause unexpected stack overflows, therefore you should very
69314 + carefully test your system. Note that once enabled in the kernel
69315 + configuration, this feature cannot be disabled on a per file basis.
69317 +config PAX_RANDUSTACK
69318 + bool "Randomize user stack base"
69319 + depends on PAX_ASLR
69321 + By saying Y here the kernel will randomize every task's userland
69322 + stack. The randomization is done in two steps where the second
69323 + one may apply a big amount of shift to the top of the stack and
69324 + cause problems for programs that want to use lots of memory (more
69325 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
69326 + For this reason the second step can be controlled by 'chpax' or
69327 + 'paxctl' on a per file basis.
69329 +config PAX_RANDMMAP
69330 + bool "Randomize mmap() base"
69331 + depends on PAX_ASLR
69333 + By saying Y here the kernel will use a randomized base address for
69334 + mmap() requests that do not specify one themselves. As a result
69335 + all dynamically loaded libraries will appear at random addresses
69336 + and therefore be harder to exploit by a technique where an attacker
69337 + attempts to execute library code for his purposes (e.g. spawn a
69338 + shell from an exploited program that is running at an elevated
69339 + privilege level).
69341 + Furthermore, if a program is relinked as a dynamic ELF file, its
69342 + base address will be randomized as well, completing the full
69343 + randomization of the address space layout. Attacking such programs
69344 + becomes a guess game. You can find an example of doing this at
69345 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
69346 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
69348 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
69349 + feature on a per file basis.
69353 +menu "Miscellaneous hardening features"
69355 +config PAX_MEMORY_SANITIZE
69356 + bool "Sanitize all freed memory"
69358 + By saying Y here the kernel will erase memory pages as soon as they
69359 + are freed. This in turn reduces the lifetime of data stored in the
69360 + pages, making it less likely that sensitive information such as
69361 + passwords, cryptographic secrets, etc stay in memory for too long.
69363 + This is especially useful for programs whose runtime is short, long
69364 + lived processes and the kernel itself benefit from this as long as
69365 + they operate on whole memory pages and ensure timely freeing of pages
69366 + that may hold sensitive information.
69368 + The tradeoff is performance impact, on a single CPU system kernel
69369 + compilation sees a 3% slowdown, other systems and workloads may vary
69370 + and you are advised to test this feature on your expected workload
69371 + before deploying it.
69373 + Note that this feature does not protect data stored in live pages,
69374 + e.g., process memory swapped to disk may stay there for a long time.
69376 +config PAX_MEMORY_STACKLEAK
69377 + bool "Sanitize kernel stack"
69380 + By saying Y here the kernel will erase the kernel stack before it
69381 + returns from a system call. This in turn reduces the information
69382 + that a kernel stack leak bug can reveal.
69384 + Note that such a bug can still leak information that was put on
69385 + the stack by the current system call (the one eventually triggering
69386 + the bug) but traces of earlier system calls on the kernel stack
69387 + cannot leak anymore.
69389 + The tradeoff is performance impact: on a single CPU system kernel
69390 + compilation sees a 1% slowdown, other systems and workloads may vary
69391 + and you are advised to test this feature on your expected workload
69392 + before deploying it.
69394 + Note: full support for this feature requires gcc with plugin support
69395 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
69396 + is not supported). Using older gcc versions means that functions
69397 + with large enough stack frames may leave uninitialized memory behind
69398 + that may be exposed to a later syscall leaking the stack.
69400 +config PAX_MEMORY_UDEREF
69401 + bool "Prevent invalid userland pointer dereference"
69402 + depends on X86 && !UML_X86 && !XEN
69403 + select PAX_PER_CPU_PGD if X86_64
69405 + By saying Y here the kernel will be prevented from dereferencing
69406 + userland pointers in contexts where the kernel expects only kernel
69407 + pointers. This is both a useful runtime debugging feature and a
69408 + security measure that prevents exploiting a class of kernel bugs.
69410 + The tradeoff is that some virtualization solutions may experience
69411 + a huge slowdown and therefore you should not enable this feature
69412 + for kernels meant to run in such environments. Whether a given VM
69413 + solution is affected or not is best determined by simply trying it
69414 + out, the performance impact will be obvious right on boot as this
69415 + mechanism engages from very early on. A good rule of thumb is that
69416 + VMs running on CPUs without hardware virtualization support (i.e.,
69417 + the majority of IA-32 CPUs) will likely experience the slowdown.
69419 +config PAX_REFCOUNT
69420 + bool "Prevent various kernel object reference counter overflows"
69421 + depends on GRKERNSEC && (X86 || SPARC64)
69423 + By saying Y here the kernel will detect and prevent overflowing
69424 + various (but not all) kinds of object reference counters. Such
69425 + overflows can normally occur due to bugs only and are often, if
69426 + not always, exploitable.
69428 + The tradeoff is that data structures protected by an overflowed
69429 + refcount will never be freed and therefore will leak memory. Note
69430 + that this leak also happens even without this protection but in
69431 + that case the overflow can eventually trigger the freeing of the
69432 + data structure while it is still being used elsewhere, resulting
69433 + in the exploitable situation that this feature prevents.
69435 + Since this has a negligible performance impact, you should enable
69438 +config PAX_USERCOPY
69439 + bool "Harden heap object copies between kernel and userland"
69440 + depends on X86 || PPC || SPARC || ARM
69441 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69443 + By saying Y here the kernel will enforce the size of heap objects
69444 + when they are copied in either direction between the kernel and
69445 + userland, even if only a part of the heap object is copied.
69447 + Specifically, this checking prevents information leaking from the
69448 + kernel heap during kernel to userland copies (if the kernel heap
69449 + object is otherwise fully initialized) and prevents kernel heap
69450 + overflows during userland to kernel copies.
69452 + Note that the current implementation provides the strictest bounds
69453 + checks for the SLUB allocator.
69455 + Enabling this option also enables per-slab cache protection against
69456 + data in a given cache being copied into/out of via userland
69457 + accessors. Though the whitelist of regions will be reduced over
69458 + time, it notably protects important data structures like task structs.
69460 + If frame pointers are enabled on x86, this option will also restrict
69461 + copies into and out of the kernel stack to local variables within a
69464 + Since this has a negligible performance impact, you should enable
69472 bool "Enable access key retention support"
69474 @@ -167,7 +715,7 @@ config INTEL_TXT
69475 config LSM_MMAP_MIN_ADDR
69476 int "Low address space for LSM to protect from user allocation"
69477 depends on SECURITY && SECURITY_SELINUX
69481 This is the portion of low virtual memory which should be protected
69482 from userspace allocation. Keeping a user from writing to low pages
69483 diff -urNp linux-2.6.39.4/security/keys/keyring.c linux-2.6.39.4/security/keys/keyring.c
69484 --- linux-2.6.39.4/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
69485 +++ linux-2.6.39.4/security/keys/keyring.c 2011-08-05 19:44:37.000000000 -0400
69486 @@ -213,15 +213,15 @@ static long keyring_read(const struct ke
69489 for (loop = 0; loop < klist->nkeys; loop++) {
69490 + key_serial_t serial;
69491 key = klist->keys[loop];
69492 + serial = key->serial;
69494 tmp = sizeof(key_serial_t);
69498 - if (copy_to_user(buffer,
69501 + if (copy_to_user(buffer, &serial, tmp))
69505 diff -urNp linux-2.6.39.4/security/min_addr.c linux-2.6.39.4/security/min_addr.c
69506 --- linux-2.6.39.4/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
69507 +++ linux-2.6.39.4/security/min_addr.c 2011-08-05 19:44:37.000000000 -0400
69508 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69510 static void update_mmap_min_addr(void)
69513 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69514 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69515 mmap_min_addr = dac_mmap_min_addr;
69516 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69518 mmap_min_addr = dac_mmap_min_addr;
69524 diff -urNp linux-2.6.39.4/security/security.c linux-2.6.39.4/security/security.c
69525 --- linux-2.6.39.4/security/security.c 2011-05-19 00:06:34.000000000 -0400
69526 +++ linux-2.6.39.4/security/security.c 2011-08-05 19:44:37.000000000 -0400
69527 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69528 /* things that live in capability.c */
69529 extern void __init security_fixup_ops(struct security_operations *ops);
69531 -static struct security_operations *security_ops;
69532 -static struct security_operations default_security_ops = {
69533 +static struct security_operations *security_ops __read_only;
69534 +static struct security_operations default_security_ops __read_only = {
69538 @@ -67,7 +67,9 @@ int __init security_init(void)
69540 void reset_security_ops(void)
69542 + pax_open_kernel();
69543 security_ops = &default_security_ops;
69544 + pax_close_kernel();
69547 /* Save user chosen LSM */
69548 diff -urNp linux-2.6.39.4/security/selinux/hooks.c linux-2.6.39.4/security/selinux/hooks.c
69549 --- linux-2.6.39.4/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
69550 +++ linux-2.6.39.4/security/selinux/hooks.c 2011-08-05 19:44:37.000000000 -0400
69552 #define NUM_SEL_MNT_OPTS 5
69554 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69555 -extern struct security_operations *security_ops;
69557 /* SECMARK reference count */
69558 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69559 @@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
69563 -static struct security_operations selinux_ops = {
69564 +static struct security_operations selinux_ops __read_only = {
69567 .ptrace_access_check = selinux_ptrace_access_check,
69568 diff -urNp linux-2.6.39.4/security/selinux/include/xfrm.h linux-2.6.39.4/security/selinux/include/xfrm.h
69569 --- linux-2.6.39.4/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
69570 +++ linux-2.6.39.4/security/selinux/include/xfrm.h 2011-08-05 19:44:37.000000000 -0400
69571 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69573 static inline void selinux_xfrm_notify_policyload(void)
69575 - atomic_inc(&flow_cache_genid);
69576 + atomic_inc_unchecked(&flow_cache_genid);
69579 static inline int selinux_xfrm_enabled(void)
69580 diff -urNp linux-2.6.39.4/security/selinux/ss/services.c linux-2.6.39.4/security/selinux/ss/services.c
69581 --- linux-2.6.39.4/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
69582 +++ linux-2.6.39.4/security/selinux/ss/services.c 2011-08-05 19:44:37.000000000 -0400
69583 @@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
69585 struct policy_file file = { data, len }, *fp = &file;
69587 + pax_track_stack();
69589 if (!ss_initialized) {
69590 avtab_cache_init();
69591 rc = policydb_read(&policydb, fp);
69592 diff -urNp linux-2.6.39.4/security/smack/smack_lsm.c linux-2.6.39.4/security/smack/smack_lsm.c
69593 --- linux-2.6.39.4/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
69594 +++ linux-2.6.39.4/security/smack/smack_lsm.c 2011-08-05 19:44:37.000000000 -0400
69595 @@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
69599 -struct security_operations smack_ops = {
69600 +struct security_operations smack_ops __read_only = {
69603 .ptrace_access_check = smack_ptrace_access_check,
69604 diff -urNp linux-2.6.39.4/security/tomoyo/tomoyo.c linux-2.6.39.4/security/tomoyo/tomoyo.c
69605 --- linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
69606 +++ linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-08-05 19:44:37.000000000 -0400
69607 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69608 * tomoyo_security_ops is a "struct security_operations" which is used for
69609 * registering TOMOYO.
69611 -static struct security_operations tomoyo_security_ops = {
69612 +static struct security_operations tomoyo_security_ops __read_only = {
69614 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69615 .cred_prepare = tomoyo_cred_prepare,
69616 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.c linux-2.6.39.4/sound/aoa/codecs/onyx.c
69617 --- linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
69618 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-08-05 19:44:37.000000000 -0400
69619 @@ -54,7 +54,7 @@ struct onyx {
69624 + local_t open_count;
69625 struct codec_info *codec_info;
69627 /* mutex serializes concurrent access to the device
69628 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69629 struct onyx *onyx = cii->codec_data;
69631 mutex_lock(&onyx->mutex);
69632 - onyx->open_count++;
69633 + local_inc(&onyx->open_count);
69634 mutex_unlock(&onyx->mutex);
69637 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69638 struct onyx *onyx = cii->codec_data;
69640 mutex_lock(&onyx->mutex);
69641 - onyx->open_count--;
69642 - if (!onyx->open_count)
69643 + if (local_dec_and_test(&onyx->open_count))
69644 onyx->spdif_locked = onyx->analog_locked = 0;
69645 mutex_unlock(&onyx->mutex);
69647 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.h linux-2.6.39.4/sound/aoa/codecs/onyx.h
69648 --- linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
69649 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-08-05 19:44:37.000000000 -0400
69651 #include <linux/i2c.h>
69652 #include <asm/pmac_low_i2c.h>
69653 #include <asm/prom.h>
69654 +#include <asm/local.h>
69656 /* PCM3052 register definitions */
69658 diff -urNp linux-2.6.39.4/sound/core/seq/seq_device.c linux-2.6.39.4/sound/core/seq/seq_device.c
69659 --- linux-2.6.39.4/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
69660 +++ linux-2.6.39.4/sound/core/seq/seq_device.c 2011-08-05 20:34:06.000000000 -0400
69661 @@ -63,7 +63,7 @@ struct ops_list {
69662 int argsize; /* argument size */
69665 - struct snd_seq_dev_ops ops;
69666 + struct snd_seq_dev_ops *ops;
69668 /* registred devices */
69669 struct list_head dev_list; /* list of devices */
69670 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69672 mutex_lock(&ops->reg_mutex);
69673 /* copy driver operators */
69674 - ops->ops = *entry;
69675 + ops->ops = entry;
69676 ops->driver |= DRIVER_LOADED;
69677 ops->argsize = argsize;
69679 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69680 dev->name, ops->id, ops->argsize, dev->argsize);
69683 - if (ops->ops.init_device(dev) >= 0) {
69684 + if (ops->ops->init_device(dev) >= 0) {
69685 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69686 ops->num_init_devices++;
69688 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69689 dev->name, ops->id, ops->argsize, dev->argsize);
69692 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69693 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69694 dev->status = SNDRV_SEQ_DEVICE_FREE;
69695 dev->driver_data = NULL;
69696 ops->num_init_devices--;
69697 diff -urNp linux-2.6.39.4/sound/drivers/mts64.c linux-2.6.39.4/sound/drivers/mts64.c
69698 --- linux-2.6.39.4/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
69699 +++ linux-2.6.39.4/sound/drivers/mts64.c 2011-08-05 20:34:06.000000000 -0400
69701 #include <sound/initval.h>
69702 #include <sound/rawmidi.h>
69703 #include <sound/control.h>
69704 +#include <asm/local.h>
69706 #define CARD_NAME "Miditerminal 4140"
69707 #define DRIVER_NAME "MTS64"
69708 @@ -66,7 +67,7 @@ struct mts64 {
69709 struct pardevice *pardev;
69710 int pardev_claimed;
69713 + local_t open_count;
69714 int current_midi_output_port;
69715 int current_midi_input_port;
69716 u8 mode[MTS64_NUM_INPUT_PORTS];
69717 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69719 struct mts64 *mts = substream->rmidi->private_data;
69721 - if (mts->open_count == 0) {
69722 + if (local_read(&mts->open_count) == 0) {
69723 /* We don't need a spinlock here, because this is just called
69724 if the device has not been opened before.
69725 So there aren't any IRQs from the device */
69726 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69730 - ++(mts->open_count);
69731 + local_inc(&mts->open_count);
69735 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69736 struct mts64 *mts = substream->rmidi->private_data;
69737 unsigned long flags;
69739 - --(mts->open_count);
69740 - if (mts->open_count == 0) {
69741 + if (local_dec_return(&mts->open_count) == 0) {
69742 /* We need the spinlock_irqsave here because we can still
69743 have IRQs at this point */
69744 spin_lock_irqsave(&mts->lock, flags);
69745 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69749 - } else if (mts->open_count < 0)
69750 - mts->open_count = 0;
69751 + } else if (local_read(&mts->open_count) < 0)
69752 + local_set(&mts->open_count, 0);
69756 diff -urNp linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c
69757 --- linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-05-19 00:06:34.000000000 -0400
69758 +++ linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:34:06.000000000 -0400
69759 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69760 MODULE_DESCRIPTION("OPL4 driver");
69761 MODULE_LICENSE("GPL");
69763 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69764 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69767 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69768 diff -urNp linux-2.6.39.4/sound/drivers/portman2x4.c linux-2.6.39.4/sound/drivers/portman2x4.c
69769 --- linux-2.6.39.4/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
69770 +++ linux-2.6.39.4/sound/drivers/portman2x4.c 2011-08-05 20:34:06.000000000 -0400
69772 #include <sound/initval.h>
69773 #include <sound/rawmidi.h>
69774 #include <sound/control.h>
69775 +#include <asm/local.h>
69777 #define CARD_NAME "Portman 2x4"
69778 #define DRIVER_NAME "portman"
69779 @@ -84,7 +85,7 @@ struct portman {
69780 struct pardevice *pardev;
69781 int pardev_claimed;
69784 + local_t open_count;
69785 int mode[PORTMAN_NUM_INPUT_PORTS];
69786 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69788 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.c linux-2.6.39.4/sound/firewire/amdtp.c
69789 --- linux-2.6.39.4/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
69790 +++ linux-2.6.39.4/sound/firewire/amdtp.c 2011-08-05 19:44:37.000000000 -0400
69791 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69792 ptr = s->pcm_buffer_pointer + data_blocks;
69793 if (ptr >= pcm->runtime->buffer_size)
69794 ptr -= pcm->runtime->buffer_size;
69795 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69796 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69798 s->pcm_period_pointer += data_blocks;
69799 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69800 @@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69802 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69804 - ACCESS_ONCE(s->source_node_id_field) =
69805 + ACCESS_ONCE_RW(s->source_node_id_field) =
69806 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69808 EXPORT_SYMBOL(amdtp_out_stream_update);
69809 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.h linux-2.6.39.4/sound/firewire/amdtp.h
69810 --- linux-2.6.39.4/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
69811 +++ linux-2.6.39.4/sound/firewire/amdtp.h 2011-08-05 19:44:37.000000000 -0400
69812 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69813 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69814 struct snd_pcm_substream *pcm)
69816 - ACCESS_ONCE(s->pcm) = pcm;
69817 + ACCESS_ONCE_RW(s->pcm) = pcm;
69821 diff -urNp linux-2.6.39.4/sound/isa/cmi8330.c linux-2.6.39.4/sound/isa/cmi8330.c
69822 --- linux-2.6.39.4/sound/isa/cmi8330.c 2011-05-19 00:06:34.000000000 -0400
69823 +++ linux-2.6.39.4/sound/isa/cmi8330.c 2011-08-05 20:34:06.000000000 -0400
69824 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69826 struct snd_pcm *pcm;
69827 struct snd_cmi8330_stream {
69828 - struct snd_pcm_ops ops;
69829 + snd_pcm_ops_no_const ops;
69830 snd_pcm_open_callback_t open;
69831 void *private_data; /* sb or wss */
69833 diff -urNp linux-2.6.39.4/sound/oss/sb_audio.c linux-2.6.39.4/sound/oss/sb_audio.c
69834 --- linux-2.6.39.4/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
69835 +++ linux-2.6.39.4/sound/oss/sb_audio.c 2011-08-05 19:44:37.000000000 -0400
69836 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69837 buf16 = (signed short *)(localbuf + localoffs);
69840 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69841 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69842 if (copy_from_user(lbuf8,
69843 userbuf+useroffs + p,
69845 diff -urNp linux-2.6.39.4/sound/oss/swarm_cs4297a.c linux-2.6.39.4/sound/oss/swarm_cs4297a.c
69846 --- linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
69847 +++ linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-08-05 19:44:37.000000000 -0400
69848 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69850 struct cs4297a_state *s;
69854 #ifndef CONFIG_BCM_CS4297A_CSWARM
69856 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69858 char *sb1250_duart_present;
69865 val = SOUND_MASK_LINE;
69866 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69867 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69868 val = initvol[i].vol;
69869 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69872 // cs4297a_write_ac97(s, 0x18, 0x0808);
69874 // cs4297a_write_ac97(s, 0x5e, 0x180);
69875 cs4297a_write_ac97(s, 0x02, 0x0808);
69876 cs4297a_write_ac97(s, 0x18, 0x0808);
69880 list_add(&s->list, &cs4297a_devs);
69882 diff -urNp linux-2.6.39.4/sound/pci/hda/hda_codec.h linux-2.6.39.4/sound/pci/hda/hda_codec.h
69883 --- linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-05-19 00:06:34.000000000 -0400
69884 +++ linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-08-05 20:34:06.000000000 -0400
69885 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69886 /* notify power-up/down from codec to controller */
69887 void (*pm_notify)(struct hda_bus *bus);
69892 /* template to pass to the bus constructor */
69893 struct hda_bus_template {
69894 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69896 void (*reboot_notify)(struct hda_codec *codec);
69898 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69900 /* record for amp information cache */
69901 struct hda_cache_head {
69902 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69903 struct snd_pcm_substream *substream);
69904 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69905 struct snd_pcm_substream *substream);
69909 /* PCM information for each substream */
69910 struct hda_pcm_stream {
69911 @@ -801,7 +802,7 @@ struct hda_codec {
69912 const char *modelname; /* model name for preset */
69915 - struct hda_codec_ops patch_ops;
69916 + hda_codec_ops_no_const patch_ops;
69918 /* PCM to create, set by patch_ops.build_pcms callback */
69919 unsigned int num_pcms;
69920 diff -urNp linux-2.6.39.4/sound/pci/ice1712/ice1712.h linux-2.6.39.4/sound/pci/ice1712/ice1712.h
69921 --- linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-05-19 00:06:34.000000000 -0400
69922 +++ linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-08-05 20:34:06.000000000 -0400
69923 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69924 unsigned int mask_flags; /* total mask bits */
69925 struct snd_akm4xxx_ops {
69926 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69928 + } __no_const ops;
69931 struct snd_ice1712_spdif {
69932 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69933 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69934 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69935 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69937 + } __no_const ops;
69941 diff -urNp linux-2.6.39.4/sound/pci/intel8x0m.c linux-2.6.39.4/sound/pci/intel8x0m.c
69942 --- linux-2.6.39.4/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
69943 +++ linux-2.6.39.4/sound/pci/intel8x0m.c 2011-08-05 20:34:06.000000000 -0400
69944 @@ -1265,7 +1265,7 @@ static struct shortname_table {
69945 { 0x5455, "ALi M5455" },
69946 { 0x746d, "AMD AMD8111" },
69952 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
69953 diff -urNp linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c
69954 --- linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
69955 +++ linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-05 20:34:06.000000000 -0400
69956 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69957 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69960 - if (atomic_read(&chip->interrupt_sleep_count)) {
69961 - atomic_set(&chip->interrupt_sleep_count, 0);
69962 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69963 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69964 wake_up(&chip->interrupt_sleep);
69967 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69969 init_waitqueue_entry(&wait, current);
69970 add_wait_queue(&chip->interrupt_sleep, &wait);
69971 - atomic_inc(&chip->interrupt_sleep_count);
69972 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69973 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69974 remove_wait_queue(&chip->interrupt_sleep, &wait);
69976 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69977 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69978 spin_unlock(&chip->reg_lock);
69980 - if (atomic_read(&chip->interrupt_sleep_count)) {
69981 - atomic_set(&chip->interrupt_sleep_count, 0);
69982 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69983 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69984 wake_up(&chip->interrupt_sleep);
69987 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69988 spin_lock_init(&chip->reg_lock);
69989 spin_lock_init(&chip->voice_lock);
69990 init_waitqueue_head(&chip->interrupt_sleep);
69991 - atomic_set(&chip->interrupt_sleep_count, 0);
69992 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69996 diff -urNp linux-2.6.39.4/sound/soc/soc-core.c linux-2.6.39.4/sound/soc/soc-core.c
69997 --- linux-2.6.39.4/sound/soc/soc-core.c 2011-05-19 00:06:34.000000000 -0400
69998 +++ linux-2.6.39.4/sound/soc/soc-core.c 2011-08-05 20:34:06.000000000 -0400
69999 @@ -1027,7 +1027,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
70002 /* ASoC PCM operations */
70003 -static struct snd_pcm_ops soc_pcm_ops = {
70004 +static snd_pcm_ops_no_const soc_pcm_ops = {
70005 .open = soc_pcm_open,
70006 .close = soc_codec_close,
70007 .hw_params = soc_pcm_hw_params,
70008 @@ -2105,6 +2105,7 @@ static int soc_new_pcm(struct snd_soc_pc
70011 pcm->private_data = rtd;
70012 + /* this whole logic is broken... */
70013 soc_pcm_ops.mmap = platform->driver->ops->mmap;
70014 soc_pcm_ops.pointer = platform->driver->ops->pointer;
70015 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
70016 diff -urNp linux-2.6.39.4/sound/usb/card.h linux-2.6.39.4/sound/usb/card.h
70017 --- linux-2.6.39.4/sound/usb/card.h 2011-05-19 00:06:34.000000000 -0400
70018 +++ linux-2.6.39.4/sound/usb/card.h 2011-08-05 20:34:06.000000000 -0400
70019 @@ -44,6 +44,7 @@ struct snd_urb_ops {
70020 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70021 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70023 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
70025 struct snd_usb_substream {
70026 struct snd_usb_stream *stream;
70027 @@ -93,7 +94,7 @@ struct snd_usb_substream {
70028 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
70031 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
70032 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
70035 struct snd_usb_stream {
70036 diff -urNp linux-2.6.39.4/tools/gcc/constify_plugin.c linux-2.6.39.4/tools/gcc/constify_plugin.c
70037 --- linux-2.6.39.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
70038 +++ linux-2.6.39.4/tools/gcc/constify_plugin.c 2011-08-05 20:34:06.000000000 -0400
70041 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
70042 + * Licensed under the GPL v2, or (at your option) v3
70044 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
70047 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
70048 + * $ gcc -fplugin=constify_plugin.so test.c -O2
70051 +#include "gcc-plugin.h"
70052 +#include "config.h"
70053 +#include "system.h"
70054 +#include "coretypes.h"
70056 +#include "tree-pass.h"
70058 +#include "plugin-version.h"
70060 +#include "toplev.h"
70061 +#include "function.h"
70062 +#include "tree-flow.h"
70063 +#include "plugin.h"
70065 +int plugin_is_GPL_compatible;
70067 +static struct plugin_info const_plugin_info = {
70068 + .version = "20110721",
70069 + .help = "no-constify\tturn off constification\n",
70072 +static bool walk_struct(tree node);
70074 +static void deconstify_node(tree node)
70078 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70079 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70080 + if (code == RECORD_TYPE || code == UNION_TYPE)
70081 + deconstify_node(TREE_TYPE(field));
70082 + TREE_READONLY(field) = 0;
70083 + TREE_READONLY(TREE_TYPE(field)) = 0;
70087 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
70089 + if (TREE_CODE(*node) == FUNCTION_DECL) {
70090 + error("%qE attribute does not apply to functions", name);
70091 + *no_add_attrs = true;
70092 + return NULL_TREE;
70095 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
70096 + error("%qE attribute is already applied to the type" , name);
70097 + *no_add_attrs = true;
70098 + return NULL_TREE;
70101 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
70102 + error("%qE attribute used on type that is not constified" , name);
70103 + *no_add_attrs = true;
70104 + return NULL_TREE;
70107 + if (TREE_CODE(*node) == TYPE_DECL) {
70108 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
70109 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
70110 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
70111 + TREE_READONLY(TREE_TYPE(*node)) = 0;
70112 + deconstify_node(TREE_TYPE(*node));
70113 + return NULL_TREE;
70116 + return NULL_TREE;
70119 +static struct attribute_spec no_const_attr = {
70120 + .name = "no_const",
70123 + .decl_required = false,
70124 + .type_required = false,
70125 + .function_type_required = false,
70126 + .handler = handle_no_const_attribute
70129 +static void register_attributes(void *event_data, void *data)
70131 + register_attribute(&no_const_attr);
70135 +static void printnode(char *prefix, tree node)
70137 + enum tree_code code;
70138 + enum tree_code_class tclass;
70140 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
70142 + code = TREE_CODE(node);
70143 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
70144 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
70145 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
70146 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
70147 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
70151 +static void constify_node(tree node)
70153 + TREE_READONLY(node) = 1;
70156 +static bool is_fptr(tree field)
70158 + tree ptr = TREE_TYPE(field);
70160 + if (TREE_CODE(ptr) != POINTER_TYPE)
70163 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
70166 +static bool walk_struct(tree node)
70170 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70171 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70172 + if (code == RECORD_TYPE || code == UNION_TYPE) {
70173 + if (!(walk_struct(TREE_TYPE(field))))
70175 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
70181 +static void finish_type(void *event_data, void *data)
70183 + tree node = (tree)event_data;
70185 + if (node == NULL_TREE)
70188 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
70191 + if (TREE_READONLY(node))
70194 + if (TYPE_FIELDS(node) == NULL_TREE)
70197 + if (walk_struct(node))
70198 + constify_node(node);
70201 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70203 + const char * const plugin_name = plugin_info->base_name;
70204 + const int argc = plugin_info->argc;
70205 + const struct plugin_argument * const argv = plugin_info->argv;
70207 + bool constify = true;
70209 + if (!plugin_default_version_check(version, &gcc_version)) {
70210 + error(G_("incompatible gcc/plugin versions"));
70214 + for (i = 0; i < argc; ++i) {
70215 + if (!(strcmp(argv[i].key, "no-constify"))) {
70216 + constify = false;
70219 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70222 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
70224 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
70225 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
70229 diff -urNp linux-2.6.39.4/tools/gcc/Makefile linux-2.6.39.4/tools/gcc/Makefile
70230 --- linux-2.6.39.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
70231 +++ linux-2.6.39.4/tools/gcc/Makefile 2011-08-05 20:34:06.000000000 -0400
70234 +#PLUGIN_SOURCE_FILES := pax_plugin.c
70235 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
70236 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
70237 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
70239 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
70241 +hostlibs-y := stackleak_plugin.so constify_plugin.so
70242 +always := $(hostlibs-y)
70243 +stackleak_plugin-objs := stackleak_plugin.o
70244 +constify_plugin-objs := constify_plugin.o
70245 diff -urNp linux-2.6.39.4/tools/gcc/stackleak_plugin.c linux-2.6.39.4/tools/gcc/stackleak_plugin.c
70246 --- linux-2.6.39.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
70247 +++ linux-2.6.39.4/tools/gcc/stackleak_plugin.c 2011-08-05 20:34:06.000000000 -0400
70250 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
70251 + * Licensed under the GPL v2
70253 + * Note: the choice of the license means that the compilation process is
70254 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
70255 + * but for the kernel it doesn't matter since it doesn't link against
70256 + * any of the gcc libraries
70258 + * gcc plugin to help implement various PaX features
70260 + * - track lowest stack pointer
70263 + * - initialize all local variables
70266 + * - cloned functions are instrumented twice
70268 +#include "gcc-plugin.h"
70269 +#include "plugin-version.h"
70270 +#include "config.h"
70271 +#include "system.h"
70272 +#include "coretypes.h"
70274 +#include "toplev.h"
70275 +#include "basic-block.h"
70276 +#include "gimple.h"
70277 +//#include "expr.h" where are you...
70278 +#include "diagnostic.h"
70280 +#include "emit-rtl.h"
70281 +#include "function.h"
70283 +#include "tree-pass.h"
70286 +int plugin_is_GPL_compatible;
70288 +static int track_frame_size = -1;
70289 +static const char track_function[] = "pax_track_stack";
70290 +static bool init_locals;
70292 +static struct plugin_info stackleak_plugin_info = {
70293 + .version = "201106030000",
70294 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70295 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70298 +static bool gate_stackleak_track_stack(void);
70299 +static unsigned int execute_stackleak_tree_instrument(void);
70300 +static unsigned int execute_stackleak_final(void);
70302 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70304 + .type = GIMPLE_PASS,
70305 + .name = "stackleak_tree_instrument",
70306 + .gate = gate_stackleak_track_stack,
70307 + .execute = execute_stackleak_tree_instrument,
70310 + .static_pass_number = 0,
70311 + .tv_id = TV_NONE,
70312 + .properties_required = PROP_gimple_leh | PROP_cfg,
70313 + .properties_provided = 0,
70314 + .properties_destroyed = 0,
70315 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70316 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70320 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70322 + .type = RTL_PASS,
70323 + .name = "stackleak_final",
70324 + .gate = gate_stackleak_track_stack,
70325 + .execute = execute_stackleak_final,
70328 + .static_pass_number = 0,
70329 + .tv_id = TV_NONE,
70330 + .properties_required = 0,
70331 + .properties_provided = 0,
70332 + .properties_destroyed = 0,
70333 + .todo_flags_start = 0,
70334 + .todo_flags_finish = 0
70338 +static bool gate_stackleak_track_stack(void)
70340 + return track_frame_size >= 0;
70343 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70348 + // insert call to void pax_track_stack(void)
70349 + type = build_function_type_list(void_type_node, NULL_TREE);
70350 + decl = build_fn_decl(track_function, type);
70351 + DECL_ASSEMBLER_NAME(decl); // for LTO
70352 + call = gimple_build_call(decl, 0);
70354 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70356 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70359 +static unsigned int execute_stackleak_tree_instrument(void)
70362 + gimple_stmt_iterator gsi;
70364 + // 1. loop through BBs and GIMPLE statements
70365 + FOR_EACH_BB(bb) {
70366 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70367 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70369 + gimple stmt = gsi_stmt(gsi);
70371 + if (!is_gimple_call(stmt))
70373 + decl = gimple_call_fndecl(stmt);
70376 + if (TREE_CODE(decl) != FUNCTION_DECL)
70378 + if (!DECL_BUILT_IN(decl))
70380 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70382 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70385 + // 2. insert track call after each __builtin_alloca call
70386 + stackleak_add_instrumentation(&gsi, false);
70387 +// print_node(stderr, "pax", decl, 4);
70391 + // 3. insert track call at the beginning
70392 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70393 + gsi = gsi_start_bb(bb);
70394 + stackleak_add_instrumentation(&gsi, true);
70399 +static unsigned int execute_stackleak_final(void)
70403 + if (cfun->calls_alloca)
70406 + // 1. find pax_track_stack calls
70407 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70408 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70411 + if (!CALL_P(insn))
70413 + body = PATTERN(insn);
70414 + if (GET_CODE(body) != CALL)
70416 + body = XEXP(body, 0);
70417 + if (GET_CODE(body) != MEM)
70419 + body = XEXP(body, 0);
70420 + if (GET_CODE(body) != SYMBOL_REF)
70422 + if (strcmp(XSTR(body, 0), track_function))
70424 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70425 + // 2. delete call if function frame is not big enough
70426 + if (get_frame_size() >= track_frame_size)
70428 + delete_insn_and_edges(insn);
70431 +// print_simple_rtl(stderr, get_insns());
70432 +// print_rtl(stderr, get_insns());
70433 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70438 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70440 + const char * const plugin_name = plugin_info->base_name;
70441 + const int argc = plugin_info->argc;
70442 + const struct plugin_argument * const argv = plugin_info->argv;
70444 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70445 + .pass = &stackleak_tree_instrument_pass.pass,
70446 +// .reference_pass_name = "tree_profile",
70447 + .reference_pass_name = "optimized",
70448 + .ref_pass_instance_number = 0,
70449 + .pos_op = PASS_POS_INSERT_AFTER
70451 + struct register_pass_info stackleak_final_pass_info = {
70452 + .pass = &stackleak_final_rtl_opt_pass.pass,
70453 + .reference_pass_name = "final",
70454 + .ref_pass_instance_number = 0,
70455 + .pos_op = PASS_POS_INSERT_BEFORE
70458 + if (!plugin_default_version_check(version, &gcc_version)) {
70459 + error(G_("incompatible gcc/plugin versions"));
70463 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70465 + for (i = 0; i < argc; ++i) {
70466 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70467 + if (!argv[i].value) {
70468 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70471 + track_frame_size = atoi(argv[i].value);
70472 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70473 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70476 + if (!strcmp(argv[i].key, "initialize-locals")) {
70477 + if (argv[i].value) {
70478 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70481 + init_locals = true;
70484 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70487 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70488 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70492 diff -urNp linux-2.6.39.4/usr/gen_init_cpio.c linux-2.6.39.4/usr/gen_init_cpio.c
70493 --- linux-2.6.39.4/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
70494 +++ linux-2.6.39.4/usr/gen_init_cpio.c 2011-08-05 19:44:38.000000000 -0400
70495 @@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
70504 @@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
70505 *env_var = *expanded = '\0';
70506 strncat(env_var, start + 2, end - start - 2);
70507 strncat(expanded, new_location, start - new_location);
70508 - strncat(expanded, getenv(env_var), PATH_MAX);
70509 - strncat(expanded, end + 1, PATH_MAX);
70510 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70511 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70512 strncpy(new_location, expanded, PATH_MAX);
70513 + new_location[PATH_MAX] = 0;
70517 diff -urNp linux-2.6.39.4/virt/kvm/kvm_main.c linux-2.6.39.4/virt/kvm/kvm_main.c
70518 --- linux-2.6.39.4/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
70519 +++ linux-2.6.39.4/virt/kvm/kvm_main.c 2011-08-05 20:34:06.000000000 -0400
70520 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70522 static cpumask_var_t cpus_hardware_enabled;
70523 static int kvm_usage_count = 0;
70524 -static atomic_t hardware_enable_failed;
70525 +static atomic_unchecked_t hardware_enable_failed;
70527 struct kmem_cache *kvm_vcpu_cache;
70528 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70529 @@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
70532 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70533 - atomic_inc(&hardware_enable_failed);
70534 + atomic_inc_unchecked(&hardware_enable_failed);
70535 printk(KERN_INFO "kvm: enabling virtualization on "
70536 "CPU%d failed\n", cpu);
70538 @@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
70541 if (kvm_usage_count == 1) {
70542 - atomic_set(&hardware_enable_failed, 0);
70543 + atomic_set_unchecked(&hardware_enable_failed, 0);
70544 on_each_cpu(hardware_enable_nolock, NULL, 1);
70546 - if (atomic_read(&hardware_enable_failed)) {
70547 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70548 hardware_disable_all_nolock();
70551 @@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
70552 kvm_arch_vcpu_put(vcpu);
70555 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70556 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70557 struct module *module)
70560 @@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
70562 vcpu_align = __alignof__(struct kvm_vcpu);
70563 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70565 + SLAB_USERCOPY, NULL);
70566 if (!kvm_vcpu_cache) {
70569 @@ -2582,9 +2582,11 @@ int kvm_init(void *opaque, unsigned vcpu
70573 - kvm_chardev_ops.owner = module;
70574 - kvm_vm_fops.owner = module;
70575 - kvm_vcpu_fops.owner = module;
70576 + pax_open_kernel();
70577 + *(void **)&kvm_chardev_ops.owner = module;
70578 + *(void **)&kvm_vm_fops.owner = module;
70579 + *(void **)&kvm_vcpu_fops.owner = module;
70580 + pax_close_kernel();
70582 r = misc_register(&kvm_dev);