]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.39.4-201108060941.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108060941.patch
CommitLineData
c349170f
PK
1diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2--- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3+++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20+++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40--- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41+++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53+++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86--- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87+++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245--- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246+++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275--- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276+++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286--- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287+++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344--- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345+++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358--- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359+++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382--- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383+++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384@@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404--- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405+++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430--- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431+++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456--- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457+++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513+++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525+++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536--- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537+++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587--- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588+++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639--- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640+++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658--- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659+++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671--- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672+++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715--- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716+++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726--- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727+++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757--- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758+++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774--- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775+++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804--- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805+++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816--- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817+++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837--- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838+++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928--- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929+++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964+++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975--- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976+++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028+++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039--- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040+++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062--- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063+++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085--- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086+++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109--- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110+++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121--- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122+++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133+++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150+++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166--- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167+++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185--- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186+++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187@@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191+
1192+#ifdef CONFIG_PAX_RANDMMAP
1193+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194+#endif
1195+
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202- if (task_size - len >= addr &&
1203- (!vmm || addr + len <= vmm->vm_start))
1204+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208@@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212- if (!vmm || addr + len <= vmm->vm_start)
1213+ if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217@@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221-static inline unsigned long brk_rnd(void)
1222-{
1223- unsigned long rnd = get_random_int();
1224-
1225- rnd = rnd << PAGE_SHIFT;
1226- /* 8MB for 32bit, 256MB for 64bit */
1227- if (TASK_IS_32BIT_ADDR)
1228- rnd = rnd & 0x7ffffful;
1229- else
1230- rnd = rnd & 0xffffffful;
1231-
1232- return rnd;
1233-}
1234-
1235-unsigned long arch_randomize_brk(struct mm_struct *mm)
1236-{
1237- unsigned long base = mm->brk;
1238- unsigned long ret;
1239-
1240- ret = PAGE_ALIGN(base + brk_rnd());
1241-
1242- if (ret < mm->brk)
1243- return mm->brk;
1244-
1245- return ret;
1246-}
1247-
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252--- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253+++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254@@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258+#ifdef CONFIG_PAX_PAGEEXEC
1259+void pax_report_insns(void *pc, void *sp)
1260+{
1261+ unsigned long i;
1262+
1263+ printk(KERN_ERR "PAX: bytes at PC: ");
1264+ for (i = 0; i < 5; i++) {
1265+ unsigned int c;
1266+ if (get_user(c, (unsigned int *)pc+i))
1267+ printk(KERN_CONT "???????? ");
1268+ else
1269+ printk(KERN_CONT "%08x ", c);
1270+ }
1271+ printk("\n");
1272+}
1273+#endif
1274+
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279--- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280+++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285+#ifdef CONFIG_PAX_ASLR
1286+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287+
1288+#define PAX_DELTA_MMAP_LEN 16
1289+#define PAX_DELTA_STACK_LEN 16
1290+#endif
1291+
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296--- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297+++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298@@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302+
1303+#ifdef CONFIG_PAX_PAGEEXEC
1304+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307+#else
1308+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309+# define PAGE_COPY_NOEXEC PAGE_COPY
1310+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311+#endif
1312+
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317--- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318+++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319@@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323+static inline int in_init_rx(struct module *me, void *loc)
1324+{
1325+ return (loc >= me->module_init_rx &&
1326+ loc < (me->module_init_rx + me->init_size_rx));
1327+}
1328+
1329+static inline int in_init_rw(struct module *me, void *loc)
1330+{
1331+ return (loc >= me->module_init_rw &&
1332+ loc < (me->module_init_rw + me->init_size_rw));
1333+}
1334+
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337- return (loc >= me->module_init &&
1338- loc <= (me->module_init + me->init_size));
1339+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1340+}
1341+
1342+static inline int in_core_rx(struct module *me, void *loc)
1343+{
1344+ return (loc >= me->module_core_rx &&
1345+ loc < (me->module_core_rx + me->core_size_rx));
1346+}
1347+
1348+static inline int in_core_rw(struct module *me, void *loc)
1349+{
1350+ return (loc >= me->module_core_rw &&
1351+ loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356- return (loc >= me->module_core &&
1357- loc <= (me->module_core + me->core_size));
1358+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366- me->core_size = ALIGN(me->core_size, 16);
1367- me->arch.got_offset = me->core_size;
1368- me->core_size += gots * sizeof(struct got_entry);
1369-
1370- me->core_size = ALIGN(me->core_size, 16);
1371- me->arch.fdesc_offset = me->core_size;
1372- me->core_size += fdescs * sizeof(Elf_Fdesc);
1373+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374+ me->arch.got_offset = me->core_size_rw;
1375+ me->core_size_rw += gots * sizeof(struct got_entry);
1376+
1377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378+ me->arch.fdesc_offset = me->core_size_rw;
1379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387- got = me->module_core + me->arch.got_offset;
1388+ got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410@@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420--- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421+++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426- if (!vma || addr + len <= vma->vm_start)
1427+ if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435- if (!vma || addr + len <= vma->vm_start)
1436+ if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444- addr = TASK_UNMAPPED_BASE;
1445+ addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450--- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451+++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456- if (vma && (regs->iaoq[0] >= vma->vm_start)
1457- && (vma->vm_flags & VM_EXEC)) {
1458-
1459+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464--- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465+++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466@@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470+#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478- if (code == 6 || code == 16)
1479+ if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487+#ifdef CONFIG_PAX_PAGEEXEC
1488+/*
1489+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490+ *
1491+ * returns 1 when task should be killed
1492+ * 2 when rt_sigreturn trampoline was detected
1493+ * 3 when unpatched PLT trampoline was detected
1494+ */
1495+static int pax_handle_fetch_fault(struct pt_regs *regs)
1496+{
1497+
1498+#ifdef CONFIG_PAX_EMUPLT
1499+ int err;
1500+
1501+ do { /* PaX: unpatched PLT emulation */
1502+ unsigned int bl, depwi;
1503+
1504+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506+
1507+ if (err)
1508+ break;
1509+
1510+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512+
1513+ err = get_user(ldw, (unsigned int *)addr);
1514+ err |= get_user(bv, (unsigned int *)(addr+4));
1515+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1516+
1517+ if (err)
1518+ break;
1519+
1520+ if (ldw == 0x0E801096U &&
1521+ bv == 0xEAC0C000U &&
1522+ ldw2 == 0x0E881095U)
1523+ {
1524+ unsigned int resolver, map;
1525+
1526+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528+ if (err)
1529+ break;
1530+
1531+ regs->gr[20] = instruction_pointer(regs)+8;
1532+ regs->gr[21] = map;
1533+ regs->gr[22] = resolver;
1534+ regs->iaoq[0] = resolver | 3UL;
1535+ regs->iaoq[1] = regs->iaoq[0] + 4;
1536+ return 3;
1537+ }
1538+ }
1539+ } while (0);
1540+#endif
1541+
1542+#ifdef CONFIG_PAX_EMUTRAMP
1543+
1544+#ifndef CONFIG_PAX_EMUSIGRT
1545+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546+ return 1;
1547+#endif
1548+
1549+ do { /* PaX: rt_sigreturn emulation */
1550+ unsigned int ldi1, ldi2, bel, nop;
1551+
1552+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556+
1557+ if (err)
1558+ break;
1559+
1560+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561+ ldi2 == 0x3414015AU &&
1562+ bel == 0xE4008200U &&
1563+ nop == 0x08000240U)
1564+ {
1565+ regs->gr[25] = (ldi1 & 2) >> 1;
1566+ regs->gr[20] = __NR_rt_sigreturn;
1567+ regs->gr[31] = regs->iaoq[1] + 16;
1568+ regs->sr[0] = regs->iasq[1];
1569+ regs->iaoq[0] = 0x100UL;
1570+ regs->iaoq[1] = regs->iaoq[0] + 4;
1571+ regs->iasq[0] = regs->sr[2];
1572+ regs->iasq[1] = regs->sr[2];
1573+ return 2;
1574+ }
1575+ } while (0);
1576+#endif
1577+
1578+ return 1;
1579+}
1580+
1581+void pax_report_insns(void *pc, void *sp)
1582+{
1583+ unsigned long i;
1584+
1585+ printk(KERN_ERR "PAX: bytes at PC: ");
1586+ for (i = 0; i < 5; i++) {
1587+ unsigned int c;
1588+ if (get_user(c, (unsigned int *)pc+i))
1589+ printk(KERN_CONT "???????? ");
1590+ else
1591+ printk(KERN_CONT "%08x ", c);
1592+ }
1593+ printk("\n");
1594+}
1595+#endif
1596+
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600@@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604- if ((vma->vm_flags & acc_type) != acc_type)
1605+ if ((vma->vm_flags & acc_type) != acc_type) {
1606+
1607+#ifdef CONFIG_PAX_PAGEEXEC
1608+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609+ (address & ~3UL) == instruction_pointer(regs))
1610+ {
1611+ up_read(&mm->mmap_sem);
1612+ switch (pax_handle_fetch_fault(regs)) {
1613+
1614+#ifdef CONFIG_PAX_EMUPLT
1615+ case 3:
1616+ return;
1617+#endif
1618+
1619+#ifdef CONFIG_PAX_EMUTRAMP
1620+ case 2:
1621+ return;
1622+#endif
1623+
1624+ }
1625+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626+ do_group_exit(SIGKILL);
1627+ }
1628+#endif
1629+
1630 goto bad_area;
1631+ }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636--- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637+++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642-extern unsigned long randomize_et_dyn(unsigned long base);
1643-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644+#define ELF_ET_DYN_BASE (0x20000000)
1645+
1646+#ifdef CONFIG_PAX_ASLR
1647+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648+
1649+#ifdef __powerpc64__
1650+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652+#else
1653+#define PAX_DELTA_MMAP_LEN 15
1654+#define PAX_DELTA_STACK_LEN 15
1655+#endif
1656+#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665-#define arch_randomize_brk arch_randomize_brk
1666-
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671--- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672+++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673@@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677+ KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682--- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683+++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684@@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690+#define VM_STACK_DEFAULT_FLAGS32 \
1691+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697+#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701+#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706--- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707+++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714+#define VM_DATA_DEFAULT_FLAGS32 \
1715+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724+#define ktla_ktva(addr) (addr)
1725+#define ktva_ktla(addr) (addr)
1726+
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731--- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732+++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733@@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737+#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742--- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743+++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744@@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748+#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753--- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754+++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755@@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764--- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765+++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766@@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770-extern unsigned long arch_align_stack(unsigned long sp);
1771+#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776--- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777+++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778@@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783+
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787@@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791-#ifndef __powerpc64__
1792-
1793-static inline unsigned long copy_from_user(void *to,
1794- const void __user *from, unsigned long n)
1795-{
1796- unsigned long over;
1797-
1798- if (access_ok(VERIFY_READ, from, n))
1799- return __copy_tofrom_user((__force void __user *)to, from, n);
1800- if ((unsigned long)from < TASK_SIZE) {
1801- over = (unsigned long)from + n - TASK_SIZE;
1802- return __copy_tofrom_user((__force void __user *)to, from,
1803- n - over) + over;
1804- }
1805- return n;
1806-}
1807-
1808-static inline unsigned long copy_to_user(void __user *to,
1809- const void *from, unsigned long n)
1810-{
1811- unsigned long over;
1812-
1813- if (access_ok(VERIFY_WRITE, to, n))
1814- return __copy_tofrom_user(to, (__force void __user *)from, n);
1815- if ((unsigned long)to < TASK_SIZE) {
1816- over = (unsigned long)to + n - TASK_SIZE;
1817- return __copy_tofrom_user(to, (__force void __user *)from,
1818- n - over) + over;
1819- }
1820- return n;
1821-}
1822-
1823-#else /* __powerpc64__ */
1824-
1825-#define __copy_in_user(to, from, size) \
1826- __copy_tofrom_user((to), (from), (size))
1827-
1828-extern unsigned long copy_from_user(void *to, const void __user *from,
1829- unsigned long n);
1830-extern unsigned long copy_to_user(void __user *to, const void *from,
1831- unsigned long n);
1832-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833- unsigned long n);
1834-
1835-#endif /* __powerpc64__ */
1836-
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844+
1845+ if (!__builtin_constant_p(n))
1846+ check_object_size(to, n, false);
1847+
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855+
1856+ if (!__builtin_constant_p(n))
1857+ check_object_size(from, n, true);
1858+
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866+#ifndef __powerpc64__
1867+
1868+static inline unsigned long __must_check copy_from_user(void *to,
1869+ const void __user *from, unsigned long n)
1870+{
1871+ unsigned long over;
1872+
1873+ if ((long)n < 0)
1874+ return n;
1875+
1876+ if (access_ok(VERIFY_READ, from, n)) {
1877+ if (!__builtin_constant_p(n))
1878+ check_object_size(to, n, false);
1879+ return __copy_tofrom_user((__force void __user *)to, from, n);
1880+ }
1881+ if ((unsigned long)from < TASK_SIZE) {
1882+ over = (unsigned long)from + n - TASK_SIZE;
1883+ if (!__builtin_constant_p(n - over))
1884+ check_object_size(to, n - over, false);
1885+ return __copy_tofrom_user((__force void __user *)to, from,
1886+ n - over) + over;
1887+ }
1888+ return n;
1889+}
1890+
1891+static inline unsigned long __must_check copy_to_user(void __user *to,
1892+ const void *from, unsigned long n)
1893+{
1894+ unsigned long over;
1895+
1896+ if ((long)n < 0)
1897+ return n;
1898+
1899+ if (access_ok(VERIFY_WRITE, to, n)) {
1900+ if (!__builtin_constant_p(n))
1901+ check_object_size(from, n, true);
1902+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1903+ }
1904+ if ((unsigned long)to < TASK_SIZE) {
1905+ over = (unsigned long)to + n - TASK_SIZE;
1906+ if (!__builtin_constant_p(n))
1907+ check_object_size(from, n - over, true);
1908+ return __copy_tofrom_user(to, (__force void __user *)from,
1909+ n - over) + over;
1910+ }
1911+ return n;
1912+}
1913+
1914+#else /* __powerpc64__ */
1915+
1916+#define __copy_in_user(to, from, size) \
1917+ __copy_tofrom_user((to), (from), (size))
1918+
1919+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920+{
1921+ if ((long)n < 0 || n > INT_MAX)
1922+ return n;
1923+
1924+ if (!__builtin_constant_p(n))
1925+ check_object_size(to, n, false);
1926+
1927+ if (likely(access_ok(VERIFY_READ, from, n)))
1928+ n = __copy_from_user(to, from, n);
1929+ else
1930+ memset(to, 0, n);
1931+ return n;
1932+}
1933+
1934+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935+{
1936+ if ((long)n < 0 || n > INT_MAX)
1937+ return n;
1938+
1939+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940+ if (!__builtin_constant_p(n))
1941+ check_object_size(from, n, true);
1942+ n = __copy_to_user(to, from, n);
1943+ }
1944+ return n;
1945+}
1946+
1947+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948+ unsigned long n);
1949+
1950+#endif /* __powerpc64__ */
1951+
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956--- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957+++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958@@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962+ bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966@@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970-1: bl .save_nvgprs
1971- mr r5,r3
1972+1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977--- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978+++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979@@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983+ bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987- bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992--- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993+++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998- printk("Module doesn't contain .plt or .init.plt sections.\n");
1999+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007- if (location >= mod->module_core
2008- && location < mod->module_core + mod->core_size)
2009+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012- else
2013+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016+ else {
2017+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018+ return ~0UL;
2019+ }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024--- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025+++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026@@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030+#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036+ return vmalloc(size);
2037+}
2038+
2039+void *module_alloc_exec(unsigned long size)
2040+#else
2041+void *module_alloc(unsigned long size)
2042+#endif
2043+
2044+{
2045+ if (size == 0)
2046+ return NULL;
2047+
2048 return vmalloc_exec(size);
2049 }
2050
2051@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055+#ifdef CONFIG_PAX_KERNEXEC
2056+void module_free_exec(struct module *mod, void *module_region)
2057+{
2058+ module_free(mod, module_region);
2059+}
2060+#endif
2061+
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066--- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067+++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068@@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079@@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087- printk(" (%pS)",
2088+ printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092@@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101@@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105-
2106-unsigned long arch_align_stack(unsigned long sp)
2107-{
2108- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109- sp -= get_random_int() & ~PAGE_MASK;
2110- return sp & ~0xf;
2111-}
2112-
2113-static inline unsigned long brk_rnd(void)
2114-{
2115- unsigned long rnd = 0;
2116-
2117- /* 8MB for 32bit, 1GB for 64bit */
2118- if (is_32bit_task())
2119- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120- else
2121- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122-
2123- return rnd << PAGE_SHIFT;
2124-}
2125-
2126-unsigned long arch_randomize_brk(struct mm_struct *mm)
2127-{
2128- unsigned long base = mm->brk;
2129- unsigned long ret;
2130-
2131-#ifdef CONFIG_PPC_STD_MMU_64
2132- /*
2133- * If we are using 1TB segments and we are allowed to randomise
2134- * the heap, we can put it above 1TB so it is backed by a 1TB
2135- * segment. Otherwise the heap will be in the bottom 1TB
2136- * which always uses 256MB segments and this may result in a
2137- * performance penalty.
2138- */
2139- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141-#endif
2142-
2143- ret = PAGE_ALIGN(base + brk_rnd());
2144-
2145- if (ret < mm->brk)
2146- return mm->brk;
2147-
2148- return ret;
2149-}
2150-
2151-unsigned long randomize_et_dyn(unsigned long base)
2152-{
2153- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154-
2155- if (ret < base)
2156- return base;
2157-
2158- return ret;
2159-}
2160diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161--- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162+++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163@@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173--- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174+++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185--- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186+++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187@@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191+extern void gr_handle_kernel_exploit(void);
2192+
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196@@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200+ gr_handle_kernel_exploit();
2201+
2202 oops_exit();
2203 do_exit(err);
2204
2205diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206--- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207+++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208@@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212+#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220- current->mm->context.vdso_base = 0;
2221+ current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229- 0, 0);
2230+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235--- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236+++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237@@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242-{
2243- if (likely(access_ok(VERIFY_READ, from, n)))
2244- n = __copy_from_user(to, from, n);
2245- else
2246- memset(to, 0, n);
2247- return n;
2248-}
2249-
2250-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_WRITE, to, n)))
2253- n = __copy_to_user(to, from, n);
2254- return n;
2255-}
2256-
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264-EXPORT_SYMBOL(copy_from_user);
2265-EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269--- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270+++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271@@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275+#include <linux/slab.h>
2276+#include <linux/pagemap.h>
2277+#include <linux/compiler.h>
2278+#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282@@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286+#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290@@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294+#ifdef CONFIG_PAX_PAGEEXEC
2295+/*
2296+ * PaX: decide what to do with offenders (regs->nip = fault address)
2297+ *
2298+ * returns 1 when task should be killed
2299+ */
2300+static int pax_handle_fetch_fault(struct pt_regs *regs)
2301+{
2302+ return 1;
2303+}
2304+
2305+void pax_report_insns(void *pc, void *sp)
2306+{
2307+ unsigned long i;
2308+
2309+ printk(KERN_ERR "PAX: bytes at PC: ");
2310+ for (i = 0; i < 5; i++) {
2311+ unsigned int c;
2312+ if (get_user(c, (unsigned int __user *)pc+i))
2313+ printk(KERN_CONT "???????? ");
2314+ else
2315+ printk(KERN_CONT "%08x ", c);
2316+ }
2317+ printk("\n");
2318+}
2319+#endif
2320+
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324@@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328- error_code &= 0x48200000;
2329+ error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333@@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337- if (error_code & 0x10000000)
2338+ if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342@@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346- if (error_code & DSISR_PROTFAULT)
2347+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351@@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355+
2356+#ifdef CONFIG_PAX_PAGEEXEC
2357+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358+#ifdef CONFIG_PPC_STD_MMU
2359+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360+#else
2361+ if (is_exec && regs->nip == address) {
2362+#endif
2363+ switch (pax_handle_fetch_fault(regs)) {
2364+ }
2365+
2366+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367+ do_group_exit(SIGKILL);
2368+ }
2369+ }
2370+#endif
2371+
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376--- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377+++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382+
2383+#ifdef CONFIG_PAX_RANDMMAP
2384+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2385+ mm->mmap_base += mm->delta_mmap;
2386+#endif
2387+
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392+
2393+#ifdef CONFIG_PAX_RANDMMAP
2394+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2395+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396+#endif
2397+
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402--- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403+++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408- return (!vma || (addr + len) <= vma->vm_start);
2409+ return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413@@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417- if (!vma || addr + len <= vma->vm_start) {
2418+ if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426- addr = mm->mmap_base;
2427- while (addr > len) {
2428+ if (mm->mmap_base < len)
2429+ addr = -ENOMEM;
2430+ else
2431+ addr = mm->mmap_base - len;
2432+
2433+ while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444- if (!vma || (addr + len) <= vma->vm_start) {
2445+ if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453- addr = vma->vm_start;
2454+ addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462+#ifdef CONFIG_PAX_RANDMMAP
2463+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464+ addr = 0;
2465+#endif
2466+
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471--- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472+++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477-extern unsigned long randomize_et_dyn(unsigned long base);
2478-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480+
2481+#ifdef CONFIG_PAX_ASLR
2482+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483+
2484+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486+#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490@@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495-#define arch_randomize_brk arch_randomize_brk
2496-
2497 #endif
2498diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499--- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500+++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505-extern unsigned long arch_align_stack(unsigned long sp);
2506+#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511--- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512+++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513@@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517+
2518+ if ((long)n < 0)
2519+ return n;
2520+
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528+ if ((long)n < 0)
2529+ return n;
2530+
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538+
2539+ if ((long)n < 0)
2540+ return n;
2541+
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546--- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547+++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548@@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552- space programs and it also selects the addressing mode option above.
2553- The kernel parameter noexec=on will enable this feature and also
2554- switch the addressing modes, default is disabled. Enabling this (via
2555- kernel parameter) on machines earlier than IBM System z9 this will
2556- reduce system performance.
2557+ space programs.
2558+ Enabling this (via kernel parameter) on machines earlier than IBM
2559+ System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564--- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565+++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570- me->core_size = ALIGN(me->core_size, 4);
2571- me->arch.got_offset = me->core_size;
2572- me->core_size += me->arch.got_size;
2573- me->arch.plt_offset = me->core_size;
2574- me->core_size += me->arch.plt_size;
2575+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576+ me->arch.got_offset = me->core_size_rw;
2577+ me->core_size_rw += me->arch.got_size;
2578+ me->arch.plt_offset = me->core_size_rx;
2579+ me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587- gotent = me->module_core + me->arch.got_offset +
2588+ gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596- (val + (Elf_Addr) me->module_core - loc) >> 1;
2597+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605- ip = me->module_core + me->arch.plt_offset +
2606+ ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614- val = (Elf_Addr) me->module_core +
2615+ val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623- ((Elf_Addr) me->module_core + me->arch.got_offset);
2624+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638--- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639+++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640@@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644-
2645-unsigned long arch_align_stack(unsigned long sp)
2646-{
2647- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648- sp -= get_random_int() & ~PAGE_MASK;
2649- return sp & ~0xf;
2650-}
2651-
2652-static inline unsigned long brk_rnd(void)
2653-{
2654- /* 8MB for 32bit, 1GB for 64bit */
2655- if (is_32bit_task())
2656- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657- else
2658- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659-}
2660-
2661-unsigned long arch_randomize_brk(struct mm_struct *mm)
2662-{
2663- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664-
2665- if (ret < mm->brk)
2666- return mm->brk;
2667- return ret;
2668-}
2669-
2670-unsigned long randomize_et_dyn(unsigned long base)
2671-{
2672- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673-
2674- if (!(current->flags & PF_RANDOMIZE))
2675- return base;
2676- if (ret < base)
2677- return base;
2678- return ret;
2679-}
2680diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681--- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682+++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687-unsigned int user_mode = HOME_SPACE_MODE;
2688+unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692@@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696-/*
2697- * Switch kernel/user addressing modes?
2698- */
2699-static int __init early_parse_switch_amode(char *p)
2700-{
2701- if (user_mode != SECONDARY_SPACE_MODE)
2702- user_mode = PRIMARY_SPACE_MODE;
2703- return 0;
2704-}
2705-early_param("switch_amode", early_parse_switch_amode);
2706-
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710@@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714-#ifdef CONFIG_S390_EXEC_PROTECT
2715-/*
2716- * Enable execute protection?
2717- */
2718-static int __init early_parse_noexec(char *p)
2719-{
2720- if (!strncmp(p, "off", 3))
2721- return 0;
2722- user_mode = SECONDARY_SPACE_MODE;
2723- return 0;
2724-}
2725-early_param("noexec", early_parse_noexec);
2726-#endif /* CONFIG_S390_EXEC_PROTECT */
2727-
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732--- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733+++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738+
2739+#ifdef CONFIG_PAX_RANDMMAP
2740+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2741+ mm->mmap_base += mm->delta_mmap;
2742+#endif
2743+
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748+
2749+#ifdef CONFIG_PAX_RANDMMAP
2750+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2751+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752+#endif
2753+
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761+
2762+#ifdef CONFIG_PAX_RANDMMAP
2763+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2764+ mm->mmap_base += mm->delta_mmap;
2765+#endif
2766+
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771+
2772+#ifdef CONFIG_PAX_RANDMMAP
2773+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2774+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775+#endif
2776+
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781--- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782+++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783@@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787-extern unsigned long arch_align_stack(unsigned long sp);
2788+#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793--- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794+++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799-
2800-unsigned long arch_align_stack(unsigned long sp)
2801-{
2802- return sp;
2803-}
2804diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805--- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806+++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811- if (TASK_SIZE - len >= addr &&
2812- (!vma || addr + len <= vma->vm_start))
2813+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817@@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821- if (likely(!vma || addr + len <= vma->vm_start)) {
2822+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830- if (TASK_SIZE - len >= addr &&
2831- (!vma || addr + len <= vma->vm_start))
2832+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840- if (!vma || addr <= vma->vm_start) {
2841+ if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849- addr = mm->mmap_base-len;
2850- if (do_colour_align)
2851- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852+ addr = mm->mmap_base - len;
2853
2854 do {
2855+ if (do_colour_align)
2856+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863- if (likely(!vma || addr+len <= vma->vm_start)) {
2864+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872- addr = vma->vm_start-len;
2873- if (do_colour_align)
2874- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875- } while (likely(len < vma->vm_start));
2876+ addr = skip_heap_stack_gap(vma, len);
2877+ } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882--- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883+++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-05 20:34:06.000000000 -0400
2884@@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889+{
2890+ return v->counter;
2891+}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894+{
2895+ return v->counter;
2896+}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900+{
2901+ v->counter = i;
2902+}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905+{
2906+ v->counter = i;
2907+}
2908
2909 extern void atomic_add(int, atomic_t *);
2910+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930+{
2931+ return atomic_add_ret_unchecked(1, v);
2932+}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935+{
2936+ return atomic64_add_ret_unchecked(1, v);
2937+}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944+{
2945+ return atomic_add_ret_unchecked(i, v);
2946+}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949+{
2950+ return atomic64_add_ret_unchecked(i, v);
2951+}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955@@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
2960 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2961
2962 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2963@@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
2964 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2965
2966 #define atomic_inc(v) atomic_add(1, v)
2967+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2968+{
2969+ atomic_add_unchecked(1, v);
2970+}
2971 #define atomic64_inc(v) atomic64_add(1, v)
2972+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2973+{
2974+ atomic64_add_unchecked(1, v);
2975+}
2976
2977 #define atomic_dec(v) atomic_sub(1, v)
2978+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2979+{
2980+ atomic_sub_unchecked(1, v);
2981+}
2982 #define atomic64_dec(v) atomic64_sub(1, v)
2983+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2984+{
2985+ atomic64_sub_unchecked(1, v);
2986+}
2987
2988 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2989 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2990
2991 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2992+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2993 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2994+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
2995
2996 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2997 {
2998- int c, old;
2999+ int c, old, new;
3000 c = atomic_read(v);
3001 for (;;) {
3002- if (unlikely(c == (u)))
3003+ if (unlikely(c == u))
3004 break;
3005- old = atomic_cmpxchg((v), c, c + (a));
3006+
3007+ asm volatile("addcc %2, %0, %0\n"
3008+
3009+#ifdef CONFIG_PAX_REFCOUNT
3010+ "tvs %%icc, 6\n"
3011+#endif
3012+
3013+ : "=r" (new)
3014+ : "0" (c), "ir" (a)
3015+ : "cc");
3016+
3017+ old = atomic_cmpxchg(v, c, new);
3018 if (likely(old == c))
3019 break;
3020 c = old;
3021 }
3022- return c != (u);
3023+ return c != u;
3024 }
3025
3026 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3027@@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3028
3029 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3030 {
3031- long c, old;
3032+ long c, old, new;
3033 c = atomic64_read(v);
3034 for (;;) {
3035- if (unlikely(c == (u)))
3036+ if (unlikely(c == u))
3037 break;
3038- old = atomic64_cmpxchg((v), c, c + (a));
3039+
3040+ asm volatile("addcc %2, %0, %0\n"
3041+
3042+#ifdef CONFIG_PAX_REFCOUNT
3043+ "tvs %%xcc, 6\n"
3044+#endif
3045+
3046+ : "=r" (new)
3047+ : "0" (c), "ir" (a)
3048+ : "cc");
3049+
3050+ old = atomic64_cmpxchg(v, c, new);
3051 if (likely(old == c))
3052 break;
3053 c = old;
3054 }
3055- return c != (u);
3056+ return c != u;
3057 }
3058
3059 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3060diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3061--- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3062+++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3063@@ -10,7 +10,7 @@
3064 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3065
3066 #define L1_CACHE_SHIFT 5
3067-#define L1_CACHE_BYTES 32
3068+#define L1_CACHE_BYTES 32UL
3069
3070 #ifdef CONFIG_SPARC32
3071 #define SMP_CACHE_BYTES_SHIFT 5
3072diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3073--- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3074+++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3075@@ -114,6 +114,13 @@ typedef struct {
3076
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3078
3079+#ifdef CONFIG_PAX_ASLR
3080+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3081+
3082+#define PAX_DELTA_MMAP_LEN 16
3083+#define PAX_DELTA_STACK_LEN 16
3084+#endif
3085+
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this cpu supports. This can NOT be done in userspace
3088 on Sparc. */
3089diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3090--- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3091+++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3092@@ -162,6 +162,12 @@ typedef struct {
3093 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3094 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3095
3096+#ifdef CONFIG_PAX_ASLR
3097+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3098+
3099+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3100+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3101+#endif
3102
3103 /* This yields a mask that user programs can use to figure out what
3104 instruction set this cpu supports. */
3105diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3106--- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3107+++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3108@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3109 BTFIXUPDEF_INT(page_none)
3110 BTFIXUPDEF_INT(page_copy)
3111 BTFIXUPDEF_INT(page_readonly)
3112+
3113+#ifdef CONFIG_PAX_PAGEEXEC
3114+BTFIXUPDEF_INT(page_shared_noexec)
3115+BTFIXUPDEF_INT(page_copy_noexec)
3116+BTFIXUPDEF_INT(page_readonly_noexec)
3117+#endif
3118+
3119 BTFIXUPDEF_INT(page_kernel)
3120
3121 #define PMD_SHIFT SUN4C_PMD_SHIFT
3122@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3123 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3124 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3125
3126+#ifdef CONFIG_PAX_PAGEEXEC
3127+extern pgprot_t PAGE_SHARED_NOEXEC;
3128+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3129+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3130+#else
3131+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3132+# define PAGE_COPY_NOEXEC PAGE_COPY
3133+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3134+#endif
3135+
3136 extern unsigned long page_kernel;
3137
3138 #ifdef MODULE
3139diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3140--- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3141+++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3142@@ -115,6 +115,13 @@
3143 SRMMU_EXEC | SRMMU_REF)
3144 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3145 SRMMU_EXEC | SRMMU_REF)
3146+
3147+#ifdef CONFIG_PAX_PAGEEXEC
3148+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3149+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3150+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3151+#endif
3152+
3153 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3154 SRMMU_DIRTY | SRMMU_REF)
3155
3156diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3157--- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3158+++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-05 19:44:33.000000000 -0400
3159@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3160
3161 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3162
3163-static void inline arch_read_lock(arch_rwlock_t *lock)
3164+static inline void arch_read_lock(arch_rwlock_t *lock)
3165 {
3166 unsigned long tmp1, tmp2;
3167
3168 __asm__ __volatile__ (
3169 "1: ldsw [%2], %0\n"
3170 " brlz,pn %0, 2f\n"
3171-"4: add %0, 1, %1\n"
3172+"4: addcc %0, 1, %1\n"
3173+
3174+#ifdef CONFIG_PAX_REFCOUNT
3175+" tvs %%icc, 6\n"
3176+#endif
3177+
3178 " cas [%2], %0, %1\n"
3179 " cmp %0, %1\n"
3180 " bne,pn %%icc, 1b\n"
3181@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3182 " .previous"
3183 : "=&r" (tmp1), "=&r" (tmp2)
3184 : "r" (lock)
3185- : "memory");
3186+ : "memory", "cc");
3187 }
3188
3189-static int inline arch_read_trylock(arch_rwlock_t *lock)
3190+static inline int arch_read_trylock(arch_rwlock_t *lock)
3191 {
3192 int tmp1, tmp2;
3193
3194@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3195 "1: ldsw [%2], %0\n"
3196 " brlz,a,pn %0, 2f\n"
3197 " mov 0, %0\n"
3198-" add %0, 1, %1\n"
3199+" addcc %0, 1, %1\n"
3200+
3201+#ifdef CONFIG_PAX_REFCOUNT
3202+" tvs %%icc, 6\n"
3203+#endif
3204+
3205 " cas [%2], %0, %1\n"
3206 " cmp %0, %1\n"
3207 " bne,pn %%icc, 1b\n"
3208@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3209 return tmp1;
3210 }
3211
3212-static void inline arch_read_unlock(arch_rwlock_t *lock)
3213+static inline void arch_read_unlock(arch_rwlock_t *lock)
3214 {
3215 unsigned long tmp1, tmp2;
3216
3217 __asm__ __volatile__(
3218 "1: lduw [%2], %0\n"
3219-" sub %0, 1, %1\n"
3220+" subcc %0, 1, %1\n"
3221+
3222+#ifdef CONFIG_PAX_REFCOUNT
3223+" tvs %%icc, 6\n"
3224+#endif
3225+
3226 " cas [%2], %0, %1\n"
3227 " cmp %0, %1\n"
3228 " bne,pn %%xcc, 1b\n"
3229@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3230 : "memory");
3231 }
3232
3233-static void inline arch_write_lock(arch_rwlock_t *lock)
3234+static inline void arch_write_lock(arch_rwlock_t *lock)
3235 {
3236 unsigned long mask, tmp1, tmp2;
3237
3238@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3239 : "memory");
3240 }
3241
3242-static void inline arch_write_unlock(arch_rwlock_t *lock)
3243+static inline void arch_write_unlock(arch_rwlock_t *lock)
3244 {
3245 __asm__ __volatile__(
3246 " stw %%g0, [%0]"
3247@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3248 : "memory");
3249 }
3250
3251-static int inline arch_write_trylock(arch_rwlock_t *lock)
3252+static inline int arch_write_trylock(arch_rwlock_t *lock)
3253 {
3254 unsigned long mask, tmp1, tmp2, result;
3255
3256diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3257--- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3258+++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3259@@ -50,6 +50,8 @@ struct thread_info {
3260 unsigned long w_saved;
3261
3262 struct restart_block restart_block;
3263+
3264+ unsigned long lowest_stack;
3265 };
3266
3267 /*
3268diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3269--- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3270+++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3271@@ -63,6 +63,8 @@ struct thread_info {
3272 struct pt_regs *kern_una_regs;
3273 unsigned int kern_una_insn;
3274
3275+ unsigned long lowest_stack;
3276+
3277 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3278 };
3279
3280diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3281--- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3282+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3283@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3284
3285 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3286 {
3287- if (n && __access_ok((unsigned long) to, n))
3288+ if ((long)n < 0)
3289+ return n;
3290+
3291+ if (n && __access_ok((unsigned long) to, n)) {
3292+ if (!__builtin_constant_p(n))
3293+ check_object_size(from, n, true);
3294 return __copy_user(to, (__force void __user *) from, n);
3295- else
3296+ } else
3297 return n;
3298 }
3299
3300 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3301 {
3302+ if ((long)n < 0)
3303+ return n;
3304+
3305+ if (!__builtin_constant_p(n))
3306+ check_object_size(from, n, true);
3307+
3308 return __copy_user(to, (__force void __user *) from, n);
3309 }
3310
3311 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3312 {
3313- if (n && __access_ok((unsigned long) from, n))
3314+ if ((long)n < 0)
3315+ return n;
3316+
3317+ if (n && __access_ok((unsigned long) from, n)) {
3318+ if (!__builtin_constant_p(n))
3319+ check_object_size(to, n, false);
3320 return __copy_user((__force void __user *) to, from, n);
3321- else
3322+ } else
3323 return n;
3324 }
3325
3326 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3327 {
3328+ if ((long)n < 0)
3329+ return n;
3330+
3331 return __copy_user((__force void __user *) to, from, n);
3332 }
3333
3334diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3335--- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3336+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3337@@ -10,6 +10,7 @@
3338 #include <linux/compiler.h>
3339 #include <linux/string.h>
3340 #include <linux/thread_info.h>
3341+#include <linux/kernel.h>
3342 #include <asm/asi.h>
3343 #include <asm/system.h>
3344 #include <asm/spitfire.h>
3345@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3346 static inline unsigned long __must_check
3347 copy_from_user(void *to, const void __user *from, unsigned long size)
3348 {
3349- unsigned long ret = ___copy_from_user(to, from, size);
3350+ unsigned long ret;
3351
3352+ if ((long)size < 0 || size > INT_MAX)
3353+ return size;
3354+
3355+ if (!__builtin_constant_p(size))
3356+ check_object_size(to, size, false);
3357+
3358+ ret = ___copy_from_user(to, from, size);
3359 if (unlikely(ret))
3360 ret = copy_from_user_fixup(to, from, size);
3361
3362@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3363 static inline unsigned long __must_check
3364 copy_to_user(void __user *to, const void *from, unsigned long size)
3365 {
3366- unsigned long ret = ___copy_to_user(to, from, size);
3367+ unsigned long ret;
3368+
3369+ if ((long)size < 0 || size > INT_MAX)
3370+ return size;
3371+
3372+ if (!__builtin_constant_p(size))
3373+ check_object_size(from, size, true);
3374
3375+ ret = ___copy_to_user(to, from, size);
3376 if (unlikely(ret))
3377 ret = copy_to_user_fixup(to, from, size);
3378 return ret;
3379diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3380--- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3381+++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3382@@ -1,5 +1,13 @@
3383 #ifndef ___ASM_SPARC_UACCESS_H
3384 #define ___ASM_SPARC_UACCESS_H
3385+
3386+#ifdef __KERNEL__
3387+#ifndef __ASSEMBLY__
3388+#include <linux/types.h>
3389+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3390+#endif
3391+#endif
3392+
3393 #if defined(__sparc__) && defined(__arch64__)
3394 #include <asm/uaccess_64.h>
3395 #else
3396diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3397--- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3398+++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3399@@ -3,7 +3,7 @@
3400 #
3401
3402 asflags-y := -ansi
3403-ccflags-y := -Werror
3404+#ccflags-y := -Werror
3405
3406 extra-y := head_$(BITS).o
3407 extra-y += init_task.o
3408diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3409--- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3410+++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3411@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3412 rw->ins[4], rw->ins[5],
3413 rw->ins[6],
3414 rw->ins[7]);
3415- printk("%pS\n", (void *) rw->ins[7]);
3416+ printk("%pA\n", (void *) rw->ins[7]);
3417 rw = (struct reg_window32 *) rw->ins[6];
3418 }
3419 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3420@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3421
3422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3423 r->psr, r->pc, r->npc, r->y, print_tainted());
3424- printk("PC: <%pS>\n", (void *) r->pc);
3425+ printk("PC: <%pA>\n", (void *) r->pc);
3426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3432- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3433+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3434
3435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3437@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3438 rw = (struct reg_window32 *) fp;
3439 pc = rw->ins[7];
3440 printk("[%08lx : ", pc);
3441- printk("%pS ] ", (void *) pc);
3442+ printk("%pA ] ", (void *) pc);
3443 fp = rw->ins[6];
3444 } while (++count < 16);
3445 printk("\n");
3446diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3447--- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3448+++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3449@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3450 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3451 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3452 if (regs->tstate & TSTATE_PRIV)
3453- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3454+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3455 }
3456
3457 void show_regs(struct pt_regs *regs)
3458 {
3459 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3460 regs->tpc, regs->tnpc, regs->y, print_tainted());
3461- printk("TPC: <%pS>\n", (void *) regs->tpc);
3462+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3463 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3464 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3465 regs->u_regs[3]);
3466@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3467 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3468 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3469 regs->u_regs[15]);
3470- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3471+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3472 show_regwindow(regs);
3473 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3474 }
3475@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3476 ((tp && tp->task) ? tp->task->pid : -1));
3477
3478 if (gp->tstate & TSTATE_PRIV) {
3479- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3480+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3481 (void *) gp->tpc,
3482 (void *) gp->o7,
3483 (void *) gp->i7,
3484diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3485--- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3486+++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3487@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3488 if (ARCH_SUN4C && len > 0x20000000)
3489 return -ENOMEM;
3490 if (!addr)
3491- addr = TASK_UNMAPPED_BASE;
3492+ addr = current->mm->mmap_base;
3493
3494 if (flags & MAP_SHARED)
3495 addr = COLOUR_ALIGN(addr);
3496@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3497 }
3498 if (TASK_SIZE - PAGE_SIZE - len < addr)
3499 return -ENOMEM;
3500- if (!vmm || addr + len <= vmm->vm_start)
3501+ if (check_heap_stack_gap(vmm, addr, len))
3502 return addr;
3503 addr = vmm->vm_end;
3504 if (flags & MAP_SHARED)
3505diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3506--- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3507+++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3508@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3509 /* We do not accept a shared mapping if it would violate
3510 * cache aliasing constraints.
3511 */
3512- if ((flags & MAP_SHARED) &&
3513+ if ((filp || (flags & MAP_SHARED)) &&
3514 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3515 return -EINVAL;
3516 return addr;
3517@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3518 if (filp || (flags & MAP_SHARED))
3519 do_color_align = 1;
3520
3521+#ifdef CONFIG_PAX_RANDMMAP
3522+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3523+#endif
3524+
3525 if (addr) {
3526 if (do_color_align)
3527 addr = COLOUR_ALIGN(addr, pgoff);
3528@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3529 addr = PAGE_ALIGN(addr);
3530
3531 vma = find_vma(mm, addr);
3532- if (task_size - len >= addr &&
3533- (!vma || addr + len <= vma->vm_start))
3534+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3535 return addr;
3536 }
3537
3538 if (len > mm->cached_hole_size) {
3539- start_addr = addr = mm->free_area_cache;
3540+ start_addr = addr = mm->free_area_cache;
3541 } else {
3542- start_addr = addr = TASK_UNMAPPED_BASE;
3543+ start_addr = addr = mm->mmap_base;
3544 mm->cached_hole_size = 0;
3545 }
3546
3547@@ -174,14 +177,14 @@ full_search:
3548 vma = find_vma(mm, VA_EXCLUDE_END);
3549 }
3550 if (unlikely(task_size < addr)) {
3551- if (start_addr != TASK_UNMAPPED_BASE) {
3552- start_addr = addr = TASK_UNMAPPED_BASE;
3553+ if (start_addr != mm->mmap_base) {
3554+ start_addr = addr = mm->mmap_base;
3555 mm->cached_hole_size = 0;
3556 goto full_search;
3557 }
3558 return -ENOMEM;
3559 }
3560- if (likely(!vma || addr + len <= vma->vm_start)) {
3561+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3562 /*
3563 * Remember the place where we stopped the search:
3564 */
3565@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3566 /* We do not accept a shared mapping if it would violate
3567 * cache aliasing constraints.
3568 */
3569- if ((flags & MAP_SHARED) &&
3570+ if ((filp || (flags & MAP_SHARED)) &&
3571 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3572 return -EINVAL;
3573 return addr;
3574@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3575 addr = PAGE_ALIGN(addr);
3576
3577 vma = find_vma(mm, addr);
3578- if (task_size - len >= addr &&
3579- (!vma || addr + len <= vma->vm_start))
3580+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3581 return addr;
3582 }
3583
3584@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 /* make sure it can fit in the remaining address space */
3586 if (likely(addr > len)) {
3587 vma = find_vma(mm, addr-len);
3588- if (!vma || addr <= vma->vm_start) {
3589+ if (check_heap_stack_gap(vma, addr - len, len)) {
3590 /* remember the address as a hint for next time */
3591 return (mm->free_area_cache = addr-len);
3592 }
3593@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3594 if (unlikely(mm->mmap_base < len))
3595 goto bottomup;
3596
3597- addr = mm->mmap_base-len;
3598- if (do_color_align)
3599- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3600+ addr = mm->mmap_base - len;
3601
3602 do {
3603+ if (do_color_align)
3604+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3605 /*
3606 * Lookup failure means no vma is above this address,
3607 * else if new region fits below vma->vm_start,
3608 * return with success:
3609 */
3610 vma = find_vma(mm, addr);
3611- if (likely(!vma || addr+len <= vma->vm_start)) {
3612+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3613 /* remember the address as a hint for next time */
3614 return (mm->free_area_cache = addr);
3615 }
3616@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3617 mm->cached_hole_size = vma->vm_start - addr;
3618
3619 /* try just below the current vma->vm_start */
3620- addr = vma->vm_start-len;
3621- if (do_color_align)
3622- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3623- } while (likely(len < vma->vm_start));
3624+ addr = skip_heap_stack_gap(vma, len);
3625+ } while (!IS_ERR_VALUE(addr));
3626
3627 bottomup:
3628 /*
3629@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3630 gap == RLIM_INFINITY ||
3631 sysctl_legacy_va_layout) {
3632 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3633+
3634+#ifdef CONFIG_PAX_RANDMMAP
3635+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3636+ mm->mmap_base += mm->delta_mmap;
3637+#endif
3638+
3639 mm->get_unmapped_area = arch_get_unmapped_area;
3640 mm->unmap_area = arch_unmap_area;
3641 } else {
3642@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3643 gap = (task_size / 6 * 5);
3644
3645 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3646+
3647+#ifdef CONFIG_PAX_RANDMMAP
3648+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3649+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3650+#endif
3651+
3652 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3653 mm->unmap_area = arch_unmap_area_topdown;
3654 }
3655diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3656--- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3657+++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3658@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3659 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3660 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3661
3662+extern void gr_handle_kernel_exploit(void);
3663+
3664 void die_if_kernel(char *str, struct pt_regs *regs)
3665 {
3666 static int die_counter;
3667@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3668 count++ < 30 &&
3669 (((unsigned long) rw) >= PAGE_OFFSET) &&
3670 !(((unsigned long) rw) & 0x7)) {
3671- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3672+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3673 (void *) rw->ins[7]);
3674 rw = (struct reg_window32 *)rw->ins[6];
3675 }
3676 }
3677 printk("Instruction DUMP:");
3678 instruction_dump ((unsigned long *) regs->pc);
3679- if(regs->psr & PSR_PS)
3680+ if(regs->psr & PSR_PS) {
3681+ gr_handle_kernel_exploit();
3682 do_exit(SIGKILL);
3683+ }
3684 do_exit(SIGSEGV);
3685 }
3686
3687diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3688--- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3689+++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3690@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3691 i + 1,
3692 p->trapstack[i].tstate, p->trapstack[i].tpc,
3693 p->trapstack[i].tnpc, p->trapstack[i].tt);
3694- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3695+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3696 }
3697 }
3698
3699@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3700
3701 lvl -= 0x100;
3702 if (regs->tstate & TSTATE_PRIV) {
3703+
3704+#ifdef CONFIG_PAX_REFCOUNT
3705+ if (lvl == 6)
3706+ pax_report_refcount_overflow(regs);
3707+#endif
3708+
3709 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3710 die_if_kernel(buffer, regs);
3711 }
3712@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3713 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3714 {
3715 char buffer[32];
3716-
3717+
3718 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3719 0, lvl, SIGTRAP) == NOTIFY_STOP)
3720 return;
3721
3722+#ifdef CONFIG_PAX_REFCOUNT
3723+ if (lvl == 6)
3724+ pax_report_refcount_overflow(regs);
3725+#endif
3726+
3727 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3728
3729 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3730@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3731 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3732 printk("%s" "ERROR(%d): ",
3733 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3734- printk("TPC<%pS>\n", (void *) regs->tpc);
3735+ printk("TPC<%pA>\n", (void *) regs->tpc);
3736 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3737 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3738 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3739@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3740 smp_processor_id(),
3741 (type & 0x1) ? 'I' : 'D',
3742 regs->tpc);
3743- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3744+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3745 panic("Irrecoverable Cheetah+ parity error.");
3746 }
3747
3748@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3749 smp_processor_id(),
3750 (type & 0x1) ? 'I' : 'D',
3751 regs->tpc);
3752- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3753+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3754 }
3755
3756 struct sun4v_error_entry {
3757@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3758
3759 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3760 regs->tpc, tl);
3761- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3762+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3763 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3764- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3765+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3766 (void *) regs->u_regs[UREG_I7]);
3767 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3768 "pte[%lx] error[%lx]\n",
3769@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3770
3771 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3772 regs->tpc, tl);
3773- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3774+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3775 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3776- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3777+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3778 (void *) regs->u_regs[UREG_I7]);
3779 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3780 "pte[%lx] error[%lx]\n",
3781@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3782 fp = (unsigned long)sf->fp + STACK_BIAS;
3783 }
3784
3785- printk(" [%016lx] %pS\n", pc, (void *) pc);
3786+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3788 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3789 int index = tsk->curr_ret_stack;
3790 if (tsk->ret_stack && index >= graph) {
3791 pc = tsk->ret_stack[index - graph].ret;
3792- printk(" [%016lx] %pS\n", pc, (void *) pc);
3793+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3794 graph++;
3795 }
3796 }
3797@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3798 return (struct reg_window *) (fp + STACK_BIAS);
3799 }
3800
3801+extern void gr_handle_kernel_exploit(void);
3802+
3803 void die_if_kernel(char *str, struct pt_regs *regs)
3804 {
3805 static int die_counter;
3806@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3807 while (rw &&
3808 count++ < 30 &&
3809 kstack_valid(tp, (unsigned long) rw)) {
3810- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3811+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3812 (void *) rw->ins[7]);
3813
3814 rw = kernel_stack_up(rw);
3815@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3816 }
3817 user_instruction_dump ((unsigned int __user *) regs->tpc);
3818 }
3819- if (regs->tstate & TSTATE_PRIV)
3820+ if (regs->tstate & TSTATE_PRIV) {
3821+ gr_handle_kernel_exploit();
3822 do_exit(SIGKILL);
3823+ }
3824 do_exit(SIGSEGV);
3825 }
3826 EXPORT_SYMBOL(die_if_kernel);
3827diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3828--- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3829+++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3830@@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3831 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3832
3833 if (__ratelimit(&ratelimit)) {
3834- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3835+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3836 regs->tpc, (void *) regs->tpc);
3837 }
3838 }
3839diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3840--- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3841+++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3842@@ -18,7 +18,12 @@
3843 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3844 BACKOFF_SETUP(%o2)
3845 1: lduw [%o1], %g1
3846- add %g1, %o0, %g7
3847+ addcc %g1, %o0, %g7
3848+
3849+#ifdef CONFIG_PAX_REFCOUNT
3850+ tvs %icc, 6
3851+#endif
3852+
3853 cas [%o1], %g1, %g7
3854 cmp %g1, %g7
3855 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3856@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3857 2: BACKOFF_SPIN(%o2, %o3, 1b)
3858 .size atomic_add, .-atomic_add
3859
3860+ .globl atomic_add_unchecked
3861+ .type atomic_add_unchecked,#function
3862+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3863+ BACKOFF_SETUP(%o2)
3864+1: lduw [%o1], %g1
3865+ add %g1, %o0, %g7
3866+ cas [%o1], %g1, %g7
3867+ cmp %g1, %g7
3868+ bne,pn %icc, 2f
3869+ nop
3870+ retl
3871+ nop
3872+2: BACKOFF_SPIN(%o2, %o3, 1b)
3873+ .size atomic_add_unchecked, .-atomic_add_unchecked
3874+
3875 .globl atomic_sub
3876 .type atomic_sub,#function
3877 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3878 BACKOFF_SETUP(%o2)
3879 1: lduw [%o1], %g1
3880- sub %g1, %o0, %g7
3881+ subcc %g1, %o0, %g7
3882+
3883+#ifdef CONFIG_PAX_REFCOUNT
3884+ tvs %icc, 6
3885+#endif
3886+
3887 cas [%o1], %g1, %g7
3888 cmp %g1, %g7
3889 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3890@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3891 2: BACKOFF_SPIN(%o2, %o3, 1b)
3892 .size atomic_sub, .-atomic_sub
3893
3894+ .globl atomic_sub_unchecked
3895+ .type atomic_sub_unchecked,#function
3896+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3897+ BACKOFF_SETUP(%o2)
3898+1: lduw [%o1], %g1
3899+ sub %g1, %o0, %g7
3900+ cas [%o1], %g1, %g7
3901+ cmp %g1, %g7
3902+ bne,pn %icc, 2f
3903+ nop
3904+ retl
3905+ nop
3906+2: BACKOFF_SPIN(%o2, %o3, 1b)
3907+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3908+
3909 .globl atomic_add_ret
3910 .type atomic_add_ret,#function
3911 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3912 BACKOFF_SETUP(%o2)
3913 1: lduw [%o1], %g1
3914- add %g1, %o0, %g7
3915+ addcc %g1, %o0, %g7
3916+
3917+#ifdef CONFIG_PAX_REFCOUNT
3918+ tvs %icc, 6
3919+#endif
3920+
3921 cas [%o1], %g1, %g7
3922 cmp %g1, %g7
3923 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3924@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3925 2: BACKOFF_SPIN(%o2, %o3, 1b)
3926 .size atomic_add_ret, .-atomic_add_ret
3927
3928+ .globl atomic_add_ret_unchecked
3929+ .type atomic_add_ret_unchecked,#function
3930+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3931+ BACKOFF_SETUP(%o2)
3932+1: lduw [%o1], %g1
3933+ addcc %g1, %o0, %g7
3934+ cas [%o1], %g1, %g7
3935+ cmp %g1, %g7
3936+ bne,pn %icc, 2f
3937+ add %g7, %o0, %g7
3938+ sra %g7, 0, %o0
3939+ retl
3940+ nop
3941+2: BACKOFF_SPIN(%o2, %o3, 1b)
3942+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3943+
3944 .globl atomic_sub_ret
3945 .type atomic_sub_ret,#function
3946 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3947 BACKOFF_SETUP(%o2)
3948 1: lduw [%o1], %g1
3949- sub %g1, %o0, %g7
3950+ subcc %g1, %o0, %g7
3951+
3952+#ifdef CONFIG_PAX_REFCOUNT
3953+ tvs %icc, 6
3954+#endif
3955+
3956 cas [%o1], %g1, %g7
3957 cmp %g1, %g7
3958 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3959@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3960 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3961 BACKOFF_SETUP(%o2)
3962 1: ldx [%o1], %g1
3963- add %g1, %o0, %g7
3964+ addcc %g1, %o0, %g7
3965+
3966+#ifdef CONFIG_PAX_REFCOUNT
3967+ tvs %xcc, 6
3968+#endif
3969+
3970 casx [%o1], %g1, %g7
3971 cmp %g1, %g7
3972 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3973@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3974 2: BACKOFF_SPIN(%o2, %o3, 1b)
3975 .size atomic64_add, .-atomic64_add
3976
3977+ .globl atomic64_add_unchecked
3978+ .type atomic64_add_unchecked,#function
3979+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3980+ BACKOFF_SETUP(%o2)
3981+1: ldx [%o1], %g1
3982+ addcc %g1, %o0, %g7
3983+ casx [%o1], %g1, %g7
3984+ cmp %g1, %g7
3985+ bne,pn %xcc, 2f
3986+ nop
3987+ retl
3988+ nop
3989+2: BACKOFF_SPIN(%o2, %o3, 1b)
3990+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3991+
3992 .globl atomic64_sub
3993 .type atomic64_sub,#function
3994 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3995 BACKOFF_SETUP(%o2)
3996 1: ldx [%o1], %g1
3997- sub %g1, %o0, %g7
3998+ subcc %g1, %o0, %g7
3999+
4000+#ifdef CONFIG_PAX_REFCOUNT
4001+ tvs %xcc, 6
4002+#endif
4003+
4004 casx [%o1], %g1, %g7
4005 cmp %g1, %g7
4006 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4007@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4008 2: BACKOFF_SPIN(%o2, %o3, 1b)
4009 .size atomic64_sub, .-atomic64_sub
4010
4011+ .globl atomic64_sub_unchecked
4012+ .type atomic64_sub_unchecked,#function
4013+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4014+ BACKOFF_SETUP(%o2)
4015+1: ldx [%o1], %g1
4016+ subcc %g1, %o0, %g7
4017+ casx [%o1], %g1, %g7
4018+ cmp %g1, %g7
4019+ bne,pn %xcc, 2f
4020+ nop
4021+ retl
4022+ nop
4023+2: BACKOFF_SPIN(%o2, %o3, 1b)
4024+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4025+
4026 .globl atomic64_add_ret
4027 .type atomic64_add_ret,#function
4028 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4029 BACKOFF_SETUP(%o2)
4030 1: ldx [%o1], %g1
4031- add %g1, %o0, %g7
4032+ addcc %g1, %o0, %g7
4033+
4034+#ifdef CONFIG_PAX_REFCOUNT
4035+ tvs %xcc, 6
4036+#endif
4037+
4038 casx [%o1], %g1, %g7
4039 cmp %g1, %g7
4040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4041@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4042 2: BACKOFF_SPIN(%o2, %o3, 1b)
4043 .size atomic64_add_ret, .-atomic64_add_ret
4044
4045+ .globl atomic64_add_ret_unchecked
4046+ .type atomic64_add_ret_unchecked,#function
4047+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4048+ BACKOFF_SETUP(%o2)
4049+1: ldx [%o1], %g1
4050+ addcc %g1, %o0, %g7
4051+ casx [%o1], %g1, %g7
4052+ cmp %g1, %g7
4053+ bne,pn %xcc, 2f
4054+ add %g7, %o0, %g7
4055+ mov %g7, %o0
4056+ retl
4057+ nop
4058+2: BACKOFF_SPIN(%o2, %o3, 1b)
4059+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4060+
4061 .globl atomic64_sub_ret
4062 .type atomic64_sub_ret,#function
4063 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4064 BACKOFF_SETUP(%o2)
4065 1: ldx [%o1], %g1
4066- sub %g1, %o0, %g7
4067+ subcc %g1, %o0, %g7
4068+
4069+#ifdef CONFIG_PAX_REFCOUNT
4070+ tvs %xcc, 6
4071+#endif
4072+
4073 casx [%o1], %g1, %g7
4074 cmp %g1, %g7
4075 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4076diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4077--- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4078+++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4079@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4080
4081 /* Atomic counter implementation. */
4082 EXPORT_SYMBOL(atomic_add);
4083+EXPORT_SYMBOL(atomic_add_unchecked);
4084 EXPORT_SYMBOL(atomic_add_ret);
4085 EXPORT_SYMBOL(atomic_sub);
4086+EXPORT_SYMBOL(atomic_sub_unchecked);
4087 EXPORT_SYMBOL(atomic_sub_ret);
4088 EXPORT_SYMBOL(atomic64_add);
4089+EXPORT_SYMBOL(atomic64_add_unchecked);
4090 EXPORT_SYMBOL(atomic64_add_ret);
4091+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4092 EXPORT_SYMBOL(atomic64_sub);
4093+EXPORT_SYMBOL(atomic64_sub_unchecked);
4094 EXPORT_SYMBOL(atomic64_sub_ret);
4095
4096 /* Atomic bit operations. */
4097diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4098--- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4099+++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4100@@ -2,7 +2,7 @@
4101 #
4102
4103 asflags-y := -ansi -DST_DIV0=0x02
4104-ccflags-y := -Werror
4105+#ccflags-y := -Werror
4106
4107 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4108 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4109diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4110--- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4111+++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4112@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4113 # Export what is needed by arch/sparc/boot/Makefile
4114 export VMLINUX_INIT VMLINUX_MAIN
4115 VMLINUX_INIT := $(head-y) $(init-y)
4116-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4117+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4118 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4119 VMLINUX_MAIN += $(drivers-y) $(net-y)
4120
4121diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4122--- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4123+++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4124@@ -22,6 +22,9 @@
4125 #include <linux/interrupt.h>
4126 #include <linux/module.h>
4127 #include <linux/kdebug.h>
4128+#include <linux/slab.h>
4129+#include <linux/pagemap.h>
4130+#include <linux/compiler.h>
4131
4132 #include <asm/system.h>
4133 #include <asm/page.h>
4134@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4135 return safe_compute_effective_address(regs, insn);
4136 }
4137
4138+#ifdef CONFIG_PAX_PAGEEXEC
4139+#ifdef CONFIG_PAX_DLRESOLVE
4140+static void pax_emuplt_close(struct vm_area_struct *vma)
4141+{
4142+ vma->vm_mm->call_dl_resolve = 0UL;
4143+}
4144+
4145+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4146+{
4147+ unsigned int *kaddr;
4148+
4149+ vmf->page = alloc_page(GFP_HIGHUSER);
4150+ if (!vmf->page)
4151+ return VM_FAULT_OOM;
4152+
4153+ kaddr = kmap(vmf->page);
4154+ memset(kaddr, 0, PAGE_SIZE);
4155+ kaddr[0] = 0x9DE3BFA8U; /* save */
4156+ flush_dcache_page(vmf->page);
4157+ kunmap(vmf->page);
4158+ return VM_FAULT_MAJOR;
4159+}
4160+
4161+static const struct vm_operations_struct pax_vm_ops = {
4162+ .close = pax_emuplt_close,
4163+ .fault = pax_emuplt_fault
4164+};
4165+
4166+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4167+{
4168+ int ret;
4169+
4170+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4171+ vma->vm_mm = current->mm;
4172+ vma->vm_start = addr;
4173+ vma->vm_end = addr + PAGE_SIZE;
4174+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4175+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4176+ vma->vm_ops = &pax_vm_ops;
4177+
4178+ ret = insert_vm_struct(current->mm, vma);
4179+ if (ret)
4180+ return ret;
4181+
4182+ ++current->mm->total_vm;
4183+ return 0;
4184+}
4185+#endif
4186+
4187+/*
4188+ * PaX: decide what to do with offenders (regs->pc = fault address)
4189+ *
4190+ * returns 1 when task should be killed
4191+ * 2 when patched PLT trampoline was detected
4192+ * 3 when unpatched PLT trampoline was detected
4193+ */
4194+static int pax_handle_fetch_fault(struct pt_regs *regs)
4195+{
4196+
4197+#ifdef CONFIG_PAX_EMUPLT
4198+ int err;
4199+
4200+ do { /* PaX: patched PLT emulation #1 */
4201+ unsigned int sethi1, sethi2, jmpl;
4202+
4203+ err = get_user(sethi1, (unsigned int *)regs->pc);
4204+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4205+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4206+
4207+ if (err)
4208+ break;
4209+
4210+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4211+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4212+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4213+ {
4214+ unsigned int addr;
4215+
4216+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4217+ addr = regs->u_regs[UREG_G1];
4218+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4219+ regs->pc = addr;
4220+ regs->npc = addr+4;
4221+ return 2;
4222+ }
4223+ } while (0);
4224+
4225+ { /* PaX: patched PLT emulation #2 */
4226+ unsigned int ba;
4227+
4228+ err = get_user(ba, (unsigned int *)regs->pc);
4229+
4230+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4231+ unsigned int addr;
4232+
4233+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4234+ regs->pc = addr;
4235+ regs->npc = addr+4;
4236+ return 2;
4237+ }
4238+ }
4239+
4240+ do { /* PaX: patched PLT emulation #3 */
4241+ unsigned int sethi, jmpl, nop;
4242+
4243+ err = get_user(sethi, (unsigned int *)regs->pc);
4244+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4245+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4246+
4247+ if (err)
4248+ break;
4249+
4250+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4251+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4252+ nop == 0x01000000U)
4253+ {
4254+ unsigned int addr;
4255+
4256+ addr = (sethi & 0x003FFFFFU) << 10;
4257+ regs->u_regs[UREG_G1] = addr;
4258+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4259+ regs->pc = addr;
4260+ regs->npc = addr+4;
4261+ return 2;
4262+ }
4263+ } while (0);
4264+
4265+ do { /* PaX: unpatched PLT emulation step 1 */
4266+ unsigned int sethi, ba, nop;
4267+
4268+ err = get_user(sethi, (unsigned int *)regs->pc);
4269+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4270+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4271+
4272+ if (err)
4273+ break;
4274+
4275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4276+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4277+ nop == 0x01000000U)
4278+ {
4279+ unsigned int addr, save, call;
4280+
4281+ if ((ba & 0xFFC00000U) == 0x30800000U)
4282+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4283+ else
4284+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4285+
4286+ err = get_user(save, (unsigned int *)addr);
4287+ err |= get_user(call, (unsigned int *)(addr+4));
4288+ err |= get_user(nop, (unsigned int *)(addr+8));
4289+ if (err)
4290+ break;
4291+
4292+#ifdef CONFIG_PAX_DLRESOLVE
4293+ if (save == 0x9DE3BFA8U &&
4294+ (call & 0xC0000000U) == 0x40000000U &&
4295+ nop == 0x01000000U)
4296+ {
4297+ struct vm_area_struct *vma;
4298+ unsigned long call_dl_resolve;
4299+
4300+ down_read(&current->mm->mmap_sem);
4301+ call_dl_resolve = current->mm->call_dl_resolve;
4302+ up_read(&current->mm->mmap_sem);
4303+ if (likely(call_dl_resolve))
4304+ goto emulate;
4305+
4306+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4307+
4308+ down_write(&current->mm->mmap_sem);
4309+ if (current->mm->call_dl_resolve) {
4310+ call_dl_resolve = current->mm->call_dl_resolve;
4311+ up_write(&current->mm->mmap_sem);
4312+ if (vma)
4313+ kmem_cache_free(vm_area_cachep, vma);
4314+ goto emulate;
4315+ }
4316+
4317+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4318+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4319+ up_write(&current->mm->mmap_sem);
4320+ if (vma)
4321+ kmem_cache_free(vm_area_cachep, vma);
4322+ return 1;
4323+ }
4324+
4325+ if (pax_insert_vma(vma, call_dl_resolve)) {
4326+ up_write(&current->mm->mmap_sem);
4327+ kmem_cache_free(vm_area_cachep, vma);
4328+ return 1;
4329+ }
4330+
4331+ current->mm->call_dl_resolve = call_dl_resolve;
4332+ up_write(&current->mm->mmap_sem);
4333+
4334+emulate:
4335+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4336+ regs->pc = call_dl_resolve;
4337+ regs->npc = addr+4;
4338+ return 3;
4339+ }
4340+#endif
4341+
4342+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4343+ if ((save & 0xFFC00000U) == 0x05000000U &&
4344+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4345+ nop == 0x01000000U)
4346+ {
4347+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4348+ regs->u_regs[UREG_G2] = addr + 4;
4349+ addr = (save & 0x003FFFFFU) << 10;
4350+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4351+ regs->pc = addr;
4352+ regs->npc = addr+4;
4353+ return 3;
4354+ }
4355+ }
4356+ } while (0);
4357+
4358+ do { /* PaX: unpatched PLT emulation step 2 */
4359+ unsigned int save, call, nop;
4360+
4361+ err = get_user(save, (unsigned int *)(regs->pc-4));
4362+ err |= get_user(call, (unsigned int *)regs->pc);
4363+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4364+ if (err)
4365+ break;
4366+
4367+ if (save == 0x9DE3BFA8U &&
4368+ (call & 0xC0000000U) == 0x40000000U &&
4369+ nop == 0x01000000U)
4370+ {
4371+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4372+
4373+ regs->u_regs[UREG_RETPC] = regs->pc;
4374+ regs->pc = dl_resolve;
4375+ regs->npc = dl_resolve+4;
4376+ return 3;
4377+ }
4378+ } while (0);
4379+#endif
4380+
4381+ return 1;
4382+}
4383+
4384+void pax_report_insns(void *pc, void *sp)
4385+{
4386+ unsigned long i;
4387+
4388+ printk(KERN_ERR "PAX: bytes at PC: ");
4389+ for (i = 0; i < 8; i++) {
4390+ unsigned int c;
4391+ if (get_user(c, (unsigned int *)pc+i))
4392+ printk(KERN_CONT "???????? ");
4393+ else
4394+ printk(KERN_CONT "%08x ", c);
4395+ }
4396+ printk("\n");
4397+}
4398+#endif
4399+
4400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4401 int text_fault)
4402 {
4403@@ -281,6 +546,24 @@ good_area:
4404 if(!(vma->vm_flags & VM_WRITE))
4405 goto bad_area;
4406 } else {
4407+
4408+#ifdef CONFIG_PAX_PAGEEXEC
4409+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4410+ up_read(&mm->mmap_sem);
4411+ switch (pax_handle_fetch_fault(regs)) {
4412+
4413+#ifdef CONFIG_PAX_EMUPLT
4414+ case 2:
4415+ case 3:
4416+ return;
4417+#endif
4418+
4419+ }
4420+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4421+ do_group_exit(SIGKILL);
4422+ }
4423+#endif
4424+
4425 /* Allow reads even for write-only mappings */
4426 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4427 goto bad_area;
4428diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4429--- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4430+++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4431@@ -21,6 +21,9 @@
4432 #include <linux/kprobes.h>
4433 #include <linux/kdebug.h>
4434 #include <linux/percpu.h>
4435+#include <linux/slab.h>
4436+#include <linux/pagemap.h>
4437+#include <linux/compiler.h>
4438
4439 #include <asm/page.h>
4440 #include <asm/pgtable.h>
4441@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4442 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4443 regs->tpc);
4444 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4445- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4446+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4447 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4448 dump_stack();
4449 unhandled_fault(regs->tpc, current, regs);
4450@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4451 show_regs(regs);
4452 }
4453
4454+#ifdef CONFIG_PAX_PAGEEXEC
4455+#ifdef CONFIG_PAX_DLRESOLVE
4456+static void pax_emuplt_close(struct vm_area_struct *vma)
4457+{
4458+ vma->vm_mm->call_dl_resolve = 0UL;
4459+}
4460+
4461+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4462+{
4463+ unsigned int *kaddr;
4464+
4465+ vmf->page = alloc_page(GFP_HIGHUSER);
4466+ if (!vmf->page)
4467+ return VM_FAULT_OOM;
4468+
4469+ kaddr = kmap(vmf->page);
4470+ memset(kaddr, 0, PAGE_SIZE);
4471+ kaddr[0] = 0x9DE3BFA8U; /* save */
4472+ flush_dcache_page(vmf->page);
4473+ kunmap(vmf->page);
4474+ return VM_FAULT_MAJOR;
4475+}
4476+
4477+static const struct vm_operations_struct pax_vm_ops = {
4478+ .close = pax_emuplt_close,
4479+ .fault = pax_emuplt_fault
4480+};
4481+
4482+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4483+{
4484+ int ret;
4485+
4486+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4487+ vma->vm_mm = current->mm;
4488+ vma->vm_start = addr;
4489+ vma->vm_end = addr + PAGE_SIZE;
4490+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4491+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4492+ vma->vm_ops = &pax_vm_ops;
4493+
4494+ ret = insert_vm_struct(current->mm, vma);
4495+ if (ret)
4496+ return ret;
4497+
4498+ ++current->mm->total_vm;
4499+ return 0;
4500+}
4501+#endif
4502+
4503+/*
4504+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4505+ *
4506+ * returns 1 when task should be killed
4507+ * 2 when patched PLT trampoline was detected
4508+ * 3 when unpatched PLT trampoline was detected
4509+ */
4510+static int pax_handle_fetch_fault(struct pt_regs *regs)
4511+{
4512+
4513+#ifdef CONFIG_PAX_EMUPLT
4514+ int err;
4515+
4516+ do { /* PaX: patched PLT emulation #1 */
4517+ unsigned int sethi1, sethi2, jmpl;
4518+
4519+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4520+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4521+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4522+
4523+ if (err)
4524+ break;
4525+
4526+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4527+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4528+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4529+ {
4530+ unsigned long addr;
4531+
4532+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4533+ addr = regs->u_regs[UREG_G1];
4534+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4535+
4536+ if (test_thread_flag(TIF_32BIT))
4537+ addr &= 0xFFFFFFFFUL;
4538+
4539+ regs->tpc = addr;
4540+ regs->tnpc = addr+4;
4541+ return 2;
4542+ }
4543+ } while (0);
4544+
4545+ { /* PaX: patched PLT emulation #2 */
4546+ unsigned int ba;
4547+
4548+ err = get_user(ba, (unsigned int *)regs->tpc);
4549+
4550+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4551+ unsigned long addr;
4552+
4553+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ }
4563+
4564+ do { /* PaX: patched PLT emulation #3 */
4565+ unsigned int sethi, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4576+ nop == 0x01000000U)
4577+ {
4578+ unsigned long addr;
4579+
4580+ addr = (sethi & 0x003FFFFFU) << 10;
4581+ regs->u_regs[UREG_G1] = addr;
4582+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #4 */
4594+ unsigned int sethi, mov1, call, mov2;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4600+
4601+ if (err)
4602+ break;
4603+
4604+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4605+ mov1 == 0x8210000FU &&
4606+ (call & 0xC0000000U) == 0x40000000U &&
4607+ mov2 == 0x9E100001U)
4608+ {
4609+ unsigned long addr;
4610+
4611+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4612+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4613+
4614+ if (test_thread_flag(TIF_32BIT))
4615+ addr &= 0xFFFFFFFFUL;
4616+
4617+ regs->tpc = addr;
4618+ regs->tnpc = addr+4;
4619+ return 2;
4620+ }
4621+ } while (0);
4622+
4623+ do { /* PaX: patched PLT emulation #5 */
4624+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4625+
4626+ err = get_user(sethi, (unsigned int *)regs->tpc);
4627+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4628+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4629+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4630+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4631+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4632+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4633+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4634+
4635+ if (err)
4636+ break;
4637+
4638+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4639+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4640+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4641+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4642+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4643+ sllx == 0x83287020U &&
4644+ jmpl == 0x81C04005U &&
4645+ nop == 0x01000000U)
4646+ {
4647+ unsigned long addr;
4648+
4649+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4650+ regs->u_regs[UREG_G1] <<= 32;
4651+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4652+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4653+ regs->tpc = addr;
4654+ regs->tnpc = addr+4;
4655+ return 2;
4656+ }
4657+ } while (0);
4658+
4659+ do { /* PaX: patched PLT emulation #6 */
4660+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4661+
4662+ err = get_user(sethi, (unsigned int *)regs->tpc);
4663+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4664+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4665+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4666+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4667+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4675+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4676+ sllx == 0x83287020U &&
4677+ (or & 0xFFFFE000U) == 0x8A116000U &&
4678+ jmpl == 0x81C04005U &&
4679+ nop == 0x01000000U)
4680+ {
4681+ unsigned long addr;
4682+
4683+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4684+ regs->u_regs[UREG_G1] <<= 32;
4685+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4686+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4687+ regs->tpc = addr;
4688+ regs->tnpc = addr+4;
4689+ return 2;
4690+ }
4691+ } while (0);
4692+
4693+ do { /* PaX: unpatched PLT emulation step 1 */
4694+ unsigned int sethi, ba, nop;
4695+
4696+ err = get_user(sethi, (unsigned int *)regs->tpc);
4697+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4698+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4699+
4700+ if (err)
4701+ break;
4702+
4703+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4704+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4705+ nop == 0x01000000U)
4706+ {
4707+ unsigned long addr;
4708+ unsigned int save, call;
4709+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4710+
4711+ if ((ba & 0xFFC00000U) == 0x30800000U)
4712+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4713+ else
4714+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4715+
4716+ if (test_thread_flag(TIF_32BIT))
4717+ addr &= 0xFFFFFFFFUL;
4718+
4719+ err = get_user(save, (unsigned int *)addr);
4720+ err |= get_user(call, (unsigned int *)(addr+4));
4721+ err |= get_user(nop, (unsigned int *)(addr+8));
4722+ if (err)
4723+ break;
4724+
4725+#ifdef CONFIG_PAX_DLRESOLVE
4726+ if (save == 0x9DE3BFA8U &&
4727+ (call & 0xC0000000U) == 0x40000000U &&
4728+ nop == 0x01000000U)
4729+ {
4730+ struct vm_area_struct *vma;
4731+ unsigned long call_dl_resolve;
4732+
4733+ down_read(&current->mm->mmap_sem);
4734+ call_dl_resolve = current->mm->call_dl_resolve;
4735+ up_read(&current->mm->mmap_sem);
4736+ if (likely(call_dl_resolve))
4737+ goto emulate;
4738+
4739+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4740+
4741+ down_write(&current->mm->mmap_sem);
4742+ if (current->mm->call_dl_resolve) {
4743+ call_dl_resolve = current->mm->call_dl_resolve;
4744+ up_write(&current->mm->mmap_sem);
4745+ if (vma)
4746+ kmem_cache_free(vm_area_cachep, vma);
4747+ goto emulate;
4748+ }
4749+
4750+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752+ up_write(&current->mm->mmap_sem);
4753+ if (vma)
4754+ kmem_cache_free(vm_area_cachep, vma);
4755+ return 1;
4756+ }
4757+
4758+ if (pax_insert_vma(vma, call_dl_resolve)) {
4759+ up_write(&current->mm->mmap_sem);
4760+ kmem_cache_free(vm_area_cachep, vma);
4761+ return 1;
4762+ }
4763+
4764+ current->mm->call_dl_resolve = call_dl_resolve;
4765+ up_write(&current->mm->mmap_sem);
4766+
4767+emulate:
4768+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769+ regs->tpc = call_dl_resolve;
4770+ regs->tnpc = addr+4;
4771+ return 3;
4772+ }
4773+#endif
4774+
4775+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776+ if ((save & 0xFFC00000U) == 0x05000000U &&
4777+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4778+ nop == 0x01000000U)
4779+ {
4780+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781+ regs->u_regs[UREG_G2] = addr + 4;
4782+ addr = (save & 0x003FFFFFU) << 10;
4783+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ addr &= 0xFFFFFFFFUL;
4787+
4788+ regs->tpc = addr;
4789+ regs->tnpc = addr+4;
4790+ return 3;
4791+ }
4792+
4793+ /* PaX: 64-bit PLT stub */
4794+ err = get_user(sethi1, (unsigned int *)addr);
4795+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4796+ err |= get_user(or1, (unsigned int *)(addr+8));
4797+ err |= get_user(or2, (unsigned int *)(addr+12));
4798+ err |= get_user(sllx, (unsigned int *)(addr+16));
4799+ err |= get_user(add, (unsigned int *)(addr+20));
4800+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4801+ err |= get_user(nop, (unsigned int *)(addr+28));
4802+ if (err)
4803+ break;
4804+
4805+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4806+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4807+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4808+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4809+ sllx == 0x89293020U &&
4810+ add == 0x8A010005U &&
4811+ jmpl == 0x89C14000U &&
4812+ nop == 0x01000000U)
4813+ {
4814+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4815+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4816+ regs->u_regs[UREG_G4] <<= 32;
4817+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4818+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4819+ regs->u_regs[UREG_G4] = addr + 24;
4820+ addr = regs->u_regs[UREG_G5];
4821+ regs->tpc = addr;
4822+ regs->tnpc = addr+4;
4823+ return 3;
4824+ }
4825+ }
4826+ } while (0);
4827+
4828+#ifdef CONFIG_PAX_DLRESOLVE
4829+ do { /* PaX: unpatched PLT emulation step 2 */
4830+ unsigned int save, call, nop;
4831+
4832+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4833+ err |= get_user(call, (unsigned int *)regs->tpc);
4834+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4835+ if (err)
4836+ break;
4837+
4838+ if (save == 0x9DE3BFA8U &&
4839+ (call & 0xC0000000U) == 0x40000000U &&
4840+ nop == 0x01000000U)
4841+ {
4842+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4843+
4844+ if (test_thread_flag(TIF_32BIT))
4845+ dl_resolve &= 0xFFFFFFFFUL;
4846+
4847+ regs->u_regs[UREG_RETPC] = regs->tpc;
4848+ regs->tpc = dl_resolve;
4849+ regs->tnpc = dl_resolve+4;
4850+ return 3;
4851+ }
4852+ } while (0);
4853+#endif
4854+
4855+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4856+ unsigned int sethi, ba, nop;
4857+
4858+ err = get_user(sethi, (unsigned int *)regs->tpc);
4859+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4860+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4861+
4862+ if (err)
4863+ break;
4864+
4865+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4866+ (ba & 0xFFF00000U) == 0x30600000U &&
4867+ nop == 0x01000000U)
4868+ {
4869+ unsigned long addr;
4870+
4871+ addr = (sethi & 0x003FFFFFU) << 10;
4872+ regs->u_regs[UREG_G1] = addr;
4873+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4874+
4875+ if (test_thread_flag(TIF_32BIT))
4876+ addr &= 0xFFFFFFFFUL;
4877+
4878+ regs->tpc = addr;
4879+ regs->tnpc = addr+4;
4880+ return 2;
4881+ }
4882+ } while (0);
4883+
4884+#endif
4885+
4886+ return 1;
4887+}
4888+
4889+void pax_report_insns(void *pc, void *sp)
4890+{
4891+ unsigned long i;
4892+
4893+ printk(KERN_ERR "PAX: bytes at PC: ");
4894+ for (i = 0; i < 8; i++) {
4895+ unsigned int c;
4896+ if (get_user(c, (unsigned int *)pc+i))
4897+ printk(KERN_CONT "???????? ");
4898+ else
4899+ printk(KERN_CONT "%08x ", c);
4900+ }
4901+ printk("\n");
4902+}
4903+#endif
4904+
4905 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4906 {
4907 struct mm_struct *mm = current->mm;
4908@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4909 if (!vma)
4910 goto bad_area;
4911
4912+#ifdef CONFIG_PAX_PAGEEXEC
4913+ /* PaX: detect ITLB misses on non-exec pages */
4914+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4915+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4916+ {
4917+ if (address != regs->tpc)
4918+ goto good_area;
4919+
4920+ up_read(&mm->mmap_sem);
4921+ switch (pax_handle_fetch_fault(regs)) {
4922+
4923+#ifdef CONFIG_PAX_EMUPLT
4924+ case 2:
4925+ case 3:
4926+ return;
4927+#endif
4928+
4929+ }
4930+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4931+ do_group_exit(SIGKILL);
4932+ }
4933+#endif
4934+
4935 /* Pure DTLB misses do not tell us whether the fault causing
4936 * load/store/atomic was a write or not, it only says that there
4937 * was no match. So in such a case we (carefully) read the
4938diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4939--- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4940+++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4941@@ -68,7 +68,7 @@ full_search:
4942 }
4943 return -ENOMEM;
4944 }
4945- if (likely(!vma || addr + len <= vma->vm_start)) {
4946+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4947 /*
4948 * Remember the place where we stopped the search:
4949 */
4950@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4951 /* make sure it can fit in the remaining address space */
4952 if (likely(addr > len)) {
4953 vma = find_vma(mm, addr-len);
4954- if (!vma || addr <= vma->vm_start) {
4955+ if (check_heap_stack_gap(vma, addr - len, len)) {
4956 /* remember the address as a hint for next time */
4957 return (mm->free_area_cache = addr-len);
4958 }
4959@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4960 if (unlikely(mm->mmap_base < len))
4961 goto bottomup;
4962
4963- addr = (mm->mmap_base-len) & HPAGE_MASK;
4964+ addr = mm->mmap_base - len;
4965
4966 do {
4967+ addr &= HPAGE_MASK;
4968 /*
4969 * Lookup failure means no vma is above this address,
4970 * else if new region fits below vma->vm_start,
4971 * return with success:
4972 */
4973 vma = find_vma(mm, addr);
4974- if (likely(!vma || addr+len <= vma->vm_start)) {
4975+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4976 /* remember the address as a hint for next time */
4977 return (mm->free_area_cache = addr);
4978 }
4979@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4980 mm->cached_hole_size = vma->vm_start - addr;
4981
4982 /* try just below the current vma->vm_start */
4983- addr = (vma->vm_start-len) & HPAGE_MASK;
4984- } while (likely(len < vma->vm_start));
4985+ addr = skip_heap_stack_gap(vma, len);
4986+ } while (!IS_ERR_VALUE(addr));
4987
4988 bottomup:
4989 /*
4990@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4991 if (addr) {
4992 addr = ALIGN(addr, HPAGE_SIZE);
4993 vma = find_vma(mm, addr);
4994- if (task_size - len >= addr &&
4995- (!vma || addr + len <= vma->vm_start))
4996+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4997 return addr;
4998 }
4999 if (mm->get_unmapped_area == arch_get_unmapped_area)
5000diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5001--- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5002+++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5003@@ -318,6 +318,9 @@ extern void device_scan(void);
5004 pgprot_t PAGE_SHARED __read_mostly;
5005 EXPORT_SYMBOL(PAGE_SHARED);
5006
5007+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5008+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5009+
5010 void __init paging_init(void)
5011 {
5012 switch(sparc_cpu_model) {
5013@@ -346,17 +349,17 @@ void __init paging_init(void)
5014
5015 /* Initialize the protection map with non-constant, MMU dependent values. */
5016 protection_map[0] = PAGE_NONE;
5017- protection_map[1] = PAGE_READONLY;
5018- protection_map[2] = PAGE_COPY;
5019- protection_map[3] = PAGE_COPY;
5020+ protection_map[1] = PAGE_READONLY_NOEXEC;
5021+ protection_map[2] = PAGE_COPY_NOEXEC;
5022+ protection_map[3] = PAGE_COPY_NOEXEC;
5023 protection_map[4] = PAGE_READONLY;
5024 protection_map[5] = PAGE_READONLY;
5025 protection_map[6] = PAGE_COPY;
5026 protection_map[7] = PAGE_COPY;
5027 protection_map[8] = PAGE_NONE;
5028- protection_map[9] = PAGE_READONLY;
5029- protection_map[10] = PAGE_SHARED;
5030- protection_map[11] = PAGE_SHARED;
5031+ protection_map[9] = PAGE_READONLY_NOEXEC;
5032+ protection_map[10] = PAGE_SHARED_NOEXEC;
5033+ protection_map[11] = PAGE_SHARED_NOEXEC;
5034 protection_map[12] = PAGE_READONLY;
5035 protection_map[13] = PAGE_READONLY;
5036 protection_map[14] = PAGE_SHARED;
5037diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5038--- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5039+++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5040@@ -2,7 +2,7 @@
5041 #
5042
5043 asflags-y := -ansi
5044-ccflags-y := -Werror
5045+#ccflags-y := -Werror
5046
5047 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5048 obj-y += fault_$(BITS).o
5049diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5050--- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5051+++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5052@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5053 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5054 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5055 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5056+
5057+#ifdef CONFIG_PAX_PAGEEXEC
5058+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5059+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5060+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5061+#endif
5062+
5063 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5064 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5065
5066diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5067--- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5068+++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5069@@ -23,6 +23,7 @@ enum km_type {
5070 KM_IRQ1,
5071 KM_SOFTIRQ0,
5072 KM_SOFTIRQ1,
5073+ KM_CLEARPAGE,
5074 KM_TYPE_NR
5075 };
5076
5077diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5078--- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5079+++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5080@@ -14,6 +14,9 @@
5081 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5082 #define PAGE_MASK (~(PAGE_SIZE-1))
5083
5084+#define ktla_ktva(addr) (addr)
5085+#define ktva_ktla(addr) (addr)
5086+
5087 #ifndef __ASSEMBLY__
5088
5089 struct page;
5090diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5091--- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5092+++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5093@@ -404,22 +404,6 @@ int singlestepping(void * t)
5094 return 2;
5095 }
5096
5097-/*
5098- * Only x86 and x86_64 have an arch_align_stack().
5099- * All other arches have "#define arch_align_stack(x) (x)"
5100- * in their asm/system.h
5101- * As this is included in UML from asm-um/system-generic.h,
5102- * we can use it to behave as the subarch does.
5103- */
5104-#ifndef arch_align_stack
5105-unsigned long arch_align_stack(unsigned long sp)
5106-{
5107- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5108- sp -= get_random_int() % 8192;
5109- return sp & ~0xf;
5110-}
5111-#endif
5112-
5113 unsigned long get_wchan(struct task_struct *p)
5114 {
5115 unsigned long stack_page, sp, ip;
5116diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5117--- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5118+++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5119@@ -11,6 +11,21 @@
5120 #include "asm/uaccess.h"
5121 #include "asm/unistd.h"
5122
5123+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5124+{
5125+ unsigned long pax_task_size = TASK_SIZE;
5126+
5127+#ifdef CONFIG_PAX_SEGMEXEC
5128+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5129+ pax_task_size = SEGMEXEC_TASK_SIZE;
5130+#endif
5131+
5132+ if (len > pax_task_size || addr > pax_task_size - len)
5133+ return -EINVAL;
5134+
5135+ return 0;
5136+}
5137+
5138 /*
5139 * The prototype on i386 is:
5140 *
5141diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5142--- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5143+++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5144@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5145 u8 v;
5146 const u32 *p = (const u32 *)addr;
5147
5148- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5149+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5150 return v;
5151 }
5152
5153@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5154
5155 static inline void set_bit(int nr, void *addr)
5156 {
5157- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5158+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5159 }
5160
5161 #endif /* BOOT_BITOPS_H */
5162diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5163--- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5164+++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5165@@ -85,7 +85,7 @@ static inline void io_delay(void)
5166 static inline u16 ds(void)
5167 {
5168 u16 seg;
5169- asm("movw %%ds,%0" : "=rm" (seg));
5170+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5171 return seg;
5172 }
5173
5174@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5175 static inline int memcmp(const void *s1, const void *s2, size_t len)
5176 {
5177 u8 diff;
5178- asm("repe; cmpsb; setnz %0"
5179+ asm volatile("repe; cmpsb; setnz %0"
5180 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5181 return diff;
5182 }
5183diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5184--- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5185+++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5186@@ -76,7 +76,7 @@ ENTRY(startup_32)
5187 notl %eax
5188 andl %eax, %ebx
5189 #else
5190- movl $LOAD_PHYSICAL_ADDR, %ebx
5191+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5192 #endif
5193
5194 /* Target address to relocate to for decompression */
5195@@ -162,7 +162,7 @@ relocated:
5196 * and where it was actually loaded.
5197 */
5198 movl %ebp, %ebx
5199- subl $LOAD_PHYSICAL_ADDR, %ebx
5200+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5201 jz 2f /* Nothing to be done if loaded at compiled addr. */
5202 /*
5203 * Process relocations.
5204@@ -170,8 +170,7 @@ relocated:
5205
5206 1: subl $4, %edi
5207 movl (%edi), %ecx
5208- testl %ecx, %ecx
5209- jz 2f
5210+ jecxz 2f
5211 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5212 jmp 1b
5213 2:
5214diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5215--- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5216+++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5217@@ -91,7 +91,7 @@ ENTRY(startup_32)
5218 notl %eax
5219 andl %eax, %ebx
5220 #else
5221- movl $LOAD_PHYSICAL_ADDR, %ebx
5222+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5223 #endif
5224
5225 /* Target address to relocate to for decompression */
5226@@ -233,7 +233,7 @@ ENTRY(startup_64)
5227 notq %rax
5228 andq %rax, %rbp
5229 #else
5230- movq $LOAD_PHYSICAL_ADDR, %rbp
5231+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5232 #endif
5233
5234 /* Target address to relocate to for decompression */
5235diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5236--- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5237+++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5238@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5239 KBUILD_CFLAGS += $(cflags-y)
5240 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5241 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5242+ifdef CONSTIFY_PLUGIN
5243+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5244+endif
5245
5246 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5247 GCOV_PROFILE := n
5248diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5249--- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5250+++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5251@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5252 case PT_LOAD:
5253 #ifdef CONFIG_RELOCATABLE
5254 dest = output;
5255- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5256+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5257 #else
5258 dest = (void *)(phdr->p_paddr);
5259 #endif
5260@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5261 error("Destination address too large");
5262 #endif
5263 #ifndef CONFIG_RELOCATABLE
5264- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5265+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5266 error("Wrong destination address");
5267 #endif
5268
5269diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5270--- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5271+++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5272@@ -13,8 +13,11 @@
5273
5274 static void die(char *fmt, ...);
5275
5276+#include "../../../../include/generated/autoconf.h"
5277+
5278 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5279 static Elf32_Ehdr ehdr;
5280+static Elf32_Phdr *phdr;
5281 static unsigned long reloc_count, reloc_idx;
5282 static unsigned long *relocs;
5283
5284@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5285 }
5286 }
5287
5288+static void read_phdrs(FILE *fp)
5289+{
5290+ unsigned int i;
5291+
5292+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5293+ if (!phdr) {
5294+ die("Unable to allocate %d program headers\n",
5295+ ehdr.e_phnum);
5296+ }
5297+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5298+ die("Seek to %d failed: %s\n",
5299+ ehdr.e_phoff, strerror(errno));
5300+ }
5301+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5302+ die("Cannot read ELF program headers: %s\n",
5303+ strerror(errno));
5304+ }
5305+ for(i = 0; i < ehdr.e_phnum; i++) {
5306+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5307+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5308+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5309+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5310+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5311+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5312+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5313+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5314+ }
5315+
5316+}
5317+
5318 static void read_shdrs(FILE *fp)
5319 {
5320- int i;
5321+ unsigned int i;
5322 Elf32_Shdr shdr;
5323
5324 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5325@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5326
5327 static void read_strtabs(FILE *fp)
5328 {
5329- int i;
5330+ unsigned int i;
5331 for (i = 0; i < ehdr.e_shnum; i++) {
5332 struct section *sec = &secs[i];
5333 if (sec->shdr.sh_type != SHT_STRTAB) {
5334@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5335
5336 static void read_symtabs(FILE *fp)
5337 {
5338- int i,j;
5339+ unsigned int i,j;
5340 for (i = 0; i < ehdr.e_shnum; i++) {
5341 struct section *sec = &secs[i];
5342 if (sec->shdr.sh_type != SHT_SYMTAB) {
5343@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5344
5345 static void read_relocs(FILE *fp)
5346 {
5347- int i,j;
5348+ unsigned int i,j;
5349+ uint32_t base;
5350+
5351 for (i = 0; i < ehdr.e_shnum; i++) {
5352 struct section *sec = &secs[i];
5353 if (sec->shdr.sh_type != SHT_REL) {
5354@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5355 die("Cannot read symbol table: %s\n",
5356 strerror(errno));
5357 }
5358+ base = 0;
5359+ for (j = 0; j < ehdr.e_phnum; j++) {
5360+ if (phdr[j].p_type != PT_LOAD )
5361+ continue;
5362+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5363+ continue;
5364+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5365+ break;
5366+ }
5367 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5368 Elf32_Rel *rel = &sec->reltab[j];
5369- rel->r_offset = elf32_to_cpu(rel->r_offset);
5370+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5371 rel->r_info = elf32_to_cpu(rel->r_info);
5372 }
5373 }
5374@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5375
5376 static void print_absolute_symbols(void)
5377 {
5378- int i;
5379+ unsigned int i;
5380 printf("Absolute symbols\n");
5381 printf(" Num: Value Size Type Bind Visibility Name\n");
5382 for (i = 0; i < ehdr.e_shnum; i++) {
5383 struct section *sec = &secs[i];
5384 char *sym_strtab;
5385 Elf32_Sym *sh_symtab;
5386- int j;
5387+ unsigned int j;
5388
5389 if (sec->shdr.sh_type != SHT_SYMTAB) {
5390 continue;
5391@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5392
5393 static void print_absolute_relocs(void)
5394 {
5395- int i, printed = 0;
5396+ unsigned int i, printed = 0;
5397
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 struct section *sec_applies, *sec_symtab;
5401 char *sym_strtab;
5402 Elf32_Sym *sh_symtab;
5403- int j;
5404+ unsigned int j;
5405 if (sec->shdr.sh_type != SHT_REL) {
5406 continue;
5407 }
5408@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5409
5410 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5411 {
5412- int i;
5413+ unsigned int i;
5414 /* Walk through the relocations */
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5416 char *sym_strtab;
5417 Elf32_Sym *sh_symtab;
5418 struct section *sec_applies, *sec_symtab;
5419- int j;
5420+ unsigned int j;
5421 struct section *sec = &secs[i];
5422
5423 if (sec->shdr.sh_type != SHT_REL) {
5424@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5425 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5426 continue;
5427 }
5428+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5429+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5430+ continue;
5431+
5432+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5433+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5434+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5435+ continue;
5436+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5437+ continue;
5438+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5439+ continue;
5440+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5441+ continue;
5442+#endif
5443+
5444 switch (r_type) {
5445 case R_386_NONE:
5446 case R_386_PC32:
5447@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5448
5449 static void emit_relocs(int as_text)
5450 {
5451- int i;
5452+ unsigned int i;
5453 /* Count how many relocations I have and allocate space for them. */
5454 reloc_count = 0;
5455 walk_relocs(count_reloc);
5456@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5457 fname, strerror(errno));
5458 }
5459 read_ehdr(fp);
5460+ read_phdrs(fp);
5461 read_shdrs(fp);
5462 read_strtabs(fp);
5463 read_symtabs(fp);
5464diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5465--- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5466+++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5467@@ -74,7 +74,7 @@ static int has_fpu(void)
5468 u16 fcw = -1, fsw = -1;
5469 u32 cr0;
5470
5471- asm("movl %%cr0,%0" : "=r" (cr0));
5472+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5473 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5474 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5475 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5476@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5477 {
5478 u32 f0, f1;
5479
5480- asm("pushfl ; "
5481+ asm volatile("pushfl ; "
5482 "pushfl ; "
5483 "popl %0 ; "
5484 "movl %0,%1 ; "
5485@@ -115,7 +115,7 @@ static void get_flags(void)
5486 set_bit(X86_FEATURE_FPU, cpu.flags);
5487
5488 if (has_eflag(X86_EFLAGS_ID)) {
5489- asm("cpuid"
5490+ asm volatile("cpuid"
5491 : "=a" (max_intel_level),
5492 "=b" (cpu_vendor[0]),
5493 "=d" (cpu_vendor[1]),
5494@@ -124,7 +124,7 @@ static void get_flags(void)
5495
5496 if (max_intel_level >= 0x00000001 &&
5497 max_intel_level <= 0x0000ffff) {
5498- asm("cpuid"
5499+ asm volatile("cpuid"
5500 : "=a" (tfms),
5501 "=c" (cpu.flags[4]),
5502 "=d" (cpu.flags[0])
5503@@ -136,7 +136,7 @@ static void get_flags(void)
5504 cpu.model += ((tfms >> 16) & 0xf) << 4;
5505 }
5506
5507- asm("cpuid"
5508+ asm volatile("cpuid"
5509 : "=a" (max_amd_level)
5510 : "a" (0x80000000)
5511 : "ebx", "ecx", "edx");
5512@@ -144,7 +144,7 @@ static void get_flags(void)
5513 if (max_amd_level >= 0x80000001 &&
5514 max_amd_level <= 0x8000ffff) {
5515 u32 eax = 0x80000001;
5516- asm("cpuid"
5517+ asm volatile("cpuid"
5518 : "+a" (eax),
5519 "=c" (cpu.flags[6]),
5520 "=d" (cpu.flags[1])
5521@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5522 u32 ecx = MSR_K7_HWCR;
5523 u32 eax, edx;
5524
5525- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5526+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5527 eax &= ~(1 << 15);
5528- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5530
5531 get_flags(); /* Make sure it really did something */
5532 err = check_flags();
5533@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5534 u32 ecx = MSR_VIA_FCR;
5535 u32 eax, edx;
5536
5537- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5538+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5539 eax |= (1<<1)|(1<<7);
5540- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5541+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5542
5543 set_bit(X86_FEATURE_CX8, cpu.flags);
5544 err = check_flags();
5545@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5546 u32 eax, edx;
5547 u32 level = 1;
5548
5549- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5550- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5551- asm("cpuid"
5552+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5553+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5554+ asm volatile("cpuid"
5555 : "+a" (level), "=d" (cpu.flags[0])
5556 : : "ecx", "ebx");
5557- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5559
5560 err = check_flags();
5561 }
5562diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5563--- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5564+++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5565@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5566 # single linked list of
5567 # struct setup_data
5568
5569-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5570+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5571
5572 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5573 #define VO_INIT_SIZE (VO__end - VO__text)
5574diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5575--- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5576+++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5577@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5578 $(call cc-option, -fno-stack-protector) \
5579 $(call cc-option, -mpreferred-stack-boundary=2)
5580 KBUILD_CFLAGS += $(call cc-option, -m32)
5581+ifdef CONSTIFY_PLUGIN
5582+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5583+endif
5584 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5585 GCOV_PROFILE := n
5586
5587diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5588--- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5589+++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5590@@ -19,7 +19,7 @@
5591
5592 static int detect_memory_e820(void)
5593 {
5594- int count = 0;
5595+ unsigned int count = 0;
5596 struct biosregs ireg, oreg;
5597 struct e820entry *desc = boot_params.e820_map;
5598 static struct e820entry buf; /* static so it is zeroed */
5599diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5600--- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5601+++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5602@@ -96,7 +96,7 @@ static void store_mode_params(void)
5603 static unsigned int get_entry(void)
5604 {
5605 char entry_buf[4];
5606- int i, len = 0;
5607+ unsigned int i, len = 0;
5608 int key;
5609 unsigned int v;
5610
5611diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5612--- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5613+++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5614@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5615
5616 boot_params.screen_info.vesapm_seg = oreg.es;
5617 boot_params.screen_info.vesapm_off = oreg.di;
5618+ boot_params.screen_info.vesapm_size = oreg.cx;
5619 }
5620
5621 /*
5622diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5623--- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5624+++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5625@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5626 unsigned long dump_start, dump_size;
5627 struct user32 dump;
5628
5629+ memset(&dump, 0, sizeof(dump));
5630+
5631 fs = get_fs();
5632 set_fs(KERNEL_DS);
5633 has_dumped = 1;
5634diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5635--- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5636+++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5637@@ -13,6 +13,7 @@
5638 #include <asm/thread_info.h>
5639 #include <asm/segment.h>
5640 #include <asm/irqflags.h>
5641+#include <asm/pgtable.h>
5642 #include <linux/linkage.h>
5643
5644 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5645@@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5646 ENDPROC(native_irq_enable_sysexit)
5647 #endif
5648
5649+ .macro pax_enter_kernel_user
5650+#ifdef CONFIG_PAX_MEMORY_UDEREF
5651+ call pax_enter_kernel_user
5652+#endif
5653+ .endm
5654+
5655+ .macro pax_exit_kernel_user
5656+#ifdef CONFIG_PAX_MEMORY_UDEREF
5657+ call pax_exit_kernel_user
5658+#endif
5659+#ifdef CONFIG_PAX_RANDKSTACK
5660+ pushq %rax
5661+ call pax_randomize_kstack
5662+ popq %rax
5663+#endif
5664+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5665+ call pax_erase_kstack
5666+#endif
5667+ .endm
5668+
5669+ .macro pax_erase_kstack
5670+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5671+ call pax_erase_kstack
5672+#endif
5673+ .endm
5674+
5675 /*
5676 * 32bit SYSENTER instruction entry.
5677 *
5678@@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5679 CFI_REGISTER rsp,rbp
5680 SWAPGS_UNSAFE_STACK
5681 movq PER_CPU_VAR(kernel_stack), %rsp
5682- addq $(KERNEL_STACK_OFFSET),%rsp
5683+ pax_enter_kernel_user
5684 /*
5685 * No need to follow this irqs on/off section: the syscall
5686 * disabled irqs, here we enable it straight after entry:
5687@@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5688 CFI_REL_OFFSET rsp,0
5689 pushfq_cfi
5690 /*CFI_REL_OFFSET rflags,0*/
5691- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5692+ GET_THREAD_INFO(%r10)
5693+ movl TI_sysenter_return(%r10), %r10d
5694 CFI_REGISTER rip,r10
5695 pushq_cfi $__USER32_CS
5696 /*CFI_REL_OFFSET cs,0*/
5697@@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5698 SAVE_ARGS 0,0,1
5699 /* no need to do an access_ok check here because rbp has been
5700 32bit zero extended */
5701+
5702+#ifdef CONFIG_PAX_MEMORY_UDEREF
5703+ mov $PAX_USER_SHADOW_BASE,%r10
5704+ add %r10,%rbp
5705+#endif
5706+
5707 1: movl (%rbp),%ebp
5708 .section __ex_table,"a"
5709 .quad 1b,ia32_badarg
5710@@ -168,6 +202,7 @@ sysenter_dispatch:
5711 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5712 jnz sysexit_audit
5713 sysexit_from_sys_call:
5714+ pax_exit_kernel_user
5715 andl $~TS_COMPAT,TI_status(%r10)
5716 /* clear IF, that popfq doesn't enable interrupts early */
5717 andl $~0x200,EFLAGS-R11(%rsp)
5718@@ -194,6 +229,9 @@ sysexit_from_sys_call:
5719 movl %eax,%esi /* 2nd arg: syscall number */
5720 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5721 call audit_syscall_entry
5722+
5723+ pax_erase_kstack
5724+
5725 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5726 cmpq $(IA32_NR_syscalls-1),%rax
5727 ja ia32_badsys
5728@@ -246,6 +284,9 @@ sysenter_tracesys:
5729 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5730 movq %rsp,%rdi /* &pt_regs -> arg1 */
5731 call syscall_trace_enter
5732+
5733+ pax_erase_kstack
5734+
5735 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5736 RESTORE_REST
5737 cmpq $(IA32_NR_syscalls-1),%rax
5738@@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5739 ENTRY(ia32_cstar_target)
5740 CFI_STARTPROC32 simple
5741 CFI_SIGNAL_FRAME
5742- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5743+ CFI_DEF_CFA rsp,0
5744 CFI_REGISTER rip,rcx
5745 /*CFI_REGISTER rflags,r11*/
5746 SWAPGS_UNSAFE_STACK
5747 movl %esp,%r8d
5748 CFI_REGISTER rsp,r8
5749 movq PER_CPU_VAR(kernel_stack),%rsp
5750+
5751+#ifdef CONFIG_PAX_MEMORY_UDEREF
5752+ pax_enter_kernel_user
5753+#endif
5754+
5755 /*
5756 * No need to follow this irqs on/off section: the syscall
5757 * disabled irqs and here we enable it straight after entry:
5758 */
5759 ENABLE_INTERRUPTS(CLBR_NONE)
5760- SAVE_ARGS 8,1,1
5761+ SAVE_ARGS 8*6,1,1
5762 movl %eax,%eax /* zero extension */
5763 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5764 movq %rcx,RIP-ARGOFFSET(%rsp)
5765@@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5766 /* no need to do an access_ok check here because r8 has been
5767 32bit zero extended */
5768 /* hardware stack frame is complete now */
5769+
5770+#ifdef CONFIG_PAX_MEMORY_UDEREF
5771+ mov $PAX_USER_SHADOW_BASE,%r10
5772+ add %r10,%r8
5773+#endif
5774+
5775 1: movl (%r8),%r9d
5776 .section __ex_table,"a"
5777 .quad 1b,ia32_badarg
5778@@ -327,6 +379,7 @@ cstar_dispatch:
5779 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5780 jnz sysretl_audit
5781 sysretl_from_sys_call:
5782+ pax_exit_kernel_user
5783 andl $~TS_COMPAT,TI_status(%r10)
5784 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5785 movl RIP-ARGOFFSET(%rsp),%ecx
5786@@ -364,6 +417,9 @@ cstar_tracesys:
5787 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5788 movq %rsp,%rdi /* &pt_regs -> arg1 */
5789 call syscall_trace_enter
5790+
5791+ pax_erase_kstack
5792+
5793 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5794 RESTORE_REST
5795 xchgl %ebp,%r9d
5796@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5797 CFI_REL_OFFSET rip,RIP-RIP
5798 PARAVIRT_ADJUST_EXCEPTION_FRAME
5799 SWAPGS
5800+ pax_enter_kernel_user
5801 /*
5802 * No need to follow this irqs on/off section: the syscall
5803 * disabled irqs and here we enable it straight after entry:
5804@@ -441,6 +498,9 @@ ia32_tracesys:
5805 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5806 movq %rsp,%rdi /* &pt_regs -> arg1 */
5807 call syscall_trace_enter
5808+
5809+ pax_erase_kstack
5810+
5811 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5812 RESTORE_REST
5813 cmpq $(IA32_NR_syscalls-1),%rax
5814diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5815--- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5816+++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5817@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5818 sp -= frame_size;
5819 /* Align the stack pointer according to the i386 ABI,
5820 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5821- sp = ((sp + 4) & -16ul) - 4;
5822+ sp = ((sp - 12) & -16ul) - 4;
5823 return (void __user *) sp;
5824 }
5825
5826@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5827 * These are actually not used anymore, but left because some
5828 * gdb versions depend on them as a marker.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5836 0xb8,
5837 __NR_ia32_rt_sigreturn,
5838 0x80cd,
5839- 0,
5840+ 0
5841 };
5842
5843 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5844@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5845
5846 if (ka->sa.sa_flags & SA_RESTORER)
5847 restorer = ka->sa.sa_restorer;
5848+ else if (current->mm->context.vdso)
5849+ /* Return stub is in 32bit vsyscall page */
5850+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5851 else
5852- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5853- rt_sigreturn);
5854+ restorer = &frame->retcode;
5855 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5856
5857 /*
5858 * Not actually used anymore, but left because some gdb
5859 * versions need it.
5860 */
5861- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5862+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5863 } put_user_catch(err);
5864
5865 if (err)
5866diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5867--- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5868+++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5869@@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5870 ".section .discard,\"aw\",@progbits\n" \
5871 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5872 ".previous\n" \
5873- ".section .altinstr_replacement, \"ax\"\n" \
5874+ ".section .altinstr_replacement, \"a\"\n" \
5875 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5876 ".previous"
5877
5878diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5879--- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5880+++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5881@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5882 __asm__ __volatile__(APM_DO_ZERO_SEGS
5883 "pushl %%edi\n\t"
5884 "pushl %%ebp\n\t"
5885- "lcall *%%cs:apm_bios_entry\n\t"
5886+ "lcall *%%ss:apm_bios_entry\n\t"
5887 "setc %%al\n\t"
5888 "popl %%ebp\n\t"
5889 "popl %%edi\n\t"
5890@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5891 __asm__ __volatile__(APM_DO_ZERO_SEGS
5892 "pushl %%edi\n\t"
5893 "pushl %%ebp\n\t"
5894- "lcall *%%cs:apm_bios_entry\n\t"
5895+ "lcall *%%ss:apm_bios_entry\n\t"
5896 "setc %%bl\n\t"
5897 "popl %%ebp\n\t"
5898 "popl %%edi\n\t"
5899diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5900--- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5901+++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5902@@ -12,6 +12,14 @@ typedef struct {
5903 u64 __aligned(8) counter;
5904 } atomic64_t;
5905
5906+#ifdef CONFIG_PAX_REFCOUNT
5907+typedef struct {
5908+ u64 __aligned(8) counter;
5909+} atomic64_unchecked_t;
5910+#else
5911+typedef atomic64_t atomic64_unchecked_t;
5912+#endif
5913+
5914 #define ATOMIC64_INIT(val) { (val) }
5915
5916 #ifdef CONFIG_X86_CMPXCHG64
5917@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5918 }
5919
5920 /**
5921+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5922+ * @p: pointer to type atomic64_unchecked_t
5923+ * @o: expected value
5924+ * @n: new value
5925+ *
5926+ * Atomically sets @v to @n if it was equal to @o and returns
5927+ * the old value.
5928+ */
5929+
5930+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5931+{
5932+ return cmpxchg64(&v->counter, o, n);
5933+}
5934+
5935+/**
5936 * atomic64_xchg - xchg atomic64 variable
5937 * @v: pointer to type atomic64_t
5938 * @n: value to assign
5939@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5940 }
5941
5942 /**
5943+ * atomic64_set_unchecked - set atomic64 variable
5944+ * @v: pointer to type atomic64_unchecked_t
5945+ * @n: value to assign
5946+ *
5947+ * Atomically sets the value of @v to @n.
5948+ */
5949+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5950+{
5951+ unsigned high = (unsigned)(i >> 32);
5952+ unsigned low = (unsigned)i;
5953+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5954+ : "+b" (low), "+c" (high)
5955+ : "S" (v)
5956+ : "eax", "edx", "memory"
5957+ );
5958+}
5959+
5960+/**
5961 * atomic64_read - read atomic64 variable
5962 * @v: pointer to type atomic64_t
5963 *
5964@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5965 }
5966
5967 /**
5968+ * atomic64_read_unchecked - read atomic64 variable
5969+ * @v: pointer to type atomic64_unchecked_t
5970+ *
5971+ * Atomically reads the value of @v and returns it.
5972+ */
5973+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5974+{
5975+ long long r;
5976+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5977+ : "=A" (r), "+c" (v)
5978+ : : "memory"
5979+ );
5980+ return r;
5981+ }
5982+
5983+/**
5984 * atomic64_add_return - add and return
5985 * @i: integer value to add
5986 * @v: pointer to type atomic64_t
5987@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5988 return i;
5989 }
5990
5991+/**
5992+ * atomic64_add_return_unchecked - add and return
5993+ * @i: integer value to add
5994+ * @v: pointer to type atomic64_unchecked_t
5995+ *
5996+ * Atomically adds @i to @v and returns @i + *@v
5997+ */
5998+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5999+{
6000+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6001+ : "+A" (i), "+c" (v)
6002+ : : "memory"
6003+ );
6004+ return i;
6005+}
6006+
6007 /*
6008 * Other variants with different arithmetic operators:
6009 */
6010@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6011 return a;
6012 }
6013
6014+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6015+{
6016+ long long a;
6017+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6018+ : "=A" (a)
6019+ : "S" (v)
6020+ : "memory", "ecx"
6021+ );
6022+ return a;
6023+}
6024+
6025 static inline long long atomic64_dec_return(atomic64_t *v)
6026 {
6027 long long a;
6028@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6029 }
6030
6031 /**
6032+ * atomic64_add_unchecked - add integer to atomic64 variable
6033+ * @i: integer value to add
6034+ * @v: pointer to type atomic64_unchecked_t
6035+ *
6036+ * Atomically adds @i to @v.
6037+ */
6038+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6039+{
6040+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6041+ : "+A" (i), "+c" (v)
6042+ : : "memory"
6043+ );
6044+ return i;
6045+}
6046+
6047+/**
6048 * atomic64_sub - subtract the atomic64 variable
6049 * @i: integer value to subtract
6050 * @v: pointer to type atomic64_t
6051diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6052--- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6053+++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6054@@ -18,7 +18,19 @@
6055 */
6056 static inline long atomic64_read(const atomic64_t *v)
6057 {
6058- return (*(volatile long *)&(v)->counter);
6059+ return (*(volatile const long *)&(v)->counter);
6060+}
6061+
6062+/**
6063+ * atomic64_read_unchecked - read atomic64 variable
6064+ * @v: pointer of type atomic64_unchecked_t
6065+ *
6066+ * Atomically reads the value of @v.
6067+ * Doesn't imply a read memory barrier.
6068+ */
6069+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6070+{
6071+ return (*(volatile const long *)&(v)->counter);
6072 }
6073
6074 /**
6075@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6076 }
6077
6078 /**
6079+ * atomic64_set_unchecked - set atomic64 variable
6080+ * @v: pointer to type atomic64_unchecked_t
6081+ * @i: required value
6082+ *
6083+ * Atomically sets the value of @v to @i.
6084+ */
6085+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6086+{
6087+ v->counter = i;
6088+}
6089+
6090+/**
6091 * atomic64_add - add integer to atomic64 variable
6092 * @i: integer value to add
6093 * @v: pointer to type atomic64_t
6094@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6095 */
6096 static inline void atomic64_add(long i, atomic64_t *v)
6097 {
6098+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6099+
6100+#ifdef CONFIG_PAX_REFCOUNT
6101+ "jno 0f\n"
6102+ LOCK_PREFIX "subq %1,%0\n"
6103+ "int $4\n0:\n"
6104+ _ASM_EXTABLE(0b, 0b)
6105+#endif
6106+
6107+ : "=m" (v->counter)
6108+ : "er" (i), "m" (v->counter));
6109+}
6110+
6111+/**
6112+ * atomic64_add_unchecked - add integer to atomic64 variable
6113+ * @i: integer value to add
6114+ * @v: pointer to type atomic64_unchecked_t
6115+ *
6116+ * Atomically adds @i to @v.
6117+ */
6118+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6119+{
6120 asm volatile(LOCK_PREFIX "addq %1,%0"
6121 : "=m" (v->counter)
6122 : "er" (i), "m" (v->counter));
6123@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6124 */
6125 static inline void atomic64_sub(long i, atomic64_t *v)
6126 {
6127- asm volatile(LOCK_PREFIX "subq %1,%0"
6128+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6129+
6130+#ifdef CONFIG_PAX_REFCOUNT
6131+ "jno 0f\n"
6132+ LOCK_PREFIX "addq %1,%0\n"
6133+ "int $4\n0:\n"
6134+ _ASM_EXTABLE(0b, 0b)
6135+#endif
6136+
6137+ : "=m" (v->counter)
6138+ : "er" (i), "m" (v->counter));
6139+}
6140+
6141+/**
6142+ * atomic64_sub_unchecked - subtract the atomic64 variable
6143+ * @i: integer value to subtract
6144+ * @v: pointer to type atomic64_unchecked_t
6145+ *
6146+ * Atomically subtracts @i from @v.
6147+ */
6148+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6149+{
6150+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6151 : "=m" (v->counter)
6152 : "er" (i), "m" (v->counter));
6153 }
6154@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6155 {
6156 unsigned char c;
6157
6158- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6159+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6160+
6161+#ifdef CONFIG_PAX_REFCOUNT
6162+ "jno 0f\n"
6163+ LOCK_PREFIX "addq %2,%0\n"
6164+ "int $4\n0:\n"
6165+ _ASM_EXTABLE(0b, 0b)
6166+#endif
6167+
6168+ "sete %1\n"
6169 : "=m" (v->counter), "=qm" (c)
6170 : "er" (i), "m" (v->counter) : "memory");
6171 return c;
6172@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6173 */
6174 static inline void atomic64_inc(atomic64_t *v)
6175 {
6176+ asm volatile(LOCK_PREFIX "incq %0\n"
6177+
6178+#ifdef CONFIG_PAX_REFCOUNT
6179+ "jno 0f\n"
6180+ LOCK_PREFIX "decq %0\n"
6181+ "int $4\n0:\n"
6182+ _ASM_EXTABLE(0b, 0b)
6183+#endif
6184+
6185+ : "=m" (v->counter)
6186+ : "m" (v->counter));
6187+}
6188+
6189+/**
6190+ * atomic64_inc_unchecked - increment atomic64 variable
6191+ * @v: pointer to type atomic64_unchecked_t
6192+ *
6193+ * Atomically increments @v by 1.
6194+ */
6195+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6196+{
6197 asm volatile(LOCK_PREFIX "incq %0"
6198 : "=m" (v->counter)
6199 : "m" (v->counter));
6200@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6201 */
6202 static inline void atomic64_dec(atomic64_t *v)
6203 {
6204- asm volatile(LOCK_PREFIX "decq %0"
6205+ asm volatile(LOCK_PREFIX "decq %0\n"
6206+
6207+#ifdef CONFIG_PAX_REFCOUNT
6208+ "jno 0f\n"
6209+ LOCK_PREFIX "incq %0\n"
6210+ "int $4\n0:\n"
6211+ _ASM_EXTABLE(0b, 0b)
6212+#endif
6213+
6214+ : "=m" (v->counter)
6215+ : "m" (v->counter));
6216+}
6217+
6218+/**
6219+ * atomic64_dec_unchecked - decrement atomic64 variable
6220+ * @v: pointer to type atomic64_t
6221+ *
6222+ * Atomically decrements @v by 1.
6223+ */
6224+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6225+{
6226+ asm volatile(LOCK_PREFIX "decq %0\n"
6227 : "=m" (v->counter)
6228 : "m" (v->counter));
6229 }
6230@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6231 {
6232 unsigned char c;
6233
6234- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6235+ asm volatile(LOCK_PREFIX "decq %0\n"
6236+
6237+#ifdef CONFIG_PAX_REFCOUNT
6238+ "jno 0f\n"
6239+ LOCK_PREFIX "incq %0\n"
6240+ "int $4\n0:\n"
6241+ _ASM_EXTABLE(0b, 0b)
6242+#endif
6243+
6244+ "sete %1\n"
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6247 return c != 0;
6248@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6249 {
6250 unsigned char c;
6251
6252- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6253+ asm volatile(LOCK_PREFIX "incq %0\n"
6254+
6255+#ifdef CONFIG_PAX_REFCOUNT
6256+ "jno 0f\n"
6257+ LOCK_PREFIX "decq %0\n"
6258+ "int $4\n0:\n"
6259+ _ASM_EXTABLE(0b, 0b)
6260+#endif
6261+
6262+ "sete %1\n"
6263 : "=m" (v->counter), "=qm" (c)
6264 : "m" (v->counter) : "memory");
6265 return c != 0;
6266@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6267 {
6268 unsigned char c;
6269
6270- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6271+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6272+
6273+#ifdef CONFIG_PAX_REFCOUNT
6274+ "jno 0f\n"
6275+ LOCK_PREFIX "subq %2,%0\n"
6276+ "int $4\n0:\n"
6277+ _ASM_EXTABLE(0b, 0b)
6278+#endif
6279+
6280+ "sets %1\n"
6281 : "=m" (v->counter), "=qm" (c)
6282 : "er" (i), "m" (v->counter) : "memory");
6283 return c;
6284@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6285 static inline long atomic64_add_return(long i, atomic64_t *v)
6286 {
6287 long __i = i;
6288- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6289+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6290+
6291+#ifdef CONFIG_PAX_REFCOUNT
6292+ "jno 0f\n"
6293+ "movq %0, %1\n"
6294+ "int $4\n0:\n"
6295+ _ASM_EXTABLE(0b, 0b)
6296+#endif
6297+
6298+ : "+r" (i), "+m" (v->counter)
6299+ : : "memory");
6300+ return i + __i;
6301+}
6302+
6303+/**
6304+ * atomic64_add_return_unchecked - add and return
6305+ * @i: integer value to add
6306+ * @v: pointer to type atomic64_unchecked_t
6307+ *
6308+ * Atomically adds @i to @v and returns @i + @v
6309+ */
6310+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6311+{
6312+ long __i = i;
6313+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6314 : "+r" (i), "+m" (v->counter)
6315 : : "memory");
6316 return i + __i;
6317@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6318 }
6319
6320 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6321+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6322+{
6323+ return atomic64_add_return_unchecked(1, v);
6324+}
6325 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6326
6327 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6328@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6329 return cmpxchg(&v->counter, old, new);
6330 }
6331
6332+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6333+{
6334+ return cmpxchg(&v->counter, old, new);
6335+}
6336+
6337 static inline long atomic64_xchg(atomic64_t *v, long new)
6338 {
6339 return xchg(&v->counter, new);
6340@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6341 */
6342 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6343 {
6344- long c, old;
6345+ long c, old, new;
6346 c = atomic64_read(v);
6347 for (;;) {
6348- if (unlikely(c == (u)))
6349+ if (unlikely(c == u))
6350 break;
6351- old = atomic64_cmpxchg((v), c, c + (a));
6352+
6353+ asm volatile("add %2,%0\n"
6354+
6355+#ifdef CONFIG_PAX_REFCOUNT
6356+ "jno 0f\n"
6357+ "sub %2,%0\n"
6358+ "int $4\n0:\n"
6359+ _ASM_EXTABLE(0b, 0b)
6360+#endif
6361+
6362+ : "=r" (new)
6363+ : "0" (c), "ir" (a));
6364+
6365+ old = atomic64_cmpxchg(v, c, new);
6366 if (likely(old == c))
6367 break;
6368 c = old;
6369 }
6370- return c != (u);
6371+ return c != u;
6372 }
6373
6374 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6375diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6376--- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6377+++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6378@@ -22,7 +22,18 @@
6379 */
6380 static inline int atomic_read(const atomic_t *v)
6381 {
6382- return (*(volatile int *)&(v)->counter);
6383+ return (*(volatile const int *)&(v)->counter);
6384+}
6385+
6386+/**
6387+ * atomic_read_unchecked - read atomic variable
6388+ * @v: pointer of type atomic_unchecked_t
6389+ *
6390+ * Atomically reads the value of @v.
6391+ */
6392+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6393+{
6394+ return (*(volatile const int *)&(v)->counter);
6395 }
6396
6397 /**
6398@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6399 }
6400
6401 /**
6402+ * atomic_set_unchecked - set atomic variable
6403+ * @v: pointer of type atomic_unchecked_t
6404+ * @i: required value
6405+ *
6406+ * Atomically sets the value of @v to @i.
6407+ */
6408+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6409+{
6410+ v->counter = i;
6411+}
6412+
6413+/**
6414 * atomic_add - add integer to atomic variable
6415 * @i: integer value to add
6416 * @v: pointer of type atomic_t
6417@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6418 */
6419 static inline void atomic_add(int i, atomic_t *v)
6420 {
6421- asm volatile(LOCK_PREFIX "addl %1,%0"
6422+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6423+
6424+#ifdef CONFIG_PAX_REFCOUNT
6425+ "jno 0f\n"
6426+ LOCK_PREFIX "subl %1,%0\n"
6427+ "int $4\n0:\n"
6428+ _ASM_EXTABLE(0b, 0b)
6429+#endif
6430+
6431+ : "+m" (v->counter)
6432+ : "ir" (i));
6433+}
6434+
6435+/**
6436+ * atomic_add_unchecked - add integer to atomic variable
6437+ * @i: integer value to add
6438+ * @v: pointer of type atomic_unchecked_t
6439+ *
6440+ * Atomically adds @i to @v.
6441+ */
6442+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6443+{
6444+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6445 : "+m" (v->counter)
6446 : "ir" (i));
6447 }
6448@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6449 */
6450 static inline void atomic_sub(int i, atomic_t *v)
6451 {
6452- asm volatile(LOCK_PREFIX "subl %1,%0"
6453+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6454+
6455+#ifdef CONFIG_PAX_REFCOUNT
6456+ "jno 0f\n"
6457+ LOCK_PREFIX "addl %1,%0\n"
6458+ "int $4\n0:\n"
6459+ _ASM_EXTABLE(0b, 0b)
6460+#endif
6461+
6462+ : "+m" (v->counter)
6463+ : "ir" (i));
6464+}
6465+
6466+/**
6467+ * atomic_sub_unchecked - subtract integer from atomic variable
6468+ * @i: integer value to subtract
6469+ * @v: pointer of type atomic_unchecked_t
6470+ *
6471+ * Atomically subtracts @i from @v.
6472+ */
6473+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6474+{
6475+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6476 : "+m" (v->counter)
6477 : "ir" (i));
6478 }
6479@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6480 {
6481 unsigned char c;
6482
6483- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6484+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ "jno 0f\n"
6488+ LOCK_PREFIX "addl %2,%0\n"
6489+ "int $4\n0:\n"
6490+ _ASM_EXTABLE(0b, 0b)
6491+#endif
6492+
6493+ "sete %1\n"
6494 : "+m" (v->counter), "=qm" (c)
6495 : "ir" (i) : "memory");
6496 return c;
6497@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6498 */
6499 static inline void atomic_inc(atomic_t *v)
6500 {
6501- asm volatile(LOCK_PREFIX "incl %0"
6502+ asm volatile(LOCK_PREFIX "incl %0\n"
6503+
6504+#ifdef CONFIG_PAX_REFCOUNT
6505+ "jno 0f\n"
6506+ LOCK_PREFIX "decl %0\n"
6507+ "int $4\n0:\n"
6508+ _ASM_EXTABLE(0b, 0b)
6509+#endif
6510+
6511+ : "+m" (v->counter));
6512+}
6513+
6514+/**
6515+ * atomic_inc_unchecked - increment atomic variable
6516+ * @v: pointer of type atomic_unchecked_t
6517+ *
6518+ * Atomically increments @v by 1.
6519+ */
6520+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6521+{
6522+ asm volatile(LOCK_PREFIX "incl %0\n"
6523 : "+m" (v->counter));
6524 }
6525
6526@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6527 */
6528 static inline void atomic_dec(atomic_t *v)
6529 {
6530- asm volatile(LOCK_PREFIX "decl %0"
6531+ asm volatile(LOCK_PREFIX "decl %0\n"
6532+
6533+#ifdef CONFIG_PAX_REFCOUNT
6534+ "jno 0f\n"
6535+ LOCK_PREFIX "incl %0\n"
6536+ "int $4\n0:\n"
6537+ _ASM_EXTABLE(0b, 0b)
6538+#endif
6539+
6540+ : "+m" (v->counter));
6541+}
6542+
6543+/**
6544+ * atomic_dec_unchecked - decrement atomic variable
6545+ * @v: pointer of type atomic_unchecked_t
6546+ *
6547+ * Atomically decrements @v by 1.
6548+ */
6549+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6550+{
6551+ asm volatile(LOCK_PREFIX "decl %0\n"
6552 : "+m" (v->counter));
6553 }
6554
6555@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6556 {
6557 unsigned char c;
6558
6559- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6560+ asm volatile(LOCK_PREFIX "decl %0\n"
6561+
6562+#ifdef CONFIG_PAX_REFCOUNT
6563+ "jno 0f\n"
6564+ LOCK_PREFIX "incl %0\n"
6565+ "int $4\n0:\n"
6566+ _ASM_EXTABLE(0b, 0b)
6567+#endif
6568+
6569+ "sete %1\n"
6570 : "+m" (v->counter), "=qm" (c)
6571 : : "memory");
6572 return c != 0;
6573@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6574 {
6575 unsigned char c;
6576
6577- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6578+ asm volatile(LOCK_PREFIX "incl %0\n"
6579+
6580+#ifdef CONFIG_PAX_REFCOUNT
6581+ "jno 0f\n"
6582+ LOCK_PREFIX "decl %0\n"
6583+ "int $4\n0:\n"
6584+ _ASM_EXTABLE(0b, 0b)
6585+#endif
6586+
6587+ "sete %1\n"
6588+ : "+m" (v->counter), "=qm" (c)
6589+ : : "memory");
6590+ return c != 0;
6591+}
6592+
6593+/**
6594+ * atomic_inc_and_test_unchecked - increment and test
6595+ * @v: pointer of type atomic_unchecked_t
6596+ *
6597+ * Atomically increments @v by 1
6598+ * and returns true if the result is zero, or false for all
6599+ * other cases.
6600+ */
6601+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6602+{
6603+ unsigned char c;
6604+
6605+ asm volatile(LOCK_PREFIX "incl %0\n"
6606+ "sete %1\n"
6607 : "+m" (v->counter), "=qm" (c)
6608 : : "memory");
6609 return c != 0;
6610@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6611 {
6612 unsigned char c;
6613
6614- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6615+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6616+
6617+#ifdef CONFIG_PAX_REFCOUNT
6618+ "jno 0f\n"
6619+ LOCK_PREFIX "subl %2,%0\n"
6620+ "int $4\n0:\n"
6621+ _ASM_EXTABLE(0b, 0b)
6622+#endif
6623+
6624+ "sets %1\n"
6625 : "+m" (v->counter), "=qm" (c)
6626 : "ir" (i) : "memory");
6627 return c;
6628@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6629 #endif
6630 /* Modern 486+ processor */
6631 __i = i;
6632+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6633+
6634+#ifdef CONFIG_PAX_REFCOUNT
6635+ "jno 0f\n"
6636+ "movl %0, %1\n"
6637+ "int $4\n0:\n"
6638+ _ASM_EXTABLE(0b, 0b)
6639+#endif
6640+
6641+ : "+r" (i), "+m" (v->counter)
6642+ : : "memory");
6643+ return i + __i;
6644+
6645+#ifdef CONFIG_M386
6646+no_xadd: /* Legacy 386 processor */
6647+ local_irq_save(flags);
6648+ __i = atomic_read(v);
6649+ atomic_set(v, i + __i);
6650+ local_irq_restore(flags);
6651+ return i + __i;
6652+#endif
6653+}
6654+
6655+/**
6656+ * atomic_add_return_unchecked - add integer and return
6657+ * @v: pointer of type atomic_unchecked_t
6658+ * @i: integer value to add
6659+ *
6660+ * Atomically adds @i to @v and returns @i + @v
6661+ */
6662+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6663+{
6664+ int __i;
6665+#ifdef CONFIG_M386
6666+ unsigned long flags;
6667+ if (unlikely(boot_cpu_data.x86 <= 3))
6668+ goto no_xadd;
6669+#endif
6670+ /* Modern 486+ processor */
6671+ __i = i;
6672 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6673 : "+r" (i), "+m" (v->counter)
6674 : : "memory");
6675@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6676 }
6677
6678 #define atomic_inc_return(v) (atomic_add_return(1, v))
6679+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6680+{
6681+ return atomic_add_return_unchecked(1, v);
6682+}
6683 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6684
6685 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6686@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6687 return cmpxchg(&v->counter, old, new);
6688 }
6689
6690+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6691+{
6692+ return cmpxchg(&v->counter, old, new);
6693+}
6694+
6695 static inline int atomic_xchg(atomic_t *v, int new)
6696 {
6697 return xchg(&v->counter, new);
6698 }
6699
6700+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6701+{
6702+ return xchg(&v->counter, new);
6703+}
6704+
6705 /**
6706 * atomic_add_unless - add unless the number is already a given value
6707 * @v: pointer of type atomic_t
6708@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6709 */
6710 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6711 {
6712- int c, old;
6713+ int c, old, new;
6714 c = atomic_read(v);
6715 for (;;) {
6716- if (unlikely(c == (u)))
6717+ if (unlikely(c == u))
6718 break;
6719- old = atomic_cmpxchg((v), c, c + (a));
6720+
6721+ asm volatile("addl %2,%0\n"
6722+
6723+#ifdef CONFIG_PAX_REFCOUNT
6724+ "jno 0f\n"
6725+ "subl %2,%0\n"
6726+ "int $4\n0:\n"
6727+ _ASM_EXTABLE(0b, 0b)
6728+#endif
6729+
6730+ : "=r" (new)
6731+ : "0" (c), "ir" (a));
6732+
6733+ old = atomic_cmpxchg(v, c, new);
6734 if (likely(old == c))
6735 break;
6736 c = old;
6737 }
6738- return c != (u);
6739+ return c != u;
6740 }
6741
6742 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6743
6744+/**
6745+ * atomic_inc_not_zero_hint - increment if not null
6746+ * @v: pointer of type atomic_t
6747+ * @hint: probable value of the atomic before the increment
6748+ *
6749+ * This version of atomic_inc_not_zero() gives a hint of probable
6750+ * value of the atomic. This helps processor to not read the memory
6751+ * before doing the atomic read/modify/write cycle, lowering
6752+ * number of bus transactions on some arches.
6753+ *
6754+ * Returns: 0 if increment was not done, 1 otherwise.
6755+ */
6756+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6757+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6758+{
6759+ int val, c = hint, new;
6760+
6761+ /* sanity test, should be removed by compiler if hint is a constant */
6762+ if (!hint)
6763+ return atomic_inc_not_zero(v);
6764+
6765+ do {
6766+ asm volatile("incl %0\n"
6767+
6768+#ifdef CONFIG_PAX_REFCOUNT
6769+ "jno 0f\n"
6770+ "decl %0\n"
6771+ "int $4\n0:\n"
6772+ _ASM_EXTABLE(0b, 0b)
6773+#endif
6774+
6775+ : "=r" (new)
6776+ : "0" (c));
6777+
6778+ val = atomic_cmpxchg(v, c, new);
6779+ if (val == c)
6780+ return 1;
6781+ c = val;
6782+ } while (c);
6783+
6784+ return 0;
6785+}
6786+
6787 /*
6788 * atomic_dec_if_positive - decrement by 1 if old value positive
6789 * @v: pointer of type atomic_t
6790diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6791--- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6792+++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6793@@ -38,7 +38,7 @@
6794 * a mask operation on a byte.
6795 */
6796 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6797-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6798+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6799 #define CONST_MASK(nr) (1 << ((nr) & 7))
6800
6801 /**
6802diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6803--- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6804+++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6805@@ -11,10 +11,15 @@
6806 #include <asm/pgtable_types.h>
6807
6808 /* Physical address where kernel should be loaded. */
6809-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6810+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6811 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6812 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6813
6814+#ifndef __ASSEMBLY__
6815+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6816+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6817+#endif
6818+
6819 /* Minimum kernel alignment, as a power of two */
6820 #ifdef CONFIG_X86_64
6821 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6822diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6823--- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6824+++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6825@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6826 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6827
6828 if (pg_flags == _PGMT_DEFAULT)
6829- return -1;
6830+ return ~0UL;
6831 else if (pg_flags == _PGMT_WC)
6832 return _PAGE_CACHE_WC;
6833 else if (pg_flags == _PGMT_UC_MINUS)
6834diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6835--- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6836+++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6837@@ -5,12 +5,13 @@
6838
6839 /* L1 cache line size */
6840 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6841-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6842+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6843
6844 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6845+#define __read_only __attribute__((__section__(".data..read_only")))
6846
6847 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6848-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6849+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6850
6851 #ifdef CONFIG_X86_VSMP
6852 #ifdef CONFIG_SMP
6853diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6854--- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6855+++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6856@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6857 int len, __wsum sum,
6858 int *src_err_ptr, int *dst_err_ptr);
6859
6860+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6861+ int len, __wsum sum,
6862+ int *src_err_ptr, int *dst_err_ptr);
6863+
6864+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6865+ int len, __wsum sum,
6866+ int *src_err_ptr, int *dst_err_ptr);
6867+
6868 /*
6869 * Note: when you get a NULL pointer exception here this means someone
6870 * passed in an incorrect kernel address to one of these functions.
6871@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6872 int *err_ptr)
6873 {
6874 might_sleep();
6875- return csum_partial_copy_generic((__force void *)src, dst,
6876+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6877 len, sum, err_ptr, NULL);
6878 }
6879
6880@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6881 {
6882 might_sleep();
6883 if (access_ok(VERIFY_WRITE, dst, len))
6884- return csum_partial_copy_generic(src, (__force void *)dst,
6885+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6886 len, sum, NULL, err_ptr);
6887
6888 if (len)
6889diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6890--- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6891+++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6892@@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6893 ".section .discard,\"aw\",@progbits\n"
6894 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6895 ".previous\n"
6896- ".section .altinstr_replacement,\"ax\"\n"
6897+ ".section .altinstr_replacement,\"a\"\n"
6898 "3: movb $1,%0\n"
6899 "4:\n"
6900 ".previous\n"
6901diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6902--- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6903+++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6904@@ -31,6 +31,12 @@ struct desc_struct {
6905 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6906 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6907 };
6908+ struct {
6909+ u16 offset_low;
6910+ u16 seg;
6911+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6912+ unsigned offset_high: 16;
6913+ } gate;
6914 };
6915 } __attribute__((packed));
6916
6917diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6918--- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6919+++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6920@@ -4,6 +4,7 @@
6921 #include <asm/desc_defs.h>
6922 #include <asm/ldt.h>
6923 #include <asm/mmu.h>
6924+#include <asm/pgtable.h>
6925 #include <linux/smp.h>
6926
6927 static inline void fill_ldt(struct desc_struct *desc,
6928@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6929 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6930 desc->type = (info->read_exec_only ^ 1) << 1;
6931 desc->type |= info->contents << 2;
6932+ desc->type |= info->seg_not_present ^ 1;
6933 desc->s = 1;
6934 desc->dpl = 0x3;
6935 desc->p = info->seg_not_present ^ 1;
6936@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6937 }
6938
6939 extern struct desc_ptr idt_descr;
6940-extern gate_desc idt_table[];
6941-
6942-struct gdt_page {
6943- struct desc_struct gdt[GDT_ENTRIES];
6944-} __attribute__((aligned(PAGE_SIZE)));
6945-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6946+extern gate_desc idt_table[256];
6947
6948+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6949 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6950 {
6951- return per_cpu(gdt_page, cpu).gdt;
6952+ return cpu_gdt_table[cpu];
6953 }
6954
6955 #ifdef CONFIG_X86_64
6956@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6957 unsigned long base, unsigned dpl, unsigned flags,
6958 unsigned short seg)
6959 {
6960- gate->a = (seg << 16) | (base & 0xffff);
6961- gate->b = (base & 0xffff0000) |
6962- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6963+ gate->gate.offset_low = base;
6964+ gate->gate.seg = seg;
6965+ gate->gate.reserved = 0;
6966+ gate->gate.type = type;
6967+ gate->gate.s = 0;
6968+ gate->gate.dpl = dpl;
6969+ gate->gate.p = 1;
6970+ gate->gate.offset_high = base >> 16;
6971 }
6972
6973 #endif
6974@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
6975 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6976 const gate_desc *gate)
6977 {
6978+ pax_open_kernel();
6979 memcpy(&idt[entry], gate, sizeof(*gate));
6980+ pax_close_kernel();
6981 }
6982
6983 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6984 const void *desc)
6985 {
6986+ pax_open_kernel();
6987 memcpy(&ldt[entry], desc, 8);
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
6992@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
6993 size = sizeof(struct desc_struct);
6994 break;
6995 }
6996+
6997+ pax_open_kernel();
6998 memcpy(&gdt[entry], desc, size);
6999+ pax_close_kernel();
7000 }
7001
7002 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7003@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7004
7005 static inline void native_load_tr_desc(void)
7006 {
7007+ pax_open_kernel();
7008 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7009+ pax_close_kernel();
7010 }
7011
7012 static inline void native_load_gdt(const struct desc_ptr *dtr)
7013@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7014 unsigned int i;
7015 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7016
7017+ pax_open_kernel();
7018 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7019 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7020+ pax_close_kernel();
7021 }
7022
7023 #define _LDT_empty(info) \
7024@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7025 desc->limit = (limit >> 16) & 0xf;
7026 }
7027
7028-static inline void _set_gate(int gate, unsigned type, void *addr,
7029+static inline void _set_gate(int gate, unsigned type, const void *addr,
7030 unsigned dpl, unsigned ist, unsigned seg)
7031 {
7032 gate_desc s;
7033@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7034 * Pentium F0 0F bugfix can have resulted in the mapped
7035 * IDT being write-protected.
7036 */
7037-static inline void set_intr_gate(unsigned int n, void *addr)
7038+static inline void set_intr_gate(unsigned int n, const void *addr)
7039 {
7040 BUG_ON((unsigned)n > 0xFF);
7041 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7042@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7043 /*
7044 * This routine sets up an interrupt gate at directory privilege level 3.
7045 */
7046-static inline void set_system_intr_gate(unsigned int n, void *addr)
7047+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7048 {
7049 BUG_ON((unsigned)n > 0xFF);
7050 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7051 }
7052
7053-static inline void set_system_trap_gate(unsigned int n, void *addr)
7054+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7055 {
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7058 }
7059
7060-static inline void set_trap_gate(unsigned int n, void *addr)
7061+static inline void set_trap_gate(unsigned int n, const void *addr)
7062 {
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7065@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7066 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7067 {
7068 BUG_ON((unsigned)n > 0xFF);
7069- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7070+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7071 }
7072
7073-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7074+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7075 {
7076 BUG_ON((unsigned)n > 0xFF);
7077 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7078 }
7079
7080-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7081+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7082 {
7083 BUG_ON((unsigned)n > 0xFF);
7084 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7085 }
7086
7087+#ifdef CONFIG_X86_32
7088+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7089+{
7090+ struct desc_struct d;
7091+
7092+ if (likely(limit))
7093+ limit = (limit - 1UL) >> PAGE_SHIFT;
7094+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7095+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7096+}
7097+#endif
7098+
7099 #endif /* _ASM_X86_DESC_H */
7100diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7101--- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7102+++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7103@@ -69,7 +69,7 @@ struct e820map {
7104 #define ISA_START_ADDRESS 0xa0000
7105 #define ISA_END_ADDRESS 0x100000
7106
7107-#define BIOS_BEGIN 0x000a0000
7108+#define BIOS_BEGIN 0x000c0000
7109 #define BIOS_END 0x00100000
7110
7111 #define BIOS_ROM_BASE 0xffe00000
7112diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7113--- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7114+++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7115@@ -237,7 +237,25 @@ extern int force_personality32;
7116 the loader. We need to make sure that it is out of the way of the program
7117 that it will "exec", and that there is sufficient room for the brk. */
7118
7119+#ifdef CONFIG_PAX_SEGMEXEC
7120+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7121+#else
7122 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7123+#endif
7124+
7125+#ifdef CONFIG_PAX_ASLR
7126+#ifdef CONFIG_X86_32
7127+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7128+
7129+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7130+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7131+#else
7132+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7133+
7134+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7135+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7136+#endif
7137+#endif
7138
7139 /* This yields a mask that user programs can use to figure out what
7140 instruction set this CPU supports. This could be done in user space,
7141@@ -291,8 +309,7 @@ do { \
7142 #define ARCH_DLINFO \
7143 do { \
7144 if (vdso_enabled) \
7145- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7146- (unsigned long)current->mm->context.vdso); \
7147+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7148 } while (0)
7149
7150 #define AT_SYSINFO 32
7151@@ -303,7 +320,7 @@ do { \
7152
7153 #endif /* !CONFIG_X86_32 */
7154
7155-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7156+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7157
7158 #define VDSO_ENTRY \
7159 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7160@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7161 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7162 #define compat_arch_setup_additional_pages syscall32_setup_pages
7163
7164-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7165-#define arch_randomize_brk arch_randomize_brk
7166-
7167 #endif /* _ASM_X86_ELF_H */
7168diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7169--- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7170+++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7171@@ -15,6 +15,6 @@ enum reboot_type {
7172
7173 extern enum reboot_type reboot_type;
7174
7175-extern void machine_emergency_restart(void);
7176+extern void machine_emergency_restart(void) __noreturn;
7177
7178 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7179diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7180--- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7181+++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7182@@ -12,16 +12,18 @@
7183 #include <asm/system.h>
7184
7185 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7186+ typecheck(u32 *, uaddr); \
7187 asm volatile("1:\t" insn "\n" \
7188 "2:\t.section .fixup,\"ax\"\n" \
7189 "3:\tmov\t%3, %1\n" \
7190 "\tjmp\t2b\n" \
7191 "\t.previous\n" \
7192 _ASM_EXTABLE(1b, 3b) \
7193- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7194+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7195 : "i" (-EFAULT), "0" (oparg), "1" (0))
7196
7197 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7198+ typecheck(u32 *, uaddr); \
7199 asm volatile("1:\tmovl %2, %0\n" \
7200 "\tmovl\t%0, %3\n" \
7201 "\t" insn "\n" \
7202@@ -34,7 +36,7 @@
7203 _ASM_EXTABLE(1b, 4b) \
7204 _ASM_EXTABLE(2b, 4b) \
7205 : "=&a" (oldval), "=&r" (ret), \
7206- "+m" (*uaddr), "=&r" (tem) \
7207+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7208 : "r" (oparg), "i" (-EFAULT), "1" (0))
7209
7210 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7211@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7212
7213 switch (op) {
7214 case FUTEX_OP_SET:
7215- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7216+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7217 break;
7218 case FUTEX_OP_ADD:
7219- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7220+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7221 uaddr, oparg);
7222 break;
7223 case FUTEX_OP_OR:
7224@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7225 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7226 return -EFAULT;
7227
7228- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7229+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7230 "2:\t.section .fixup, \"ax\"\n"
7231 "3:\tmov %3, %0\n"
7232 "\tjmp 2b\n"
7233 "\t.previous\n"
7234 _ASM_EXTABLE(1b, 3b)
7235- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7236+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7237 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7238 : "memory"
7239 );
7240diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7241--- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7242+++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7243@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7244 extern void enable_IO_APIC(void);
7245
7246 /* Statistics */
7247-extern atomic_t irq_err_count;
7248-extern atomic_t irq_mis_count;
7249+extern atomic_unchecked_t irq_err_count;
7250+extern atomic_unchecked_t irq_mis_count;
7251
7252 /* EISA */
7253 extern void eisa_set_level_irq(unsigned int irq);
7254diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7255--- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7256+++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7257@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7258 {
7259 int err;
7260
7261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7262+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7263+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7264+#endif
7265+
7266 /* See comment in fxsave() below. */
7267 #ifdef CONFIG_AS_FXSAVEQ
7268 asm volatile("1: fxrstorq %[fx]\n\t"
7269@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7270 {
7271 int err;
7272
7273+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7274+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7275+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7276+#endif
7277+
7278 /*
7279 * Clear the bytes not touched by the fxsave and reserved
7280 * for the SW usage.
7281@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7282 #endif /* CONFIG_X86_64 */
7283
7284 /* We need a safe address that is cheap to find and that is already
7285- in L1 during context switch. The best choices are unfortunately
7286- different for UP and SMP */
7287-#ifdef CONFIG_SMP
7288-#define safe_address (__per_cpu_offset[0])
7289-#else
7290-#define safe_address (kstat_cpu(0).cpustat.user)
7291-#endif
7292+ in L1 during context switch. */
7293+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7294
7295 /*
7296 * These must be called with preempt disabled
7297@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7298 struct thread_info *me = current_thread_info();
7299 preempt_disable();
7300 if (me->status & TS_USEDFPU)
7301- __save_init_fpu(me->task);
7302+ __save_init_fpu(current);
7303 else
7304 clts();
7305 }
7306diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7307--- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7308+++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7309@@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7310
7311 #include <linux/vmalloc.h>
7312
7313+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7314+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7315+{
7316+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7317+}
7318+
7319+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7320+{
7321+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7322+}
7323+
7324 /*
7325 * Convert a virtual cached pointer to an uncached pointer
7326 */
7327diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7328--- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7329+++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7330@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7331 sti; \
7332 sysexit
7333
7334+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7335+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7336+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7337+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7338+
7339 #else
7340 #define INTERRUPT_RETURN iret
7341 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7342diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7343--- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7344+++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7345@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7346 #define RELATIVEJUMP_SIZE 5
7347 #define RELATIVECALL_OPCODE 0xe8
7348 #define RELATIVE_ADDR_SIZE 4
7349-#define MAX_STACK_SIZE 64
7350-#define MIN_STACK_SIZE(ADDR) \
7351- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7352- THREAD_SIZE - (unsigned long)(ADDR))) \
7353- ? (MAX_STACK_SIZE) \
7354- : (((unsigned long)current_thread_info()) + \
7355- THREAD_SIZE - (unsigned long)(ADDR)))
7356+#define MAX_STACK_SIZE 64UL
7357+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7358
7359 #define flush_insn_slot(p) do { } while (0)
7360
7361diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7362--- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7363+++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7364@@ -419,7 +419,7 @@ struct kvm_arch {
7365 unsigned int n_used_mmu_pages;
7366 unsigned int n_requested_mmu_pages;
7367 unsigned int n_max_mmu_pages;
7368- atomic_t invlpg_counter;
7369+ atomic_unchecked_t invlpg_counter;
7370 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7371 /*
7372 * Hash table of struct kvm_mmu_page.
7373@@ -589,7 +589,7 @@ struct kvm_x86_ops {
7374 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7375
7376 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7377- const struct trace_print_flags *exit_reasons_str;
7378+ const struct trace_print_flags * const exit_reasons_str;
7379 };
7380
7381 struct kvm_arch_async_pf {
7382diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7383--- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7384+++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7385@@ -18,26 +18,58 @@ typedef struct {
7386
7387 static inline void local_inc(local_t *l)
7388 {
7389- asm volatile(_ASM_INC "%0"
7390+ asm volatile(_ASM_INC "%0\n"
7391+
7392+#ifdef CONFIG_PAX_REFCOUNT
7393+ "jno 0f\n"
7394+ _ASM_DEC "%0\n"
7395+ "int $4\n0:\n"
7396+ _ASM_EXTABLE(0b, 0b)
7397+#endif
7398+
7399 : "+m" (l->a.counter));
7400 }
7401
7402 static inline void local_dec(local_t *l)
7403 {
7404- asm volatile(_ASM_DEC "%0"
7405+ asm volatile(_ASM_DEC "%0\n"
7406+
7407+#ifdef CONFIG_PAX_REFCOUNT
7408+ "jno 0f\n"
7409+ _ASM_INC "%0\n"
7410+ "int $4\n0:\n"
7411+ _ASM_EXTABLE(0b, 0b)
7412+#endif
7413+
7414 : "+m" (l->a.counter));
7415 }
7416
7417 static inline void local_add(long i, local_t *l)
7418 {
7419- asm volatile(_ASM_ADD "%1,%0"
7420+ asm volatile(_ASM_ADD "%1,%0\n"
7421+
7422+#ifdef CONFIG_PAX_REFCOUNT
7423+ "jno 0f\n"
7424+ _ASM_SUB "%1,%0\n"
7425+ "int $4\n0:\n"
7426+ _ASM_EXTABLE(0b, 0b)
7427+#endif
7428+
7429 : "+m" (l->a.counter)
7430 : "ir" (i));
7431 }
7432
7433 static inline void local_sub(long i, local_t *l)
7434 {
7435- asm volatile(_ASM_SUB "%1,%0"
7436+ asm volatile(_ASM_SUB "%1,%0\n"
7437+
7438+#ifdef CONFIG_PAX_REFCOUNT
7439+ "jno 0f\n"
7440+ _ASM_ADD "%1,%0\n"
7441+ "int $4\n0:\n"
7442+ _ASM_EXTABLE(0b, 0b)
7443+#endif
7444+
7445 : "+m" (l->a.counter)
7446 : "ir" (i));
7447 }
7448@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7449 {
7450 unsigned char c;
7451
7452- asm volatile(_ASM_SUB "%2,%0; sete %1"
7453+ asm volatile(_ASM_SUB "%2,%0\n"
7454+
7455+#ifdef CONFIG_PAX_REFCOUNT
7456+ "jno 0f\n"
7457+ _ASM_ADD "%2,%0\n"
7458+ "int $4\n0:\n"
7459+ _ASM_EXTABLE(0b, 0b)
7460+#endif
7461+
7462+ "sete %1\n"
7463 : "+m" (l->a.counter), "=qm" (c)
7464 : "ir" (i) : "memory");
7465 return c;
7466@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7467 {
7468 unsigned char c;
7469
7470- asm volatile(_ASM_DEC "%0; sete %1"
7471+ asm volatile(_ASM_DEC "%0\n"
7472+
7473+#ifdef CONFIG_PAX_REFCOUNT
7474+ "jno 0f\n"
7475+ _ASM_INC "%0\n"
7476+ "int $4\n0:\n"
7477+ _ASM_EXTABLE(0b, 0b)
7478+#endif
7479+
7480+ "sete %1\n"
7481 : "+m" (l->a.counter), "=qm" (c)
7482 : : "memory");
7483 return c != 0;
7484@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7485 {
7486 unsigned char c;
7487
7488- asm volatile(_ASM_INC "%0; sete %1"
7489+ asm volatile(_ASM_INC "%0\n"
7490+
7491+#ifdef CONFIG_PAX_REFCOUNT
7492+ "jno 0f\n"
7493+ _ASM_DEC "%0\n"
7494+ "int $4\n0:\n"
7495+ _ASM_EXTABLE(0b, 0b)
7496+#endif
7497+
7498+ "sete %1\n"
7499 : "+m" (l->a.counter), "=qm" (c)
7500 : : "memory");
7501 return c != 0;
7502@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7503 {
7504 unsigned char c;
7505
7506- asm volatile(_ASM_ADD "%2,%0; sets %1"
7507+ asm volatile(_ASM_ADD "%2,%0\n"
7508+
7509+#ifdef CONFIG_PAX_REFCOUNT
7510+ "jno 0f\n"
7511+ _ASM_SUB "%2,%0\n"
7512+ "int $4\n0:\n"
7513+ _ASM_EXTABLE(0b, 0b)
7514+#endif
7515+
7516+ "sets %1\n"
7517 : "+m" (l->a.counter), "=qm" (c)
7518 : "ir" (i) : "memory");
7519 return c;
7520@@ -133,7 +201,15 @@ static inline long local_add_return(long
7521 #endif
7522 /* Modern 486+ processor */
7523 __i = i;
7524- asm volatile(_ASM_XADD "%0, %1;"
7525+ asm volatile(_ASM_XADD "%0, %1\n"
7526+
7527+#ifdef CONFIG_PAX_REFCOUNT
7528+ "jno 0f\n"
7529+ _ASM_MOV "%0,%1\n"
7530+ "int $4\n0:\n"
7531+ _ASM_EXTABLE(0b, 0b)
7532+#endif
7533+
7534 : "+r" (i), "+m" (l->a.counter)
7535 : : "memory");
7536 return i + __i;
7537diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7538--- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7539+++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7540@@ -5,4 +5,14 @@
7541
7542 #include <asm-generic/mman.h>
7543
7544+#ifdef __KERNEL__
7545+#ifndef __ASSEMBLY__
7546+#ifdef CONFIG_X86_32
7547+#define arch_mmap_check i386_mmap_check
7548+int i386_mmap_check(unsigned long addr, unsigned long len,
7549+ unsigned long flags);
7550+#endif
7551+#endif
7552+#endif
7553+
7554 #endif /* _ASM_X86_MMAN_H */
7555diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7556--- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7557+++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-05 19:44:33.000000000 -0400
7558@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7559
7560 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7561 {
7562+
7563+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7564+ unsigned int i;
7565+ pgd_t *pgd;
7566+
7567+ pax_open_kernel();
7568+ pgd = get_cpu_pgd(smp_processor_id());
7569+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7570+ if (paravirt_enabled())
7571+ set_pgd(pgd+i, native_make_pgd(0));
7572+ else
7573+ pgd[i] = native_make_pgd(0);
7574+ pax_close_kernel();
7575+#endif
7576+
7577 #ifdef CONFIG_SMP
7578 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7579 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7580@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7581 struct task_struct *tsk)
7582 {
7583 unsigned cpu = smp_processor_id();
7584+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7585+ int tlbstate = TLBSTATE_OK;
7586+#endif
7587
7588 if (likely(prev != next)) {
7589 #ifdef CONFIG_SMP
7590+#ifdef CONFIG_X86_32
7591+ tlbstate = percpu_read(cpu_tlbstate.state);
7592+#endif
7593 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7594 percpu_write(cpu_tlbstate.active_mm, next);
7595 #endif
7596 cpumask_set_cpu(cpu, mm_cpumask(next));
7597
7598 /* Re-load page tables */
7599+#ifdef CONFIG_PAX_PER_CPU_PGD
7600+ pax_open_kernel();
7601+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7602+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7603+ pax_close_kernel();
7604+ load_cr3(get_cpu_pgd(cpu));
7605+#else
7606 load_cr3(next->pgd);
7607+#endif
7608
7609 /* stop flush ipis for the previous mm */
7610 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7611@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7612 */
7613 if (unlikely(prev->context.ldt != next->context.ldt))
7614 load_LDT_nolock(&next->context);
7615- }
7616+
7617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7618+ if (!(__supported_pte_mask & _PAGE_NX)) {
7619+ smp_mb__before_clear_bit();
7620+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7621+ smp_mb__after_clear_bit();
7622+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7623+ }
7624+#endif
7625+
7626+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7627+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7628+ prev->context.user_cs_limit != next->context.user_cs_limit))
7629+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7630 #ifdef CONFIG_SMP
7631+ else if (unlikely(tlbstate != TLBSTATE_OK))
7632+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7633+#endif
7634+#endif
7635+
7636+ }
7637 else {
7638+
7639+#ifdef CONFIG_PAX_PER_CPU_PGD
7640+ pax_open_kernel();
7641+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7642+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7643+ pax_close_kernel();
7644+ load_cr3(get_cpu_pgd(cpu));
7645+#endif
7646+
7647+#ifdef CONFIG_SMP
7648 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7649 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7650
7651@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7652 * tlb flush IPI delivery. We must reload CR3
7653 * to make sure to use no freed page tables.
7654 */
7655+
7656+#ifndef CONFIG_PAX_PER_CPU_PGD
7657 load_cr3(next->pgd);
7658+#endif
7659+
7660 load_LDT_nolock(&next->context);
7661+
7662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7663+ if (!(__supported_pte_mask & _PAGE_NX))
7664+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7665+#endif
7666+
7667+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7668+#ifdef CONFIG_PAX_PAGEEXEC
7669+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7670+#endif
7671+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7672+#endif
7673+
7674 }
7675- }
7676 #endif
7677+ }
7678 }
7679
7680 #define activate_mm(prev, next) \
7681diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7682--- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7683+++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7684@@ -9,10 +9,22 @@
7685 * we put the segment information here.
7686 */
7687 typedef struct {
7688- void *ldt;
7689+ struct desc_struct *ldt;
7690 int size;
7691 struct mutex lock;
7692- void *vdso;
7693+ unsigned long vdso;
7694+
7695+#ifdef CONFIG_X86_32
7696+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7697+ unsigned long user_cs_base;
7698+ unsigned long user_cs_limit;
7699+
7700+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7701+ cpumask_t cpu_user_cs_mask;
7702+#endif
7703+
7704+#endif
7705+#endif
7706
7707 #ifdef CONFIG_X86_64
7708 /* True if mm supports a task running in 32 bit compatibility mode. */
7709diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7710--- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7711+++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7712@@ -5,6 +5,7 @@
7713
7714 #ifdef CONFIG_X86_64
7715 /* X86_64 does not define MODULE_PROC_FAMILY */
7716+#define MODULE_PROC_FAMILY ""
7717 #elif defined CONFIG_M386
7718 #define MODULE_PROC_FAMILY "386 "
7719 #elif defined CONFIG_M486
7720@@ -59,8 +60,30 @@
7721 #error unknown processor family
7722 #endif
7723
7724-#ifdef CONFIG_X86_32
7725-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7726+#ifdef CONFIG_PAX_MEMORY_UDEREF
7727+#define MODULE_PAX_UDEREF "UDEREF "
7728+#else
7729+#define MODULE_PAX_UDEREF ""
7730+#endif
7731+
7732+#ifdef CONFIG_PAX_KERNEXEC
7733+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7734+#else
7735+#define MODULE_PAX_KERNEXEC ""
7736 #endif
7737
7738+#ifdef CONFIG_PAX_REFCOUNT
7739+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7740+#else
7741+#define MODULE_PAX_REFCOUNT ""
7742+#endif
7743+
7744+#ifdef CONFIG_GRKERNSEC
7745+#define MODULE_GRSEC "GRSECURITY "
7746+#else
7747+#define MODULE_GRSEC ""
7748+#endif
7749+
7750+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7751+
7752 #endif /* _ASM_X86_MODULE_H */
7753diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7754--- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7755+++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7756@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7757
7758 /* duplicated to the one in bootmem.h */
7759 extern unsigned long max_pfn;
7760-extern unsigned long phys_base;
7761+extern const unsigned long phys_base;
7762
7763 extern unsigned long __phys_addr(unsigned long);
7764 #define __phys_reloc_hide(x) (x)
7765diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7766--- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7767+++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7768@@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7769 pv_mmu_ops.set_fixmap(idx, phys, flags);
7770 }
7771
7772+#ifdef CONFIG_PAX_KERNEXEC
7773+static inline unsigned long pax_open_kernel(void)
7774+{
7775+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7776+}
7777+
7778+static inline unsigned long pax_close_kernel(void)
7779+{
7780+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7781+}
7782+#else
7783+static inline unsigned long pax_open_kernel(void) { return 0; }
7784+static inline unsigned long pax_close_kernel(void) { return 0; }
7785+#endif
7786+
7787 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7788
7789 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7790@@ -955,7 +970,7 @@ extern void default_banner(void);
7791
7792 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7793 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7794-#define PARA_INDIRECT(addr) *%cs:addr
7795+#define PARA_INDIRECT(addr) *%ss:addr
7796 #endif
7797
7798 #define INTERRUPT_RETURN \
7799@@ -1032,6 +1047,21 @@ extern void default_banner(void);
7800 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7801 CLBR_NONE, \
7802 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7803+
7804+#define GET_CR0_INTO_RDI \
7805+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7806+ mov %rax,%rdi
7807+
7808+#define SET_RDI_INTO_CR0 \
7809+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7810+
7811+#define GET_CR3_INTO_RDI \
7812+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7813+ mov %rax,%rdi
7814+
7815+#define SET_RDI_INTO_CR3 \
7816+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7817+
7818 #endif /* CONFIG_X86_32 */
7819
7820 #endif /* __ASSEMBLY__ */
7821diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7822--- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7823+++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7824@@ -78,19 +78,19 @@ struct pv_init_ops {
7825 */
7826 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7827 unsigned long addr, unsigned len);
7828-};
7829+} __no_const;
7830
7831
7832 struct pv_lazy_ops {
7833 /* Set deferred update mode, used for batching operations. */
7834 void (*enter)(void);
7835 void (*leave)(void);
7836-};
7837+} __no_const;
7838
7839 struct pv_time_ops {
7840 unsigned long long (*sched_clock)(void);
7841 unsigned long (*get_tsc_khz)(void);
7842-};
7843+} __no_const;
7844
7845 struct pv_cpu_ops {
7846 /* hooks for various privileged instructions */
7847@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7848
7849 void (*start_context_switch)(struct task_struct *prev);
7850 void (*end_context_switch)(struct task_struct *next);
7851-};
7852+} __no_const;
7853
7854 struct pv_irq_ops {
7855 /*
7856@@ -217,7 +217,7 @@ struct pv_apic_ops {
7857 unsigned long start_eip,
7858 unsigned long start_esp);
7859 #endif
7860-};
7861+} __no_const;
7862
7863 struct pv_mmu_ops {
7864 unsigned long (*read_cr2)(void);
7865@@ -317,6 +317,12 @@ struct pv_mmu_ops {
7866 an mfn. We can tell which is which from the index. */
7867 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7868 phys_addr_t phys, pgprot_t flags);
7869+
7870+#ifdef CONFIG_PAX_KERNEXEC
7871+ unsigned long (*pax_open_kernel)(void);
7872+ unsigned long (*pax_close_kernel)(void);
7873+#endif
7874+
7875 };
7876
7877 struct arch_spinlock;
7878@@ -327,7 +333,7 @@ struct pv_lock_ops {
7879 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7880 int (*spin_trylock)(struct arch_spinlock *lock);
7881 void (*spin_unlock)(struct arch_spinlock *lock);
7882-};
7883+} __no_const;
7884
7885 /* This contains all the paravirt structures: we get a convenient
7886 * number for each function using the offset which we use to indicate
7887diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7888--- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7889+++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7890@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7891 pmd_t *pmd, pte_t *pte)
7892 {
7893 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7894+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7895+}
7896+
7897+static inline void pmd_populate_user(struct mm_struct *mm,
7898+ pmd_t *pmd, pte_t *pte)
7899+{
7900+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7901 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7902 }
7903
7904diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7905--- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7906+++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7907@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7908
7909 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7910 {
7911+ pax_open_kernel();
7912 *pmdp = pmd;
7913+ pax_close_kernel();
7914 }
7915
7916 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7917diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7918--- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7919+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7920@@ -25,9 +25,6 @@
7921 struct mm_struct;
7922 struct vm_area_struct;
7923
7924-extern pgd_t swapper_pg_dir[1024];
7925-extern pgd_t initial_page_table[1024];
7926-
7927 static inline void pgtable_cache_init(void) { }
7928 static inline void check_pgt_cache(void) { }
7929 void paging_init(void);
7930@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7931 # include <asm/pgtable-2level.h>
7932 #endif
7933
7934+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7935+extern pgd_t initial_page_table[PTRS_PER_PGD];
7936+#ifdef CONFIG_X86_PAE
7937+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7938+#endif
7939+
7940 #if defined(CONFIG_HIGHPTE)
7941 #define pte_offset_map(dir, address) \
7942 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7943@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7944 /* Clear a kernel PTE and flush it from the TLB */
7945 #define kpte_clear_flush(ptep, vaddr) \
7946 do { \
7947+ pax_open_kernel(); \
7948 pte_clear(&init_mm, (vaddr), (ptep)); \
7949+ pax_close_kernel(); \
7950 __flush_tlb_one((vaddr)); \
7951 } while (0)
7952
7953@@ -74,6 +79,9 @@ do { \
7954
7955 #endif /* !__ASSEMBLY__ */
7956
7957+#define HAVE_ARCH_UNMAPPED_AREA
7958+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7959+
7960 /*
7961 * kern_addr_valid() is (1) for FLATMEM and (0) for
7962 * SPARSEMEM and DISCONTIGMEM
7963diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7964--- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7965+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7966@@ -8,7 +8,7 @@
7967 */
7968 #ifdef CONFIG_X86_PAE
7969 # include <asm/pgtable-3level_types.h>
7970-# define PMD_SIZE (1UL << PMD_SHIFT)
7971+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7972 # define PMD_MASK (~(PMD_SIZE - 1))
7973 #else
7974 # include <asm/pgtable-2level_types.h>
7975@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7976 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7977 #endif
7978
7979+#ifdef CONFIG_PAX_KERNEXEC
7980+#ifndef __ASSEMBLY__
7981+extern unsigned char MODULES_EXEC_VADDR[];
7982+extern unsigned char MODULES_EXEC_END[];
7983+#endif
7984+#include <asm/boot.h>
7985+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7986+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7987+#else
7988+#define ktla_ktva(addr) (addr)
7989+#define ktva_ktla(addr) (addr)
7990+#endif
7991+
7992 #define MODULES_VADDR VMALLOC_START
7993 #define MODULES_END VMALLOC_END
7994 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
7995diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
7996--- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
7997+++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
7998@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
7999
8000 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8001 {
8002+ pax_open_kernel();
8003 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8004+ pax_close_kernel();
8005 }
8006
8007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8008 {
8009+ pax_open_kernel();
8010 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8011+ pax_close_kernel();
8012 }
8013
8014 /*
8015diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8016--- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8017+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8018@@ -16,10 +16,13 @@
8019
8020 extern pud_t level3_kernel_pgt[512];
8021 extern pud_t level3_ident_pgt[512];
8022+extern pud_t level3_vmalloc_pgt[512];
8023+extern pud_t level3_vmemmap_pgt[512];
8024+extern pud_t level2_vmemmap_pgt[512];
8025 extern pmd_t level2_kernel_pgt[512];
8026 extern pmd_t level2_fixmap_pgt[512];
8027-extern pmd_t level2_ident_pgt[512];
8028-extern pgd_t init_level4_pgt[];
8029+extern pmd_t level2_ident_pgt[512*2];
8030+extern pgd_t init_level4_pgt[512];
8031
8032 #define swapper_pg_dir init_level4_pgt
8033
8034@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8035
8036 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8037 {
8038+ pax_open_kernel();
8039 *pmdp = pmd;
8040+ pax_close_kernel();
8041 }
8042
8043 static inline void native_pmd_clear(pmd_t *pmd)
8044@@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8045
8046 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8047 {
8048+ pax_open_kernel();
8049 *pgdp = pgd;
8050+ pax_close_kernel();
8051 }
8052
8053 static inline void native_pgd_clear(pgd_t *pgd)
8054diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8055--- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8056+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8057@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8058 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8059 #define MODULES_END _AC(0xffffffffff000000, UL)
8060 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8061+#define MODULES_EXEC_VADDR MODULES_VADDR
8062+#define MODULES_EXEC_END MODULES_END
8063+
8064+#define ktla_ktva(addr) (addr)
8065+#define ktva_ktla(addr) (addr)
8066
8067 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8068diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8069--- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8070+++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8071@@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8072
8073 #define arch_end_context_switch(prev) do {} while(0)
8074
8075+#define pax_open_kernel() native_pax_open_kernel()
8076+#define pax_close_kernel() native_pax_close_kernel()
8077 #endif /* CONFIG_PARAVIRT */
8078
8079+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8080+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8081+
8082+#ifdef CONFIG_PAX_KERNEXEC
8083+static inline unsigned long native_pax_open_kernel(void)
8084+{
8085+ unsigned long cr0;
8086+
8087+ preempt_disable();
8088+ barrier();
8089+ cr0 = read_cr0() ^ X86_CR0_WP;
8090+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8091+ write_cr0(cr0);
8092+ return cr0 ^ X86_CR0_WP;
8093+}
8094+
8095+static inline unsigned long native_pax_close_kernel(void)
8096+{
8097+ unsigned long cr0;
8098+
8099+ cr0 = read_cr0() ^ X86_CR0_WP;
8100+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8101+ write_cr0(cr0);
8102+ barrier();
8103+ preempt_enable_no_resched();
8104+ return cr0 ^ X86_CR0_WP;
8105+}
8106+#else
8107+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8108+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8109+#endif
8110+
8111 /*
8112 * The following only work if pte_present() is true.
8113 * Undefined behaviour if not..
8114 */
8115+static inline int pte_user(pte_t pte)
8116+{
8117+ return pte_val(pte) & _PAGE_USER;
8118+}
8119+
8120 static inline int pte_dirty(pte_t pte)
8121 {
8122 return pte_flags(pte) & _PAGE_DIRTY;
8123@@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8124 return pte_clear_flags(pte, _PAGE_RW);
8125 }
8126
8127+static inline pte_t pte_mkread(pte_t pte)
8128+{
8129+ return __pte(pte_val(pte) | _PAGE_USER);
8130+}
8131+
8132 static inline pte_t pte_mkexec(pte_t pte)
8133 {
8134- return pte_clear_flags(pte, _PAGE_NX);
8135+#ifdef CONFIG_X86_PAE
8136+ if (__supported_pte_mask & _PAGE_NX)
8137+ return pte_clear_flags(pte, _PAGE_NX);
8138+ else
8139+#endif
8140+ return pte_set_flags(pte, _PAGE_USER);
8141+}
8142+
8143+static inline pte_t pte_exprotect(pte_t pte)
8144+{
8145+#ifdef CONFIG_X86_PAE
8146+ if (__supported_pte_mask & _PAGE_NX)
8147+ return pte_set_flags(pte, _PAGE_NX);
8148+ else
8149+#endif
8150+ return pte_clear_flags(pte, _PAGE_USER);
8151 }
8152
8153 static inline pte_t pte_mkdirty(pte_t pte)
8154@@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8155 #endif
8156
8157 #ifndef __ASSEMBLY__
8158+
8159+#ifdef CONFIG_PAX_PER_CPU_PGD
8160+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8161+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8162+{
8163+ return cpu_pgd[cpu];
8164+}
8165+#endif
8166+
8167 #include <linux/mm_types.h>
8168
8169 static inline int pte_none(pte_t pte)
8170@@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8171
8172 static inline int pgd_bad(pgd_t pgd)
8173 {
8174- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8175+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8176 }
8177
8178 static inline int pgd_none(pgd_t pgd)
8179@@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8180 * pgd_offset() returns a (pgd_t *)
8181 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8182 */
8183-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8184+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8185+
8186+#ifdef CONFIG_PAX_PER_CPU_PGD
8187+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8188+#endif
8189+
8190 /*
8191 * a shortcut which implies the use of the kernel's pgd, instead
8192 * of a process's
8193@@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8194 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8195 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8196
8197+#ifdef CONFIG_X86_32
8198+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8199+#else
8200+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8201+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8202+
8203+#ifdef CONFIG_PAX_MEMORY_UDEREF
8204+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8205+#else
8206+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8207+#endif
8208+
8209+#endif
8210+
8211 #ifndef __ASSEMBLY__
8212
8213 extern int direct_gbpages;
8214@@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8215 * dst and src can be on the same page, but the range must not overlap,
8216 * and must not cross a page boundary.
8217 */
8218-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8219+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8220 {
8221- memcpy(dst, src, count * sizeof(pgd_t));
8222+ pax_open_kernel();
8223+ while (count--)
8224+ *dst++ = *src++;
8225+ pax_close_kernel();
8226 }
8227
8228+#ifdef CONFIG_PAX_PER_CPU_PGD
8229+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8230+#endif
8231+
8232+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8233+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8234+#else
8235+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8236+#endif
8237
8238 #include <asm-generic/pgtable.h>
8239 #endif /* __ASSEMBLY__ */
8240diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8241--- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8242+++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8243@@ -16,13 +16,12 @@
8244 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8245 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8246 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8247-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8248+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8249 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8250 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8251 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8252-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8253-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8254-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8255+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8256+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8257 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8258
8259 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8260@@ -40,7 +39,6 @@
8261 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8262 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8263 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8264-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8265 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8266 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8267 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8268@@ -57,8 +55,10 @@
8269
8270 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8271 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8272-#else
8273+#elif defined(CONFIG_KMEMCHECK)
8274 #define _PAGE_NX (_AT(pteval_t, 0))
8275+#else
8276+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8277 #endif
8278
8279 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8280@@ -96,6 +96,9 @@
8281 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8282 _PAGE_ACCESSED)
8283
8284+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8285+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8286+
8287 #define __PAGE_KERNEL_EXEC \
8288 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8289 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8290@@ -106,8 +109,8 @@
8291 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8292 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8293 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8294-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8295-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8296+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8297+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8298 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8299 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8300 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8301@@ -166,8 +169,8 @@
8302 * bits are combined, this will alow user to access the high address mapped
8303 * VDSO in the presence of CONFIG_COMPAT_VDSO
8304 */
8305-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8306-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8307+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8308+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8309 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8310 #endif
8311
8312@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8313 {
8314 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8315 }
8316+#endif
8317
8318+#if PAGETABLE_LEVELS == 3
8319+#include <asm-generic/pgtable-nopud.h>
8320+#endif
8321+
8322+#if PAGETABLE_LEVELS == 2
8323+#include <asm-generic/pgtable-nopmd.h>
8324+#endif
8325+
8326+#ifndef __ASSEMBLY__
8327 #if PAGETABLE_LEVELS > 3
8328 typedef struct { pudval_t pud; } pud_t;
8329
8330@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8331 return pud.pud;
8332 }
8333 #else
8334-#include <asm-generic/pgtable-nopud.h>
8335-
8336 static inline pudval_t native_pud_val(pud_t pud)
8337 {
8338 return native_pgd_val(pud.pgd);
8339@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8340 return pmd.pmd;
8341 }
8342 #else
8343-#include <asm-generic/pgtable-nopmd.h>
8344-
8345 static inline pmdval_t native_pmd_val(pmd_t pmd)
8346 {
8347 return native_pgd_val(pmd.pud.pgd);
8348@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8349
8350 extern pteval_t __supported_pte_mask;
8351 extern void set_nx(void);
8352-extern int nx_enabled;
8353
8354 #define pgprot_writecombine pgprot_writecombine
8355 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8356diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8357--- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8358+++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8359@@ -266,7 +266,7 @@ struct tss_struct {
8360
8361 } ____cacheline_aligned;
8362
8363-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8364+extern struct tss_struct init_tss[NR_CPUS];
8365
8366 /*
8367 * Save the original ist values for checking stack pointers during debugging
8368@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8369 */
8370 #define TASK_SIZE PAGE_OFFSET
8371 #define TASK_SIZE_MAX TASK_SIZE
8372+
8373+#ifdef CONFIG_PAX_SEGMEXEC
8374+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8375+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8376+#else
8377 #define STACK_TOP TASK_SIZE
8378-#define STACK_TOP_MAX STACK_TOP
8379+#endif
8380+
8381+#define STACK_TOP_MAX TASK_SIZE
8382
8383 #define INIT_THREAD { \
8384- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8385+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8386 .vm86_info = NULL, \
8387 .sysenter_cs = __KERNEL_CS, \
8388 .io_bitmap_ptr = NULL, \
8389@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define INIT_TSS { \
8392 .x86_tss = { \
8393- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8394+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8395 .ss0 = __KERNEL_DS, \
8396 .ss1 = __KERNEL_CS, \
8397 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8398@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8399 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8400
8401 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8402-#define KSTK_TOP(info) \
8403-({ \
8404- unsigned long *__ptr = (unsigned long *)(info); \
8405- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8406-})
8407+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8408
8409 /*
8410 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8411@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8412 #define task_pt_regs(task) \
8413 ({ \
8414 struct pt_regs *__regs__; \
8415- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8416+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8417 __regs__ - 1; \
8418 })
8419
8420@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8421 /*
8422 * User space process size. 47bits minus one guard page.
8423 */
8424-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8425+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8426
8427 /* This decides where the kernel will search for a free chunk of vm
8428 * space during mmap's.
8429 */
8430 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8431- 0xc0000000 : 0xFFFFe000)
8432+ 0xc0000000 : 0xFFFFf000)
8433
8434 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8435 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8436@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8437 #define STACK_TOP_MAX TASK_SIZE_MAX
8438
8439 #define INIT_THREAD { \
8440- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8441+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8442 }
8443
8444 #define INIT_TSS { \
8445- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8446+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8447 }
8448
8449 /*
8450@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8451 */
8452 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8453
8454+#ifdef CONFIG_PAX_SEGMEXEC
8455+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8456+#endif
8457+
8458 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8459
8460 /* Get/set a process' ability to use the timestamp counter instruction */
8461diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8462--- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8463+++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8464@@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8465 }
8466
8467 /*
8468- * user_mode_vm(regs) determines whether a register set came from user mode.
8469+ * user_mode(regs) determines whether a register set came from user mode.
8470 * This is true if V8086 mode was enabled OR if the register set was from
8471 * protected mode with RPL-3 CS value. This tricky test checks that with
8472 * one comparison. Many places in the kernel can bypass this full check
8473- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8474+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8475+ * be used.
8476 */
8477-static inline int user_mode(struct pt_regs *regs)
8478+static inline int user_mode_novm(struct pt_regs *regs)
8479 {
8480 #ifdef CONFIG_X86_32
8481 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8482 #else
8483- return !!(regs->cs & 3);
8484+ return !!(regs->cs & SEGMENT_RPL_MASK);
8485 #endif
8486 }
8487
8488-static inline int user_mode_vm(struct pt_regs *regs)
8489+static inline int user_mode(struct pt_regs *regs)
8490 {
8491 #ifdef CONFIG_X86_32
8492 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8493 USER_RPL;
8494 #else
8495- return user_mode(regs);
8496+ return user_mode_novm(regs);
8497 #endif
8498 }
8499
8500diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8501--- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8502+++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8503@@ -6,19 +6,19 @@
8504 struct pt_regs;
8505
8506 struct machine_ops {
8507- void (*restart)(char *cmd);
8508- void (*halt)(void);
8509- void (*power_off)(void);
8510+ void (* __noreturn restart)(char *cmd);
8511+ void (* __noreturn halt)(void);
8512+ void (* __noreturn power_off)(void);
8513 void (*shutdown)(void);
8514 void (*crash_shutdown)(struct pt_regs *);
8515- void (*emergency_restart)(void);
8516-};
8517+ void (* __noreturn emergency_restart)(void);
8518+} __no_const;
8519
8520 extern struct machine_ops machine_ops;
8521
8522 void native_machine_crash_shutdown(struct pt_regs *regs);
8523 void native_machine_shutdown(void);
8524-void machine_real_restart(unsigned int type);
8525+void machine_real_restart(unsigned int type) __noreturn;
8526 /* These must match dispatch_table in reboot_32.S */
8527 #define MRR_BIOS 0
8528 #define MRR_APM 1
8529diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8530--- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8531+++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8532@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8533 {
8534 asm volatile("# beginning down_read\n\t"
8535 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8536+
8537+#ifdef CONFIG_PAX_REFCOUNT
8538+ "jno 0f\n"
8539+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8540+ "int $4\n0:\n"
8541+ _ASM_EXTABLE(0b, 0b)
8542+#endif
8543+
8544 /* adds 0x00000001 */
8545 " jns 1f\n"
8546 " call call_rwsem_down_read_failed\n"
8547@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8548 "1:\n\t"
8549 " mov %1,%2\n\t"
8550 " add %3,%2\n\t"
8551+
8552+#ifdef CONFIG_PAX_REFCOUNT
8553+ "jno 0f\n"
8554+ "sub %3,%2\n"
8555+ "int $4\n0:\n"
8556+ _ASM_EXTABLE(0b, 0b)
8557+#endif
8558+
8559 " jle 2f\n\t"
8560 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8561 " jnz 1b\n\t"
8562@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8563 long tmp;
8564 asm volatile("# beginning down_write\n\t"
8565 LOCK_PREFIX " xadd %1,(%2)\n\t"
8566+
8567+#ifdef CONFIG_PAX_REFCOUNT
8568+ "jno 0f\n"
8569+ "mov %1,(%2)\n"
8570+ "int $4\n0:\n"
8571+ _ASM_EXTABLE(0b, 0b)
8572+#endif
8573+
8574 /* adds 0xffff0001, returns the old value */
8575 " test %1,%1\n\t"
8576 /* was the count 0 before? */
8577@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8578 long tmp;
8579 asm volatile("# beginning __up_read\n\t"
8580 LOCK_PREFIX " xadd %1,(%2)\n\t"
8581+
8582+#ifdef CONFIG_PAX_REFCOUNT
8583+ "jno 0f\n"
8584+ "mov %1,(%2)\n"
8585+ "int $4\n0:\n"
8586+ _ASM_EXTABLE(0b, 0b)
8587+#endif
8588+
8589 /* subtracts 1, returns the old value */
8590 " jns 1f\n\t"
8591 " call call_rwsem_wake\n" /* expects old value in %edx */
8592@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8593 long tmp;
8594 asm volatile("# beginning __up_write\n\t"
8595 LOCK_PREFIX " xadd %1,(%2)\n\t"
8596+
8597+#ifdef CONFIG_PAX_REFCOUNT
8598+ "jno 0f\n"
8599+ "mov %1,(%2)\n"
8600+ "int $4\n0:\n"
8601+ _ASM_EXTABLE(0b, 0b)
8602+#endif
8603+
8604 /* subtracts 0xffff0001, returns the old value */
8605 " jns 1f\n\t"
8606 " call call_rwsem_wake\n" /* expects old value in %edx */
8607@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8608 {
8609 asm volatile("# beginning __downgrade_write\n\t"
8610 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8611+
8612+#ifdef CONFIG_PAX_REFCOUNT
8613+ "jno 0f\n"
8614+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8615+ "int $4\n0:\n"
8616+ _ASM_EXTABLE(0b, 0b)
8617+#endif
8618+
8619 /*
8620 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8621 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8622@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8623 */
8624 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8625 {
8626- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8627+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8628+
8629+#ifdef CONFIG_PAX_REFCOUNT
8630+ "jno 0f\n"
8631+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8632+ "int $4\n0:\n"
8633+ _ASM_EXTABLE(0b, 0b)
8634+#endif
8635+
8636 : "+m" (sem->count)
8637 : "er" (delta));
8638 }
8639@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8640 {
8641 long tmp = delta;
8642
8643- asm volatile(LOCK_PREFIX "xadd %0,%1"
8644+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8645+
8646+#ifdef CONFIG_PAX_REFCOUNT
8647+ "jno 0f\n"
8648+ "mov %0,%1\n"
8649+ "int $4\n0:\n"
8650+ _ASM_EXTABLE(0b, 0b)
8651+#endif
8652+
8653 : "+r" (tmp), "+m" (sem->count)
8654 : : "memory");
8655
8656diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8657--- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8658+++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8659@@ -64,8 +64,8 @@
8660 * 26 - ESPFIX small SS
8661 * 27 - per-cpu [ offset to per-cpu data area ]
8662 * 28 - stack_canary-20 [ for stack protector ]
8663- * 29 - unused
8664- * 30 - unused
8665+ * 29 - PCI BIOS CS
8666+ * 30 - PCI BIOS DS
8667 * 31 - TSS for double fault handler
8668 */
8669 #define GDT_ENTRY_TLS_MIN 6
8670@@ -79,6 +79,8 @@
8671
8672 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8673
8674+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8675+
8676 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8677
8678 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8679@@ -104,6 +106,12 @@
8680 #define __KERNEL_STACK_CANARY 0
8681 #endif
8682
8683+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8684+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8685+
8686+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8687+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8688+
8689 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8690
8691 /*
8692@@ -141,7 +149,7 @@
8693 */
8694
8695 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8696-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8697+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8698
8699
8700 #else
8701@@ -165,6 +173,8 @@
8702 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8703 #define __USER32_DS __USER_DS
8704
8705+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8706+
8707 #define GDT_ENTRY_TSS 8 /* needs two entries */
8708 #define GDT_ENTRY_LDT 10 /* needs two entries */
8709 #define GDT_ENTRY_TLS_MIN 12
8710@@ -185,6 +195,7 @@
8711 #endif
8712
8713 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8714+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8715 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8716 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8717 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8718diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8719--- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8720+++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8721@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8722 /* cpus sharing the last level cache: */
8723 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8724 DECLARE_PER_CPU(u16, cpu_llc_id);
8725-DECLARE_PER_CPU(int, cpu_number);
8726+DECLARE_PER_CPU(unsigned int, cpu_number);
8727
8728 static inline struct cpumask *cpu_sibling_mask(int cpu)
8729 {
8730@@ -77,7 +77,7 @@ struct smp_ops {
8731
8732 void (*send_call_func_ipi)(const struct cpumask *mask);
8733 void (*send_call_func_single_ipi)(int cpu);
8734-};
8735+} __no_const;
8736
8737 /* Globals due to paravirt */
8738 extern void set_cpu_sibling_map(int cpu);
8739@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8740 extern int safe_smp_processor_id(void);
8741
8742 #elif defined(CONFIG_X86_64_SMP)
8743-#define raw_smp_processor_id() (percpu_read(cpu_number))
8744-
8745-#define stack_smp_processor_id() \
8746-({ \
8747- struct thread_info *ti; \
8748- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8749- ti->cpu; \
8750-})
8751+#define raw_smp_processor_id() (percpu_read(cpu_number))
8752+#define stack_smp_processor_id() raw_smp_processor_id()
8753 #define safe_smp_processor_id() smp_processor_id()
8754
8755 #endif
8756diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8757--- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8758+++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8759@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8760 static inline void arch_read_lock(arch_rwlock_t *rw)
8761 {
8762 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8763+
8764+#ifdef CONFIG_PAX_REFCOUNT
8765+ "jno 0f\n"
8766+ LOCK_PREFIX " addl $1,(%0)\n"
8767+ "int $4\n0:\n"
8768+ _ASM_EXTABLE(0b, 0b)
8769+#endif
8770+
8771 "jns 1f\n"
8772 "call __read_lock_failed\n\t"
8773 "1:\n"
8774@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8775 static inline void arch_write_lock(arch_rwlock_t *rw)
8776 {
8777 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8778+
8779+#ifdef CONFIG_PAX_REFCOUNT
8780+ "jno 0f\n"
8781+ LOCK_PREFIX " addl %1,(%0)\n"
8782+ "int $4\n0:\n"
8783+ _ASM_EXTABLE(0b, 0b)
8784+#endif
8785+
8786 "jz 1f\n"
8787 "call __write_lock_failed\n\t"
8788 "1:\n"
8789@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8790
8791 static inline void arch_read_unlock(arch_rwlock_t *rw)
8792 {
8793- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8794+ asm volatile(LOCK_PREFIX "incl %0\n"
8795+
8796+#ifdef CONFIG_PAX_REFCOUNT
8797+ "jno 0f\n"
8798+ LOCK_PREFIX "decl %0\n"
8799+ "int $4\n0:\n"
8800+ _ASM_EXTABLE(0b, 0b)
8801+#endif
8802+
8803+ :"+m" (rw->lock) : : "memory");
8804 }
8805
8806 static inline void arch_write_unlock(arch_rwlock_t *rw)
8807 {
8808- asm volatile(LOCK_PREFIX "addl %1, %0"
8809+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8810+
8811+#ifdef CONFIG_PAX_REFCOUNT
8812+ "jno 0f\n"
8813+ LOCK_PREFIX "subl %1, %0\n"
8814+ "int $4\n0:\n"
8815+ _ASM_EXTABLE(0b, 0b)
8816+#endif
8817+
8818 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8819 }
8820
8821diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8822--- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8823+++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8824@@ -48,7 +48,7 @@
8825 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8826 */
8827 #define GDT_STACK_CANARY_INIT \
8828- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8829+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8830
8831 /*
8832 * Initialize the stackprotector canary value.
8833@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8834
8835 static inline void load_stack_canary_segment(void)
8836 {
8837-#ifdef CONFIG_X86_32
8838+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8839 asm volatile ("mov %0, %%gs" : : "r" (0));
8840 #endif
8841 }
8842diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8843--- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8844+++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8845@@ -11,28 +11,20 @@
8846
8847 extern int kstack_depth_to_print;
8848
8849-struct thread_info;
8850+struct task_struct;
8851 struct stacktrace_ops;
8852
8853-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8854- unsigned long *stack,
8855- unsigned long bp,
8856- const struct stacktrace_ops *ops,
8857- void *data,
8858- unsigned long *end,
8859- int *graph);
8860-
8861-extern unsigned long
8862-print_context_stack(struct thread_info *tinfo,
8863- unsigned long *stack, unsigned long bp,
8864- const struct stacktrace_ops *ops, void *data,
8865- unsigned long *end, int *graph);
8866-
8867-extern unsigned long
8868-print_context_stack_bp(struct thread_info *tinfo,
8869- unsigned long *stack, unsigned long bp,
8870- const struct stacktrace_ops *ops, void *data,
8871- unsigned long *end, int *graph);
8872+typedef unsigned long walk_stack_t(struct task_struct *task,
8873+ void *stack_start,
8874+ unsigned long *stack,
8875+ unsigned long bp,
8876+ const struct stacktrace_ops *ops,
8877+ void *data,
8878+ unsigned long *end,
8879+ int *graph);
8880+
8881+extern walk_stack_t print_context_stack;
8882+extern walk_stack_t print_context_stack_bp;
8883
8884 /* Generic stack tracer with callbacks */
8885
8886@@ -43,7 +35,7 @@ struct stacktrace_ops {
8887 void (*address)(void *data, unsigned long address, int reliable);
8888 /* On negative return stop dumping */
8889 int (*stack)(void *data, char *name);
8890- walk_stack_t walk_stack;
8891+ walk_stack_t *walk_stack;
8892 };
8893
8894 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8895diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8896--- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8897+++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8898@@ -129,7 +129,7 @@ do { \
8899 "call __switch_to\n\t" \
8900 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8901 __switch_canary \
8902- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8903+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8904 "movq %%rax,%%rdi\n\t" \
8905 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8906 "jnz ret_from_fork\n\t" \
8907@@ -140,7 +140,7 @@ do { \
8908 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8909 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8910 [_tif_fork] "i" (_TIF_FORK), \
8911- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8912+ [thread_info] "m" (current_tinfo), \
8913 [current_task] "m" (current_task) \
8914 __switch_canary_iparam \
8915 : "memory", "cc" __EXTRA_CLOBBER)
8916@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8917 {
8918 unsigned long __limit;
8919 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8920- return __limit + 1;
8921+ return __limit;
8922 }
8923
8924 static inline void native_clts(void)
8925@@ -340,12 +340,12 @@ void enable_hlt(void);
8926
8927 void cpu_idle_wait(void);
8928
8929-extern unsigned long arch_align_stack(unsigned long sp);
8930+#define arch_align_stack(x) ((x) & ~0xfUL)
8931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8932
8933 void default_idle(void);
8934
8935-void stop_this_cpu(void *dummy);
8936+void stop_this_cpu(void *dummy) __noreturn;
8937
8938 /*
8939 * Force strict CPU ordering.
8940diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8941--- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8942+++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8943@@ -10,6 +10,7 @@
8944 #include <linux/compiler.h>
8945 #include <asm/page.h>
8946 #include <asm/types.h>
8947+#include <asm/percpu.h>
8948
8949 /*
8950 * low level task data that entry.S needs immediate access to
8951@@ -24,7 +25,6 @@ struct exec_domain;
8952 #include <asm/atomic.h>
8953
8954 struct thread_info {
8955- struct task_struct *task; /* main task structure */
8956 struct exec_domain *exec_domain; /* execution domain */
8957 __u32 flags; /* low level flags */
8958 __u32 status; /* thread synchronous flags */
8959@@ -34,18 +34,12 @@ struct thread_info {
8960 mm_segment_t addr_limit;
8961 struct restart_block restart_block;
8962 void __user *sysenter_return;
8963-#ifdef CONFIG_X86_32
8964- unsigned long previous_esp; /* ESP of the previous stack in
8965- case of nested (IRQ) stacks
8966- */
8967- __u8 supervisor_stack[0];
8968-#endif
8969+ unsigned long lowest_stack;
8970 int uaccess_err;
8971 };
8972
8973-#define INIT_THREAD_INFO(tsk) \
8974+#define INIT_THREAD_INFO \
8975 { \
8976- .task = &tsk, \
8977 .exec_domain = &default_exec_domain, \
8978 .flags = 0, \
8979 .cpu = 0, \
8980@@ -56,7 +50,7 @@ struct thread_info {
8981 }, \
8982 }
8983
8984-#define init_thread_info (init_thread_union.thread_info)
8985+#define init_thread_info (init_thread_union.stack)
8986 #define init_stack (init_thread_union.stack)
8987
8988 #else /* !__ASSEMBLY__ */
8989@@ -170,6 +164,23 @@ struct thread_info {
8990 ret; \
8991 })
8992
8993+#ifdef __ASSEMBLY__
8994+/* how to get the thread information struct from ASM */
8995+#define GET_THREAD_INFO(reg) \
8996+ mov PER_CPU_VAR(current_tinfo), reg
8997+
8998+/* use this one if reg already contains %esp */
8999+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9000+#else
9001+/* how to get the thread information struct from C */
9002+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9003+
9004+static __always_inline struct thread_info *current_thread_info(void)
9005+{
9006+ return percpu_read_stable(current_tinfo);
9007+}
9008+#endif
9009+
9010 #ifdef CONFIG_X86_32
9011
9012 #define STACK_WARN (THREAD_SIZE/8)
9013@@ -180,35 +191,13 @@ struct thread_info {
9014 */
9015 #ifndef __ASSEMBLY__
9016
9017-
9018 /* how to get the current stack pointer from C */
9019 register unsigned long current_stack_pointer asm("esp") __used;
9020
9021-/* how to get the thread information struct from C */
9022-static inline struct thread_info *current_thread_info(void)
9023-{
9024- return (struct thread_info *)
9025- (current_stack_pointer & ~(THREAD_SIZE - 1));
9026-}
9027-
9028-#else /* !__ASSEMBLY__ */
9029-
9030-/* how to get the thread information struct from ASM */
9031-#define GET_THREAD_INFO(reg) \
9032- movl $-THREAD_SIZE, reg; \
9033- andl %esp, reg
9034-
9035-/* use this one if reg already contains %esp */
9036-#define GET_THREAD_INFO_WITH_ESP(reg) \
9037- andl $-THREAD_SIZE, reg
9038-
9039 #endif
9040
9041 #else /* X86_32 */
9042
9043-#include <asm/percpu.h>
9044-#define KERNEL_STACK_OFFSET (5*8)
9045-
9046 /*
9047 * macros/functions for gaining access to the thread information structure
9048 * preempt_count needs to be 1 initially, until the scheduler is functional.
9049@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9050 #ifndef __ASSEMBLY__
9051 DECLARE_PER_CPU(unsigned long, kernel_stack);
9052
9053-static inline struct thread_info *current_thread_info(void)
9054-{
9055- struct thread_info *ti;
9056- ti = (void *)(percpu_read_stable(kernel_stack) +
9057- KERNEL_STACK_OFFSET - THREAD_SIZE);
9058- return ti;
9059-}
9060-
9061-#else /* !__ASSEMBLY__ */
9062-
9063-/* how to get the thread information struct from ASM */
9064-#define GET_THREAD_INFO(reg) \
9065- movq PER_CPU_VAR(kernel_stack),reg ; \
9066- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9067-
9068+/* how to get the current stack pointer from C */
9069+register unsigned long current_stack_pointer asm("rsp") __used;
9070 #endif
9071
9072 #endif /* !X86_32 */
9073@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9074 extern void free_thread_info(struct thread_info *ti);
9075 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9076 #define arch_task_cache_init arch_task_cache_init
9077+
9078+#define __HAVE_THREAD_FUNCTIONS
9079+#define task_thread_info(task) (&(task)->tinfo)
9080+#define task_stack_page(task) ((task)->stack)
9081+#define setup_thread_stack(p, org) do {} while (0)
9082+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9083+
9084+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9085+extern struct task_struct *alloc_task_struct_node(int node);
9086+extern void free_task_struct(struct task_struct *);
9087+
9088 #endif
9089 #endif /* _ASM_X86_THREAD_INFO_H */
9090diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9091--- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9092+++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9093@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9094 static __always_inline unsigned long __must_check
9095 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9096 {
9097+ pax_track_stack();
9098+
9099+ if ((long)n < 0)
9100+ return n;
9101+
9102 if (__builtin_constant_p(n)) {
9103 unsigned long ret;
9104
9105@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9106 return ret;
9107 }
9108 }
9109+ if (!__builtin_constant_p(n))
9110+ check_object_size(from, n, true);
9111 return __copy_to_user_ll(to, from, n);
9112 }
9113
9114@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9115 __copy_to_user(void __user *to, const void *from, unsigned long n)
9116 {
9117 might_fault();
9118+
9119 return __copy_to_user_inatomic(to, from, n);
9120 }
9121
9122 static __always_inline unsigned long
9123 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9124 {
9125+ if ((long)n < 0)
9126+ return n;
9127+
9128 /* Avoid zeroing the tail if the copy fails..
9129 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9130 * but as the zeroing behaviour is only significant when n is not
9131@@ -138,6 +149,12 @@ static __always_inline unsigned long
9132 __copy_from_user(void *to, const void __user *from, unsigned long n)
9133 {
9134 might_fault();
9135+
9136+ pax_track_stack();
9137+
9138+ if ((long)n < 0)
9139+ return n;
9140+
9141 if (__builtin_constant_p(n)) {
9142 unsigned long ret;
9143
9144@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9145 return ret;
9146 }
9147 }
9148+ if (!__builtin_constant_p(n))
9149+ check_object_size(to, n, false);
9150 return __copy_from_user_ll(to, from, n);
9151 }
9152
9153@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9154 const void __user *from, unsigned long n)
9155 {
9156 might_fault();
9157+
9158+ if ((long)n < 0)
9159+ return n;
9160+
9161 if (__builtin_constant_p(n)) {
9162 unsigned long ret;
9163
9164@@ -182,15 +205,19 @@ static __always_inline unsigned long
9165 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9166 unsigned long n)
9167 {
9168- return __copy_from_user_ll_nocache_nozero(to, from, n);
9169-}
9170+ if ((long)n < 0)
9171+ return n;
9172
9173-unsigned long __must_check copy_to_user(void __user *to,
9174- const void *from, unsigned long n);
9175-unsigned long __must_check _copy_from_user(void *to,
9176- const void __user *from,
9177- unsigned long n);
9178+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9179+}
9180
9181+extern void copy_to_user_overflow(void)
9182+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9183+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9184+#else
9185+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9186+#endif
9187+;
9188
9189 extern void copy_from_user_overflow(void)
9190 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9191@@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9192 #endif
9193 ;
9194
9195-static inline unsigned long __must_check copy_from_user(void *to,
9196- const void __user *from,
9197- unsigned long n)
9198+/**
9199+ * copy_to_user: - Copy a block of data into user space.
9200+ * @to: Destination address, in user space.
9201+ * @from: Source address, in kernel space.
9202+ * @n: Number of bytes to copy.
9203+ *
9204+ * Context: User context only. This function may sleep.
9205+ *
9206+ * Copy data from kernel space to user space.
9207+ *
9208+ * Returns number of bytes that could not be copied.
9209+ * On success, this will be zero.
9210+ */
9211+static inline unsigned long __must_check
9212+copy_to_user(void __user *to, const void *from, unsigned long n)
9213+{
9214+ int sz = __compiletime_object_size(from);
9215+
9216+ if (unlikely(sz != -1 && sz < n))
9217+ copy_to_user_overflow();
9218+ else if (access_ok(VERIFY_WRITE, to, n))
9219+ n = __copy_to_user(to, from, n);
9220+ return n;
9221+}
9222+
9223+/**
9224+ * copy_from_user: - Copy a block of data from user space.
9225+ * @to: Destination address, in kernel space.
9226+ * @from: Source address, in user space.
9227+ * @n: Number of bytes to copy.
9228+ *
9229+ * Context: User context only. This function may sleep.
9230+ *
9231+ * Copy data from user space to kernel space.
9232+ *
9233+ * Returns number of bytes that could not be copied.
9234+ * On success, this will be zero.
9235+ *
9236+ * If some data could not be copied, this function will pad the copied
9237+ * data to the requested size using zero bytes.
9238+ */
9239+static inline unsigned long __must_check
9240+copy_from_user(void *to, const void __user *from, unsigned long n)
9241 {
9242 int sz = __compiletime_object_size(to);
9243
9244- if (likely(sz == -1 || sz >= n))
9245- n = _copy_from_user(to, from, n);
9246- else
9247+ if (unlikely(sz != -1 && sz < n))
9248 copy_from_user_overflow();
9249-
9250+ else if (access_ok(VERIFY_READ, from, n))
9251+ n = __copy_from_user(to, from, n);
9252+ else if ((long)n > 0) {
9253+ if (!__builtin_constant_p(n))
9254+ check_object_size(to, n, false);
9255+ memset(to, 0, n);
9256+ }
9257 return n;
9258 }
9259
9260diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9261--- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9262+++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9263@@ -11,6 +11,9 @@
9264 #include <asm/alternative.h>
9265 #include <asm/cpufeature.h>
9266 #include <asm/page.h>
9267+#include <asm/pgtable.h>
9268+
9269+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9270
9271 /*
9272 * Copy To/From Userspace
9273@@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9274 return ret;
9275 }
9276
9277-__must_check unsigned long
9278-_copy_to_user(void __user *to, const void *from, unsigned len);
9279-__must_check unsigned long
9280-_copy_from_user(void *to, const void __user *from, unsigned len);
9281+static __always_inline __must_check unsigned long
9282+__copy_to_user(void __user *to, const void *from, unsigned len);
9283+static __always_inline __must_check unsigned long
9284+__copy_from_user(void *to, const void __user *from, unsigned len);
9285 __must_check unsigned long
9286 copy_in_user(void __user *to, const void __user *from, unsigned len);
9287
9288 static inline unsigned long __must_check copy_from_user(void *to,
9289 const void __user *from,
9290- unsigned long n)
9291+ unsigned n)
9292 {
9293- int sz = __compiletime_object_size(to);
9294-
9295 might_fault();
9296- if (likely(sz == -1 || sz >= n))
9297- n = _copy_from_user(to, from, n);
9298-#ifdef CONFIG_DEBUG_VM
9299- else
9300- WARN(1, "Buffer overflow detected!\n");
9301-#endif
9302+
9303+ if (access_ok(VERIFY_READ, from, n))
9304+ n = __copy_from_user(to, from, n);
9305+ else if ((int)n > 0) {
9306+ if (!__builtin_constant_p(n))
9307+ check_object_size(to, n, false);
9308+ memset(to, 0, n);
9309+ }
9310 return n;
9311 }
9312
9313@@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9314 {
9315 might_fault();
9316
9317- return _copy_to_user(dst, src, size);
9318+ if (access_ok(VERIFY_WRITE, dst, size))
9319+ size = __copy_to_user(dst, src, size);
9320+ return size;
9321 }
9322
9323 static __always_inline __must_check
9324-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9325+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9326 {
9327- int ret = 0;
9328+ int sz = __compiletime_object_size(dst);
9329+ unsigned ret = 0;
9330
9331 might_fault();
9332- if (!__builtin_constant_p(size))
9333- return copy_user_generic(dst, (__force void *)src, size);
9334+
9335+ pax_track_stack();
9336+
9337+ if ((int)size < 0)
9338+ return size;
9339+
9340+#ifdef CONFIG_PAX_MEMORY_UDEREF
9341+ if (!__access_ok(VERIFY_READ, src, size))
9342+ return size;
9343+#endif
9344+
9345+ if (unlikely(sz != -1 && sz < size)) {
9346+#ifdef CONFIG_DEBUG_VM
9347+ WARN(1, "Buffer overflow detected!\n");
9348+#endif
9349+ return size;
9350+ }
9351+
9352+ if (!__builtin_constant_p(size)) {
9353+ check_object_size(dst, size, false);
9354+
9355+#ifdef CONFIG_PAX_MEMORY_UDEREF
9356+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9357+ src += PAX_USER_SHADOW_BASE;
9358+#endif
9359+
9360+ return copy_user_generic(dst, (__force const void *)src, size);
9361+ }
9362 switch (size) {
9363- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9364+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9365 ret, "b", "b", "=q", 1);
9366 return ret;
9367- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9368+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9369 ret, "w", "w", "=r", 2);
9370 return ret;
9371- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9372+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9373 ret, "l", "k", "=r", 4);
9374 return ret;
9375- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9376+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9377 ret, "q", "", "=r", 8);
9378 return ret;
9379 case 10:
9380- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9381+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9382 ret, "q", "", "=r", 10);
9383 if (unlikely(ret))
9384 return ret;
9385 __get_user_asm(*(u16 *)(8 + (char *)dst),
9386- (u16 __user *)(8 + (char __user *)src),
9387+ (const u16 __user *)(8 + (const char __user *)src),
9388 ret, "w", "w", "=r", 2);
9389 return ret;
9390 case 16:
9391- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9392+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9393 ret, "q", "", "=r", 16);
9394 if (unlikely(ret))
9395 return ret;
9396 __get_user_asm(*(u64 *)(8 + (char *)dst),
9397- (u64 __user *)(8 + (char __user *)src),
9398+ (const u64 __user *)(8 + (const char __user *)src),
9399 ret, "q", "", "=r", 8);
9400 return ret;
9401 default:
9402- return copy_user_generic(dst, (__force void *)src, size);
9403+
9404+#ifdef CONFIG_PAX_MEMORY_UDEREF
9405+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9406+ src += PAX_USER_SHADOW_BASE;
9407+#endif
9408+
9409+ return copy_user_generic(dst, (__force const void *)src, size);
9410 }
9411 }
9412
9413 static __always_inline __must_check
9414-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9415+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9416 {
9417- int ret = 0;
9418+ int sz = __compiletime_object_size(src);
9419+ unsigned ret = 0;
9420
9421 might_fault();
9422- if (!__builtin_constant_p(size))
9423+
9424+ pax_track_stack();
9425+
9426+ if ((int)size < 0)
9427+ return size;
9428+
9429+#ifdef CONFIG_PAX_MEMORY_UDEREF
9430+ if (!__access_ok(VERIFY_WRITE, dst, size))
9431+ return size;
9432+#endif
9433+
9434+ if (unlikely(sz != -1 && sz < size)) {
9435+#ifdef CONFIG_DEBUG_VM
9436+ WARN(1, "Buffer overflow detected!\n");
9437+#endif
9438+ return size;
9439+ }
9440+
9441+ if (!__builtin_constant_p(size)) {
9442+ check_object_size(src, size, true);
9443+
9444+#ifdef CONFIG_PAX_MEMORY_UDEREF
9445+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9446+ dst += PAX_USER_SHADOW_BASE;
9447+#endif
9448+
9449 return copy_user_generic((__force void *)dst, src, size);
9450+ }
9451 switch (size) {
9452- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9453+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9454 ret, "b", "b", "iq", 1);
9455 return ret;
9456- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9457+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9458 ret, "w", "w", "ir", 2);
9459 return ret;
9460- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9461+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9462 ret, "l", "k", "ir", 4);
9463 return ret;
9464- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9465+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9466 ret, "q", "", "er", 8);
9467 return ret;
9468 case 10:
9469- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9470+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9471 ret, "q", "", "er", 10);
9472 if (unlikely(ret))
9473 return ret;
9474 asm("":::"memory");
9475- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9476+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9477 ret, "w", "w", "ir", 2);
9478 return ret;
9479 case 16:
9480- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9481+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9482 ret, "q", "", "er", 16);
9483 if (unlikely(ret))
9484 return ret;
9485 asm("":::"memory");
9486- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9487+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9489 return ret;
9490 default:
9491+
9492+#ifdef CONFIG_PAX_MEMORY_UDEREF
9493+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9494+ dst += PAX_USER_SHADOW_BASE;
9495+#endif
9496+
9497 return copy_user_generic((__force void *)dst, src, size);
9498 }
9499 }
9500
9501 static __always_inline __must_check
9502-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9503+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9504 {
9505- int ret = 0;
9506+ unsigned ret = 0;
9507
9508 might_fault();
9509- if (!__builtin_constant_p(size))
9510+
9511+ if ((int)size < 0)
9512+ return size;
9513+
9514+#ifdef CONFIG_PAX_MEMORY_UDEREF
9515+ if (!__access_ok(VERIFY_READ, src, size))
9516+ return size;
9517+ if (!__access_ok(VERIFY_WRITE, dst, size))
9518+ return size;
9519+#endif
9520+
9521+ if (!__builtin_constant_p(size)) {
9522+
9523+#ifdef CONFIG_PAX_MEMORY_UDEREF
9524+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9525+ src += PAX_USER_SHADOW_BASE;
9526+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9527+ dst += PAX_USER_SHADOW_BASE;
9528+#endif
9529+
9530 return copy_user_generic((__force void *)dst,
9531- (__force void *)src, size);
9532+ (__force const void *)src, size);
9533+ }
9534 switch (size) {
9535 case 1: {
9536 u8 tmp;
9537- __get_user_asm(tmp, (u8 __user *)src,
9538+ __get_user_asm(tmp, (const u8 __user *)src,
9539 ret, "b", "b", "=q", 1);
9540 if (likely(!ret))
9541 __put_user_asm(tmp, (u8 __user *)dst,
9542@@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9543 }
9544 case 2: {
9545 u16 tmp;
9546- __get_user_asm(tmp, (u16 __user *)src,
9547+ __get_user_asm(tmp, (const u16 __user *)src,
9548 ret, "w", "w", "=r", 2);
9549 if (likely(!ret))
9550 __put_user_asm(tmp, (u16 __user *)dst,
9551@@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9552
9553 case 4: {
9554 u32 tmp;
9555- __get_user_asm(tmp, (u32 __user *)src,
9556+ __get_user_asm(tmp, (const u32 __user *)src,
9557 ret, "l", "k", "=r", 4);
9558 if (likely(!ret))
9559 __put_user_asm(tmp, (u32 __user *)dst,
9560@@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9561 }
9562 case 8: {
9563 u64 tmp;
9564- __get_user_asm(tmp, (u64 __user *)src,
9565+ __get_user_asm(tmp, (const u64 __user *)src,
9566 ret, "q", "", "=r", 8);
9567 if (likely(!ret))
9568 __put_user_asm(tmp, (u64 __user *)dst,
9569@@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9570 return ret;
9571 }
9572 default:
9573+
9574+#ifdef CONFIG_PAX_MEMORY_UDEREF
9575+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9576+ src += PAX_USER_SHADOW_BASE;
9577+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9578+ dst += PAX_USER_SHADOW_BASE;
9579+#endif
9580+
9581 return copy_user_generic((__force void *)dst,
9582- (__force void *)src, size);
9583+ (__force const void *)src, size);
9584 }
9585 }
9586
9587@@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9588 static __must_check __always_inline int
9589 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9590 {
9591+ pax_track_stack();
9592+
9593+ if ((int)size < 0)
9594+ return size;
9595+
9596+#ifdef CONFIG_PAX_MEMORY_UDEREF
9597+ if (!__access_ok(VERIFY_READ, src, size))
9598+ return size;
9599+
9600+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9601+ src += PAX_USER_SHADOW_BASE;
9602+#endif
9603+
9604 return copy_user_generic(dst, (__force const void *)src, size);
9605 }
9606
9607-static __must_check __always_inline int
9608+static __must_check __always_inline unsigned long
9609 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9610 {
9611+ if ((int)size < 0)
9612+ return size;
9613+
9614+#ifdef CONFIG_PAX_MEMORY_UDEREF
9615+ if (!__access_ok(VERIFY_WRITE, dst, size))
9616+ return size;
9617+
9618+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9619+ dst += PAX_USER_SHADOW_BASE;
9620+#endif
9621+
9622 return copy_user_generic((__force void *)dst, src, size);
9623 }
9624
9625-extern long __copy_user_nocache(void *dst, const void __user *src,
9626+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9627 unsigned size, int zerorest);
9628
9629-static inline int
9630-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9631+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9632 {
9633 might_sleep();
9634+
9635+ if ((int)size < 0)
9636+ return size;
9637+
9638+#ifdef CONFIG_PAX_MEMORY_UDEREF
9639+ if (!__access_ok(VERIFY_READ, src, size))
9640+ return size;
9641+#endif
9642+
9643 return __copy_user_nocache(dst, src, size, 1);
9644 }
9645
9646-static inline int
9647-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9648+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9649 unsigned size)
9650 {
9651+ if ((int)size < 0)
9652+ return size;
9653+
9654+#ifdef CONFIG_PAX_MEMORY_UDEREF
9655+ if (!__access_ok(VERIFY_READ, src, size))
9656+ return size;
9657+#endif
9658+
9659 return __copy_user_nocache(dst, src, size, 0);
9660 }
9661
9662-unsigned long
9663+extern unsigned long
9664 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9665
9666 #endif /* _ASM_X86_UACCESS_64_H */
9667diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9668--- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9669+++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9670@@ -8,12 +8,15 @@
9671 #include <linux/thread_info.h>
9672 #include <linux/prefetch.h>
9673 #include <linux/string.h>
9674+#include <linux/sched.h>
9675 #include <asm/asm.h>
9676 #include <asm/page.h>
9677
9678 #define VERIFY_READ 0
9679 #define VERIFY_WRITE 1
9680
9681+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9682+
9683 /*
9684 * The fs value determines whether argument validity checking should be
9685 * performed or not. If get_fs() == USER_DS, checking is performed, with
9686@@ -29,7 +32,12 @@
9687
9688 #define get_ds() (KERNEL_DS)
9689 #define get_fs() (current_thread_info()->addr_limit)
9690+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9691+void __set_fs(mm_segment_t x);
9692+void set_fs(mm_segment_t x);
9693+#else
9694 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9695+#endif
9696
9697 #define segment_eq(a, b) ((a).seg == (b).seg)
9698
9699@@ -77,7 +85,33 @@
9700 * checks that the pointer is in the user space range - after calling
9701 * this function, memory access functions may still return -EFAULT.
9702 */
9703-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9704+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9705+#define access_ok(type, addr, size) \
9706+({ \
9707+ long __size = size; \
9708+ unsigned long __addr = (unsigned long)addr; \
9709+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9710+ unsigned long __end_ao = __addr + __size - 1; \
9711+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9712+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9713+ while(__addr_ao <= __end_ao) { \
9714+ char __c_ao; \
9715+ __addr_ao += PAGE_SIZE; \
9716+ if (__size > PAGE_SIZE) \
9717+ cond_resched(); \
9718+ if (__get_user(__c_ao, (char __user *)__addr)) \
9719+ break; \
9720+ if (type != VERIFY_WRITE) { \
9721+ __addr = __addr_ao; \
9722+ continue; \
9723+ } \
9724+ if (__put_user(__c_ao, (char __user *)__addr)) \
9725+ break; \
9726+ __addr = __addr_ao; \
9727+ } \
9728+ } \
9729+ __ret_ao; \
9730+})
9731
9732 /*
9733 * The exception table consists of pairs of addresses: the first is the
9734@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9735 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9736 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9737
9738-
9739+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9740+#define __copyuser_seg "gs;"
9741+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9742+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9743+#else
9744+#define __copyuser_seg
9745+#define __COPYUSER_SET_ES
9746+#define __COPYUSER_RESTORE_ES
9747+#endif
9748
9749 #ifdef CONFIG_X86_32
9750 #define __put_user_asm_u64(x, addr, err, errret) \
9751- asm volatile("1: movl %%eax,0(%2)\n" \
9752- "2: movl %%edx,4(%2)\n" \
9753+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9754+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9755 "3:\n" \
9756 ".section .fixup,\"ax\"\n" \
9757 "4: movl %3,%0\n" \
9758@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9759 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9760
9761 #define __put_user_asm_ex_u64(x, addr) \
9762- asm volatile("1: movl %%eax,0(%1)\n" \
9763- "2: movl %%edx,4(%1)\n" \
9764+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9765+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9766 "3:\n" \
9767 _ASM_EXTABLE(1b, 2b - 1b) \
9768 _ASM_EXTABLE(2b, 3b - 2b) \
9769@@ -374,7 +416,7 @@ do { \
9770 } while (0)
9771
9772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9773- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9774+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9775 "2:\n" \
9776 ".section .fixup,\"ax\"\n" \
9777 "3: mov %3,%0\n" \
9778@@ -382,7 +424,7 @@ do { \
9779 " jmp 2b\n" \
9780 ".previous\n" \
9781 _ASM_EXTABLE(1b, 3b) \
9782- : "=r" (err), ltype(x) \
9783+ : "=r" (err), ltype (x) \
9784 : "m" (__m(addr)), "i" (errret), "0" (err))
9785
9786 #define __get_user_size_ex(x, ptr, size) \
9787@@ -407,7 +449,7 @@ do { \
9788 } while (0)
9789
9790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9791- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9792+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9793 "2:\n" \
9794 _ASM_EXTABLE(1b, 2b - 1b) \
9795 : ltype(x) : "m" (__m(addr)))
9796@@ -424,13 +466,24 @@ do { \
9797 int __gu_err; \
9798 unsigned long __gu_val; \
9799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9800- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9801+ (x) = (__typeof__(*(ptr)))__gu_val; \
9802 __gu_err; \
9803 })
9804
9805 /* FIXME: this hack is definitely wrong -AK */
9806 struct __large_struct { unsigned long buf[100]; };
9807-#define __m(x) (*(struct __large_struct __user *)(x))
9808+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9809+#define ____m(x) \
9810+({ \
9811+ unsigned long ____x = (unsigned long)(x); \
9812+ if (____x < PAX_USER_SHADOW_BASE) \
9813+ ____x += PAX_USER_SHADOW_BASE; \
9814+ (void __user *)____x; \
9815+})
9816+#else
9817+#define ____m(x) (x)
9818+#endif
9819+#define __m(x) (*(struct __large_struct __user *)____m(x))
9820
9821 /*
9822 * Tell gcc we read from memory instead of writing: this is because
9823@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9824 * aliasing issues.
9825 */
9826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9827- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9828+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9829 "2:\n" \
9830 ".section .fixup,\"ax\"\n" \
9831 "3: mov %3,%0\n" \
9832@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9833 ".previous\n" \
9834 _ASM_EXTABLE(1b, 3b) \
9835 : "=r"(err) \
9836- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9837+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9838
9839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9840- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9841+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9842 "2:\n" \
9843 _ASM_EXTABLE(1b, 2b - 1b) \
9844 : : ltype(x), "m" (__m(addr)))
9845@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9846 * On error, the variable @x is set to zero.
9847 */
9848
9849+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9850+#define __get_user(x, ptr) get_user((x), (ptr))
9851+#else
9852 #define __get_user(x, ptr) \
9853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9854+#endif
9855
9856 /**
9857 * __put_user: - Write a simple value into user space, with less checking.
9858@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9859 * Returns zero on success, or -EFAULT on error.
9860 */
9861
9862+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9863+#define __put_user(x, ptr) put_user((x), (ptr))
9864+#else
9865 #define __put_user(x, ptr) \
9866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9867+#endif
9868
9869 #define __get_user_unaligned __get_user
9870 #define __put_user_unaligned __put_user
9871@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9872 #define get_user_ex(x, ptr) do { \
9873 unsigned long __gue_val; \
9874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9875- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9876+ (x) = (__typeof__(*(ptr)))__gue_val; \
9877 } while (0)
9878
9879 #ifdef CONFIG_X86_WP_WORKS_OK
9880@@ -567,6 +628,7 @@ extern struct movsl_mask {
9881
9882 #define ARCH_HAS_NOCACHE_UACCESS 1
9883
9884+#define ARCH_HAS_SORT_EXTABLE
9885 #ifdef CONFIG_X86_32
9886 # include "uaccess_32.h"
9887 #else
9888diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9889--- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9890+++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9891@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9892 int sysctl_enabled;
9893 struct timezone sys_tz;
9894 struct { /* extract of a clocksource struct */
9895+ char name[8];
9896 cycle_t (*vread)(void);
9897 cycle_t cycle_last;
9898 cycle_t mask;
9899diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9900--- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9901+++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9902@@ -15,9 +15,10 @@ enum vsyscall_num {
9903
9904 #ifdef __KERNEL__
9905 #include <linux/seqlock.h>
9906+#include <linux/getcpu.h>
9907+#include <linux/time.h>
9908
9909 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9910-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9911
9912 /* Definitions for CONFIG_GENERIC_TIME definitions */
9913 #define __section_vsyscall_gtod_data __attribute__ \
9914@@ -31,7 +32,6 @@ enum vsyscall_num {
9915 #define VGETCPU_LSL 2
9916
9917 extern int __vgetcpu_mode;
9918-extern volatile unsigned long __jiffies;
9919
9920 /* kernel space (writeable) */
9921 extern int vgetcpu_mode;
9922@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9923
9924 extern void map_vsyscall(void);
9925
9926+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9927+extern time_t vtime(time_t *t);
9928+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9929 #endif /* __KERNEL__ */
9930
9931 #endif /* _ASM_X86_VSYSCALL_H */
9932diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9933--- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9934+++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9935@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9936 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9937 void (*find_smp_config)(void);
9938 void (*get_smp_config)(unsigned int early);
9939-};
9940+} __no_const;
9941
9942 /**
9943 * struct x86_init_resources - platform specific resource related ops
9944@@ -42,7 +42,7 @@ struct x86_init_resources {
9945 void (*probe_roms)(void);
9946 void (*reserve_resources)(void);
9947 char *(*memory_setup)(void);
9948-};
9949+} __no_const;
9950
9951 /**
9952 * struct x86_init_irqs - platform specific interrupt setup
9953@@ -55,7 +55,7 @@ struct x86_init_irqs {
9954 void (*pre_vector_init)(void);
9955 void (*intr_init)(void);
9956 void (*trap_init)(void);
9957-};
9958+} __no_const;
9959
9960 /**
9961 * struct x86_init_oem - oem platform specific customizing functions
9962@@ -65,7 +65,7 @@ struct x86_init_irqs {
9963 struct x86_init_oem {
9964 void (*arch_setup)(void);
9965 void (*banner)(void);
9966-};
9967+} __no_const;
9968
9969 /**
9970 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9971@@ -76,7 +76,7 @@ struct x86_init_oem {
9972 */
9973 struct x86_init_mapping {
9974 void (*pagetable_reserve)(u64 start, u64 end);
9975-};
9976+} __no_const;
9977
9978 /**
9979 * struct x86_init_paging - platform specific paging functions
9980@@ -86,7 +86,7 @@ struct x86_init_mapping {
9981 struct x86_init_paging {
9982 void (*pagetable_setup_start)(pgd_t *base);
9983 void (*pagetable_setup_done)(pgd_t *base);
9984-};
9985+} __no_const;
9986
9987 /**
9988 * struct x86_init_timers - platform specific timer setup
9989@@ -101,7 +101,7 @@ struct x86_init_timers {
9990 void (*tsc_pre_init)(void);
9991 void (*timer_init)(void);
9992 void (*wallclock_init)(void);
9993-};
9994+} __no_const;
9995
9996 /**
9997 * struct x86_init_iommu - platform specific iommu setup
9998@@ -109,7 +109,7 @@ struct x86_init_timers {
9999 */
10000 struct x86_init_iommu {
10001 int (*iommu_init)(void);
10002-};
10003+} __no_const;
10004
10005 /**
10006 * struct x86_init_pci - platform specific pci init functions
10007@@ -123,7 +123,7 @@ struct x86_init_pci {
10008 int (*init)(void);
10009 void (*init_irq)(void);
10010 void (*fixup_irqs)(void);
10011-};
10012+} __no_const;
10013
10014 /**
10015 * struct x86_init_ops - functions for platform specific setup
10016@@ -139,7 +139,7 @@ struct x86_init_ops {
10017 struct x86_init_timers timers;
10018 struct x86_init_iommu iommu;
10019 struct x86_init_pci pci;
10020-};
10021+} __no_const;
10022
10023 /**
10024 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10025@@ -147,7 +147,7 @@ struct x86_init_ops {
10026 */
10027 struct x86_cpuinit_ops {
10028 void (*setup_percpu_clockev)(void);
10029-};
10030+} __no_const;
10031
10032 /**
10033 * struct x86_platform_ops - platform specific runtime functions
10034@@ -166,7 +166,7 @@ struct x86_platform_ops {
10035 bool (*is_untracked_pat_range)(u64 start, u64 end);
10036 void (*nmi_init)(void);
10037 int (*i8042_detect)(void);
10038-};
10039+} __no_const;
10040
10041 struct pci_dev;
10042
10043@@ -174,7 +174,7 @@ struct x86_msi_ops {
10044 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10045 void (*teardown_msi_irq)(unsigned int irq);
10046 void (*teardown_msi_irqs)(struct pci_dev *dev);
10047-};
10048+} __no_const;
10049
10050 extern struct x86_init_ops x86_init;
10051 extern struct x86_cpuinit_ops x86_cpuinit;
10052diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10053--- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10054+++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10055@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10056 {
10057 int err;
10058
10059+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10060+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10061+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10062+#endif
10063+
10064 /*
10065 * Clear the xsave header first, so that reserved fields are
10066 * initialized to zero.
10067@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10068 u32 lmask = mask;
10069 u32 hmask = mask >> 32;
10070
10071+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10072+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10073+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10074+#endif
10075+
10076 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10077 "2:\n"
10078 ".section .fixup,\"ax\"\n"
10079diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10080--- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10081+++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10082@@ -224,7 +224,7 @@ config X86_HT
10083
10084 config X86_32_LAZY_GS
10085 def_bool y
10086- depends on X86_32 && !CC_STACKPROTECTOR
10087+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10088
10089 config ARCH_HWEIGHT_CFLAGS
10090 string
10091@@ -1022,7 +1022,7 @@ choice
10092
10093 config NOHIGHMEM
10094 bool "off"
10095- depends on !X86_NUMAQ
10096+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10097 ---help---
10098 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10099 However, the address space of 32-bit x86 processors is only 4
10100@@ -1059,7 +1059,7 @@ config NOHIGHMEM
10101
10102 config HIGHMEM4G
10103 bool "4GB"
10104- depends on !X86_NUMAQ
10105+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10106 ---help---
10107 Select this if you have a 32-bit processor and between 1 and 4
10108 gigabytes of physical RAM.
10109@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10110 hex
10111 default 0xB0000000 if VMSPLIT_3G_OPT
10112 default 0x80000000 if VMSPLIT_2G
10113- default 0x78000000 if VMSPLIT_2G_OPT
10114+ default 0x70000000 if VMSPLIT_2G_OPT
10115 default 0x40000000 if VMSPLIT_1G
10116 default 0xC0000000
10117 depends on X86_32
10118@@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10119
10120 config EFI
10121 bool "EFI runtime service support"
10122- depends on ACPI
10123+ depends on ACPI && !PAX_KERNEXEC
10124 ---help---
10125 This enables the kernel to use EFI runtime services that are
10126 available (such as the EFI variable services).
10127@@ -1487,6 +1487,7 @@ config SECCOMP
10128
10129 config CC_STACKPROTECTOR
10130 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10131+ depends on X86_64 || !PAX_MEMORY_UDEREF
10132 ---help---
10133 This option turns on the -fstack-protector GCC feature. This
10134 feature puts, at the beginning of functions, a canary value on
10135@@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10136 config PHYSICAL_START
10137 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10138 default "0x1000000"
10139+ range 0x400000 0x40000000
10140 ---help---
10141 This gives the physical address where the kernel is loaded.
10142
10143@@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10144 config PHYSICAL_ALIGN
10145 hex "Alignment value to which kernel should be aligned" if X86_32
10146 default "0x1000000"
10147+ range 0x400000 0x1000000 if PAX_KERNEXEC
10148 range 0x2000 0x1000000
10149 ---help---
10150 This value puts the alignment restrictions on physical address
10151@@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10152 Say N if you want to disable CPU hotplug.
10153
10154 config COMPAT_VDSO
10155- def_bool y
10156+ def_bool n
10157 prompt "Compat VDSO support"
10158 depends on X86_32 || IA32_EMULATION
10159+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10160 ---help---
10161 Map the 32-bit VDSO to the predictable old-style address too.
10162
10163diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10164--- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10165+++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10166@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10167
10168 config X86_F00F_BUG
10169 def_bool y
10170- depends on M586MMX || M586TSC || M586 || M486 || M386
10171+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10172
10173 config X86_INVD_BUG
10174 def_bool y
10175@@ -358,7 +358,7 @@ config X86_POPAD_OK
10176
10177 config X86_ALIGNMENT_16
10178 def_bool y
10179- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10180+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10181
10182 config X86_INTEL_USERCOPY
10183 def_bool y
10184@@ -404,7 +404,7 @@ config X86_CMPXCHG64
10185 # generates cmov.
10186 config X86_CMOV
10187 def_bool y
10188- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10189+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10190
10191 config X86_MINIMUM_CPU_FAMILY
10192 int
10193diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10194--- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10195+++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10196@@ -101,7 +101,7 @@ config X86_PTDUMP
10197 config DEBUG_RODATA
10198 bool "Write protect kernel read-only data structures"
10199 default y
10200- depends on DEBUG_KERNEL
10201+ depends on DEBUG_KERNEL && BROKEN
10202 ---help---
10203 Mark the kernel read-only data as write-protected in the pagetables,
10204 in order to catch accidental (and incorrect) writes to such const
10205@@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10206
10207 config DEBUG_SET_MODULE_RONX
10208 bool "Set loadable kernel module data as NX and text as RO"
10209- depends on MODULES
10210+ depends on MODULES && BROKEN
10211 ---help---
10212 This option helps catch unintended modifications to loadable
10213 kernel module's text and read-only data. It also prevents execution
10214diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10215--- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10216+++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10217@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10218 $(call cc-option, -fno-stack-protector) \
10219 $(call cc-option, -mpreferred-stack-boundary=2)
10220 KBUILD_CFLAGS += $(call cc-option, -m32)
10221+ifdef CONSTIFY_PLUGIN
10222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10223+endif
10224 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10225 GCOV_PROFILE := n
10226
10227diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10228--- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10229+++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10230@@ -108,6 +108,9 @@ wakeup_code:
10231 /* Do any other stuff... */
10232
10233 #ifndef CONFIG_64BIT
10234+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10235+ call verify_cpu
10236+
10237 /* This could also be done in C code... */
10238 movl pmode_cr3, %eax
10239 movl %eax, %cr3
10240@@ -131,6 +134,7 @@ wakeup_code:
10241 movl pmode_cr0, %eax
10242 movl %eax, %cr0
10243 jmp pmode_return
10244+# include "../../verify_cpu.S"
10245 #else
10246 pushw $0
10247 pushw trampoline_segment
10248diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10249--- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10250+++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10251@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10252 header->trampoline_segment = trampoline_address() >> 4;
10253 #ifdef CONFIG_SMP
10254 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10255+
10256+ pax_open_kernel();
10257 early_gdt_descr.address =
10258 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10259+ pax_close_kernel();
10260+
10261 initial_gs = per_cpu_offset(smp_processor_id());
10262 #endif
10263 initial_code = (unsigned long)wakeup_long64;
10264diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10265--- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10266+++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10267@@ -30,13 +30,11 @@ wakeup_pmode_return:
10268 # and restore the stack ... but you need gdt for this to work
10269 movl saved_context_esp, %esp
10270
10271- movl %cs:saved_magic, %eax
10272- cmpl $0x12345678, %eax
10273+ cmpl $0x12345678, saved_magic
10274 jne bogus_magic
10275
10276 # jump to place where we left off
10277- movl saved_eip, %eax
10278- jmp *%eax
10279+ jmp *(saved_eip)
10280
10281 bogus_magic:
10282 jmp bogus_magic
10283diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10284--- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10285+++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10286@@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10287 if (!*poff || ptr < text || ptr >= text_end)
10288 continue;
10289 /* turn DS segment override prefix into lock prefix */
10290- if (*ptr == 0x3e)
10291+ if (*ktla_ktva(ptr) == 0x3e)
10292 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10293 };
10294 mutex_unlock(&text_mutex);
10295@@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10296 if (!*poff || ptr < text || ptr >= text_end)
10297 continue;
10298 /* turn lock prefix into DS segment override prefix */
10299- if (*ptr == 0xf0)
10300+ if (*ktla_ktva(ptr) == 0xf0)
10301 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10302 };
10303 mutex_unlock(&text_mutex);
10304@@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10305
10306 BUG_ON(p->len > MAX_PATCH_LEN);
10307 /* prep the buffer with the original instructions */
10308- memcpy(insnbuf, p->instr, p->len);
10309+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10310 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10311 (unsigned long)p->instr, p->len);
10312
10313@@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10314 if (smp_alt_once)
10315 free_init_pages("SMP alternatives",
10316 (unsigned long)__smp_locks,
10317- (unsigned long)__smp_locks_end);
10318+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10319
10320 restart_nmi();
10321 }
10322@@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10323 * instructions. And on the local CPU you need to be protected again NMI or MCE
10324 * handlers seeing an inconsistent instruction while you patch.
10325 */
10326-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10327+void *__kprobes text_poke_early(void *addr, const void *opcode,
10328 size_t len)
10329 {
10330 unsigned long flags;
10331 local_irq_save(flags);
10332- memcpy(addr, opcode, len);
10333+
10334+ pax_open_kernel();
10335+ memcpy(ktla_ktva(addr), opcode, len);
10336 sync_core();
10337+ pax_close_kernel();
10338+
10339 local_irq_restore(flags);
10340 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10341 that causes hangs on some VIA CPUs. */
10342@@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10343 */
10344 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10345 {
10346- unsigned long flags;
10347- char *vaddr;
10348+ unsigned char *vaddr = ktla_ktva(addr);
10349 struct page *pages[2];
10350- int i;
10351+ size_t i;
10352
10353 if (!core_kernel_text((unsigned long)addr)) {
10354- pages[0] = vmalloc_to_page(addr);
10355- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10356+ pages[0] = vmalloc_to_page(vaddr);
10357+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10358 } else {
10359- pages[0] = virt_to_page(addr);
10360+ pages[0] = virt_to_page(vaddr);
10361 WARN_ON(!PageReserved(pages[0]));
10362- pages[1] = virt_to_page(addr + PAGE_SIZE);
10363+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10364 }
10365 BUG_ON(!pages[0]);
10366- local_irq_save(flags);
10367- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10368- if (pages[1])
10369- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10370- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10371- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10372- clear_fixmap(FIX_TEXT_POKE0);
10373- if (pages[1])
10374- clear_fixmap(FIX_TEXT_POKE1);
10375- local_flush_tlb();
10376- sync_core();
10377- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10378- that causes hangs on some VIA CPUs. */
10379+ text_poke_early(addr, opcode, len);
10380 for (i = 0; i < len; i++)
10381- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10382- local_irq_restore(flags);
10383+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10384 return addr;
10385 }
10386
10387@@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10388 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10389
10390 #ifdef CONFIG_X86_64
10391-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10392+unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10393 #else
10394-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10395+unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10396 #endif
10397
10398 void __init arch_init_ideal_nop5(void)
10399diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10400--- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10401+++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-05 19:44:33.000000000 -0400
10402@@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10403 apic_write(APIC_ESR, 0);
10404 v1 = apic_read(APIC_ESR);
10405 ack_APIC_irq();
10406- atomic_inc(&irq_err_count);
10407+ atomic_inc_unchecked(&irq_err_count);
10408
10409 /*
10410 * Here is what the APIC error bits mean:
10411@@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10412 u16 *bios_cpu_apicid;
10413 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10414
10415+ pax_track_stack();
10416+
10417 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10418 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10419
10420diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10421--- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10422+++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10423@@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10424 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10425 GFP_ATOMIC);
10426 if (!ioapic_entries)
10427- return 0;
10428+ return NULL;
10429
10430 for (apic = 0; apic < nr_ioapics; apic++) {
10431 ioapic_entries[apic] =
10432@@ -640,7 +640,7 @@ nomem:
10433 kfree(ioapic_entries[apic]);
10434 kfree(ioapic_entries);
10435
10436- return 0;
10437+ return NULL;
10438 }
10439
10440 /*
10441@@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10442 }
10443 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10444
10445-void lock_vector_lock(void)
10446+void lock_vector_lock(void) __acquires(vector_lock)
10447 {
10448 /* Used to the online set of cpus does not change
10449 * during assign_irq_vector.
10450@@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10451 raw_spin_lock(&vector_lock);
10452 }
10453
10454-void unlock_vector_lock(void)
10455+void unlock_vector_lock(void) __releases(vector_lock)
10456 {
10457 raw_spin_unlock(&vector_lock);
10458 }
10459@@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10460 ack_APIC_irq();
10461 }
10462
10463-atomic_t irq_mis_count;
10464+atomic_unchecked_t irq_mis_count;
10465
10466 /*
10467 * IO-APIC versions below 0x20 don't support EOI register.
10468@@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10469 * at the cpu.
10470 */
10471 if (!(v & (1 << (i & 0x1f)))) {
10472- atomic_inc(&irq_mis_count);
10473+ atomic_inc_unchecked(&irq_mis_count);
10474
10475 eoi_ioapic_irq(irq, cfg);
10476 }
10477diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10478--- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10479+++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10480@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10481 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10482 * even though they are called in protected mode.
10483 */
10484-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10485+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10486 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10487
10488 static const char driver_version[] = "1.16ac"; /* no spaces */
10489@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10490 BUG_ON(cpu != 0);
10491 gdt = get_cpu_gdt_table(cpu);
10492 save_desc_40 = gdt[0x40 / 8];
10493+
10494+ pax_open_kernel();
10495 gdt[0x40 / 8] = bad_bios_desc;
10496+ pax_close_kernel();
10497
10498 apm_irq_save(flags);
10499 APM_DO_SAVE_SEGS;
10500@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10501 &call->esi);
10502 APM_DO_RESTORE_SEGS;
10503 apm_irq_restore(flags);
10504+
10505+ pax_open_kernel();
10506 gdt[0x40 / 8] = save_desc_40;
10507+ pax_close_kernel();
10508+
10509 put_cpu();
10510
10511 return call->eax & 0xff;
10512@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10513 BUG_ON(cpu != 0);
10514 gdt = get_cpu_gdt_table(cpu);
10515 save_desc_40 = gdt[0x40 / 8];
10516+
10517+ pax_open_kernel();
10518 gdt[0x40 / 8] = bad_bios_desc;
10519+ pax_close_kernel();
10520
10521 apm_irq_save(flags);
10522 APM_DO_SAVE_SEGS;
10523@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10524 &call->eax);
10525 APM_DO_RESTORE_SEGS;
10526 apm_irq_restore(flags);
10527+
10528+ pax_open_kernel();
10529 gdt[0x40 / 8] = save_desc_40;
10530+ pax_close_kernel();
10531+
10532 put_cpu();
10533 return error;
10534 }
10535@@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10536 * code to that CPU.
10537 */
10538 gdt = get_cpu_gdt_table(0);
10539+
10540+ pax_open_kernel();
10541 set_desc_base(&gdt[APM_CS >> 3],
10542 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10543 set_desc_base(&gdt[APM_CS_16 >> 3],
10544 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10545 set_desc_base(&gdt[APM_DS >> 3],
10546 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10547+ pax_close_kernel();
10548
10549 proc_create("apm", 0, NULL, &apm_file_ops);
10550
10551diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10552--- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10553+++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10554@@ -69,6 +69,7 @@ int main(void)
10555 BLANK();
10556 #undef ENTRY
10557
10558+ DEFINE(TSS_size, sizeof(struct tss_struct));
10559 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10560 BLANK();
10561
10562diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10563--- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10564+++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10565@@ -33,6 +33,8 @@ void common(void) {
10566 OFFSET(TI_status, thread_info, status);
10567 OFFSET(TI_addr_limit, thread_info, addr_limit);
10568 OFFSET(TI_preempt_count, thread_info, preempt_count);
10569+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10570+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10571
10572 BLANK();
10573 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10574@@ -53,8 +55,26 @@ void common(void) {
10575 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10576 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10577 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10578+
10579+#ifdef CONFIG_PAX_KERNEXEC
10580+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10581+#endif
10582+
10583+#ifdef CONFIG_PAX_MEMORY_UDEREF
10584+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10585+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10586+#ifdef CONFIG_X86_64
10587+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10588+#endif
10589 #endif
10590
10591+#endif
10592+
10593+ BLANK();
10594+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10595+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10596+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10597+
10598 #ifdef CONFIG_XEN
10599 BLANK();
10600 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10601diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10602--- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10603+++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10604@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10605 unsigned int size)
10606 {
10607 /* AMD errata T13 (order #21922) */
10608- if ((c->x86 == 6)) {
10609+ if (c->x86 == 6) {
10610 /* Duron Rev A0 */
10611 if (c->x86_model == 3 && c->x86_mask == 0)
10612 size = 64;
10613diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10614--- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10615+++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10616@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10617
10618 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10619
10620-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10621-#ifdef CONFIG_X86_64
10622- /*
10623- * We need valid kernel segments for data and code in long mode too
10624- * IRET will check the segment types kkeil 2000/10/28
10625- * Also sysret mandates a special GDT layout
10626- *
10627- * TLS descriptors are currently at a different place compared to i386.
10628- * Hopefully nobody expects them at a fixed place (Wine?)
10629- */
10630- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10631- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10632- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10633- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10634- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10635- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10636-#else
10637- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10638- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10639- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10640- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10641- /*
10642- * Segments used for calling PnP BIOS have byte granularity.
10643- * They code segments and data segments have fixed 64k limits,
10644- * the transfer segment sizes are set at run time.
10645- */
10646- /* 32-bit code */
10647- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10648- /* 16-bit code */
10649- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10650- /* 16-bit data */
10651- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10652- /* 16-bit data */
10653- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10654- /* 16-bit data */
10655- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10656- /*
10657- * The APM segments have byte granularity and their bases
10658- * are set at run time. All have 64k limits.
10659- */
10660- /* 32-bit code */
10661- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662- /* 16-bit code */
10663- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10664- /* data */
10665- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10666-
10667- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10668- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10669- GDT_STACK_CANARY_INIT
10670-#endif
10671-} };
10672-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10673-
10674 static int __init x86_xsave_setup(char *s)
10675 {
10676 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10677@@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10678 {
10679 struct desc_ptr gdt_descr;
10680
10681- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10682+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10683 gdt_descr.size = GDT_SIZE - 1;
10684 load_gdt(&gdt_descr);
10685 /* Reload the per-cpu base */
10686@@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10687 /* Filter out anything that depends on CPUID levels we don't have */
10688 filter_cpuid_features(c, true);
10689
10690+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10691+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10692+#endif
10693+
10694 /* If the model name is still unset, do table lookup. */
10695 if (!c->x86_model_id[0]) {
10696 const char *p;
10697@@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10698 }
10699 __setup("clearcpuid=", setup_disablecpuid);
10700
10701+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10702+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10703+
10704 #ifdef CONFIG_X86_64
10705 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10706
10707@@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10708 EXPORT_PER_CPU_SYMBOL(current_task);
10709
10710 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10711- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10712+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10713 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10714
10715 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10716@@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10717 {
10718 memset(regs, 0, sizeof(struct pt_regs));
10719 regs->fs = __KERNEL_PERCPU;
10720- regs->gs = __KERNEL_STACK_CANARY;
10721+ savesegment(gs, regs->gs);
10722
10723 return regs;
10724 }
10725@@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10726 int i;
10727
10728 cpu = stack_smp_processor_id();
10729- t = &per_cpu(init_tss, cpu);
10730+ t = init_tss + cpu;
10731 oist = &per_cpu(orig_ist, cpu);
10732
10733 #ifdef CONFIG_NUMA
10734@@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10735 switch_to_new_gdt(cpu);
10736 loadsegment(fs, 0);
10737
10738- load_idt((const struct desc_ptr *)&idt_descr);
10739+ load_idt(&idt_descr);
10740
10741 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10742 syscall_init();
10743@@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10744 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10745 barrier();
10746
10747- x86_configure_nx();
10748 if (cpu != 0)
10749 enable_x2apic();
10750
10751@@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10752 {
10753 int cpu = smp_processor_id();
10754 struct task_struct *curr = current;
10755- struct tss_struct *t = &per_cpu(init_tss, cpu);
10756+ struct tss_struct *t = init_tss + cpu;
10757 struct thread_struct *thread = &curr->thread;
10758
10759 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10760diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10761--- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10762+++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10763@@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10764 * Update the IDT descriptor and reload the IDT so that
10765 * it uses the read-only mapped virtual address.
10766 */
10767- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10768+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10769 load_idt(&idt_descr);
10770 }
10771 #endif
10772diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10773--- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10774+++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10775@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10776 CFLAGS_REMOVE_perf_event.o = -pg
10777 endif
10778
10779-# Make sure load_percpu_segment has no stackprotector
10780-nostackp := $(call cc-option, -fno-stack-protector)
10781-CFLAGS_common.o := $(nostackp)
10782-
10783 obj-y := intel_cacheinfo.o scattered.o topology.o
10784 obj-y += proc.o capflags.o powerflags.o common.o
10785 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10786diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10787--- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10788+++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10789@@ -46,6 +46,7 @@
10790 #include <asm/ipi.h>
10791 #include <asm/mce.h>
10792 #include <asm/msr.h>
10793+#include <asm/local.h>
10794
10795 #include "mce-internal.h"
10796
10797@@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10798 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10799 m->cs, m->ip);
10800
10801- if (m->cs == __KERNEL_CS)
10802+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10803 print_symbol("{%s}", m->ip);
10804 pr_cont("\n");
10805 }
10806@@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10807
10808 #define PANIC_TIMEOUT 5 /* 5 seconds */
10809
10810-static atomic_t mce_paniced;
10811+static atomic_unchecked_t mce_paniced;
10812
10813 static int fake_panic;
10814-static atomic_t mce_fake_paniced;
10815+static atomic_unchecked_t mce_fake_paniced;
10816
10817 /* Panic in progress. Enable interrupts and wait for final IPI */
10818 static void wait_for_panic(void)
10819@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10820 /*
10821 * Make sure only one CPU runs in machine check panic
10822 */
10823- if (atomic_inc_return(&mce_paniced) > 1)
10824+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10825 wait_for_panic();
10826 barrier();
10827
10828@@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10829 console_verbose();
10830 } else {
10831 /* Don't log too much for fake panic */
10832- if (atomic_inc_return(&mce_fake_paniced) > 1)
10833+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10834 return;
10835 }
10836 /* First print corrected ones that are still unlogged */
10837@@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10838 * might have been modified by someone else.
10839 */
10840 rmb();
10841- if (atomic_read(&mce_paniced))
10842+ if (atomic_read_unchecked(&mce_paniced))
10843 wait_for_panic();
10844 if (!monarch_timeout)
10845 goto out;
10846@@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10847 */
10848
10849 static DEFINE_SPINLOCK(mce_state_lock);
10850-static int open_count; /* #times opened */
10851+static local_t open_count; /* #times opened */
10852 static int open_exclu; /* already open exclusive? */
10853
10854 static int mce_open(struct inode *inode, struct file *file)
10855 {
10856 spin_lock(&mce_state_lock);
10857
10858- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10859+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10860 spin_unlock(&mce_state_lock);
10861
10862 return -EBUSY;
10863@@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10864
10865 if (file->f_flags & O_EXCL)
10866 open_exclu = 1;
10867- open_count++;
10868+ local_inc(&open_count);
10869
10870 spin_unlock(&mce_state_lock);
10871
10872@@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10873 {
10874 spin_lock(&mce_state_lock);
10875
10876- open_count--;
10877+ local_dec(&open_count);
10878 open_exclu = 0;
10879
10880 spin_unlock(&mce_state_lock);
10881@@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10882 static void mce_reset(void)
10883 {
10884 cpu_missing = 0;
10885- atomic_set(&mce_fake_paniced, 0);
10886+ atomic_set_unchecked(&mce_fake_paniced, 0);
10887 atomic_set(&mce_executing, 0);
10888 atomic_set(&mce_callin, 0);
10889 atomic_set(&global_nwo, 0);
10890diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10891--- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10892+++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10893@@ -215,7 +215,9 @@ static int inject_init(void)
10894 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10895 return -ENOMEM;
10896 printk(KERN_INFO "Machine check injector initialized\n");
10897- mce_chrdev_ops.write = mce_write;
10898+ pax_open_kernel();
10899+ *(void **)&mce_chrdev_ops.write = mce_write;
10900+ pax_close_kernel();
10901 register_die_notifier(&mce_raise_nb);
10902 return 0;
10903 }
10904diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10905--- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10906+++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10907@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10908 u64 size_or_mask, size_and_mask;
10909 static bool mtrr_aps_delayed_init;
10910
10911-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10912+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10913
10914 const struct mtrr_ops *mtrr_if;
10915
10916diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10917--- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10918+++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10919@@ -12,8 +12,8 @@
10920 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10921
10922 struct mtrr_ops {
10923- u32 vendor;
10924- u32 use_intel_if;
10925+ const u32 vendor;
10926+ const u32 use_intel_if;
10927 void (*set)(unsigned int reg, unsigned long base,
10928 unsigned long size, mtrr_type type);
10929 void (*set_all)(void);
10930diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10931--- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10932+++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10933@@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10934 int i, j, w, wmax, num = 0;
10935 struct hw_perf_event *hwc;
10936
10937+ pax_track_stack();
10938+
10939 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10940
10941 for (i = 0; i < n; i++) {
10942@@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10943 break;
10944
10945 perf_callchain_store(entry, frame.return_address);
10946- fp = frame.next_frame;
10947+ fp = (__force const void __user *)frame.next_frame;
10948 }
10949 }
10950
10951diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10952--- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10953+++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10954@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10955 regs = args->regs;
10956
10957 #ifdef CONFIG_X86_32
10958- if (!user_mode_vm(regs)) {
10959+ if (!user_mode(regs)) {
10960 crash_fixup_ss_esp(&fixed_regs, regs);
10961 regs = &fixed_regs;
10962 }
10963diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
10964--- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
10965+++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
10966@@ -11,7 +11,7 @@
10967
10968 #define DOUBLEFAULT_STACKSIZE (1024)
10969 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10970-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10971+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10972
10973 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10974
10975@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10976 unsigned long gdt, tss;
10977
10978 store_gdt(&gdt_desc);
10979- gdt = gdt_desc.address;
10980+ gdt = (unsigned long)gdt_desc.address;
10981
10982 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10983
10984@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10985 /* 0x2 bit is always set */
10986 .flags = X86_EFLAGS_SF | 0x2,
10987 .sp = STACK_START,
10988- .es = __USER_DS,
10989+ .es = __KERNEL_DS,
10990 .cs = __KERNEL_CS,
10991 .ss = __KERNEL_DS,
10992- .ds = __USER_DS,
10993+ .ds = __KERNEL_DS,
10994 .fs = __KERNEL_PERCPU,
10995
10996 .__cr3 = __pa_nodebug(swapper_pg_dir),
10997diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
10998--- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
10999+++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11000@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11001 bp = stack_frame(task, regs);
11002
11003 for (;;) {
11004- struct thread_info *context;
11005+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11006
11007- context = (struct thread_info *)
11008- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11009- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11010+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11011
11012- stack = (unsigned long *)context->previous_esp;
11013- if (!stack)
11014+ if (stack_start == task_stack_page(task))
11015 break;
11016+ stack = *(unsigned long **)stack_start;
11017 if (ops->stack(data, "IRQ") < 0)
11018 break;
11019 touch_nmi_watchdog();
11020@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11021 * When in-kernel, we also print out the stack and code at the
11022 * time of the fault..
11023 */
11024- if (!user_mode_vm(regs)) {
11025+ if (!user_mode(regs)) {
11026 unsigned int code_prologue = code_bytes * 43 / 64;
11027 unsigned int code_len = code_bytes;
11028 unsigned char c;
11029 u8 *ip;
11030+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11031
11032 printk(KERN_EMERG "Stack:\n");
11033 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11034
11035 printk(KERN_EMERG "Code: ");
11036
11037- ip = (u8 *)regs->ip - code_prologue;
11038+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11039 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11040 /* try starting at IP */
11041- ip = (u8 *)regs->ip;
11042+ ip = (u8 *)regs->ip + cs_base;
11043 code_len = code_len - code_prologue + 1;
11044 }
11045 for (i = 0; i < code_len; i++, ip++) {
11046@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11047 printk(" Bad EIP value.");
11048 break;
11049 }
11050- if (ip == (u8 *)regs->ip)
11051+ if (ip == (u8 *)regs->ip + cs_base)
11052 printk("<%02x> ", c);
11053 else
11054 printk("%02x ", c);
11055@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11056 {
11057 unsigned short ud2;
11058
11059+ ip = ktla_ktva(ip);
11060 if (ip < PAGE_OFFSET)
11061 return 0;
11062 if (probe_kernel_address((unsigned short *)ip, ud2))
11063diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11064--- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11065+++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11066@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11067 unsigned long *irq_stack_end =
11068 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11069 unsigned used = 0;
11070- struct thread_info *tinfo;
11071 int graph = 0;
11072 unsigned long dummy;
11073+ void *stack_start;
11074
11075 if (!task)
11076 task = current;
11077@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11078 * current stack address. If the stacks consist of nested
11079 * exceptions
11080 */
11081- tinfo = task_thread_info(task);
11082 for (;;) {
11083 char *id;
11084 unsigned long *estack_end;
11085+
11086 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11087 &used, &id);
11088
11089@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11090 if (ops->stack(data, id) < 0)
11091 break;
11092
11093- bp = ops->walk_stack(tinfo, stack, bp, ops,
11094+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11095 data, estack_end, &graph);
11096 ops->stack(data, "<EOE>");
11097 /*
11098@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11099 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11100 if (ops->stack(data, "IRQ") < 0)
11101 break;
11102- bp = ops->walk_stack(tinfo, stack, bp,
11103+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11104 ops, data, irq_stack_end, &graph);
11105 /*
11106 * We link to the next stack (which would be
11107@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11108 /*
11109 * This handles the process stack:
11110 */
11111- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11112+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11113+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11114 put_cpu();
11115 }
11116 EXPORT_SYMBOL(dump_trace);
11117diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11118--- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11119+++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11120@@ -2,6 +2,9 @@
11121 * Copyright (C) 1991, 1992 Linus Torvalds
11122 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11123 */
11124+#ifdef CONFIG_GRKERNSEC_HIDESYM
11125+#define __INCLUDED_BY_HIDESYM 1
11126+#endif
11127 #include <linux/kallsyms.h>
11128 #include <linux/kprobes.h>
11129 #include <linux/uaccess.h>
11130@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11131 static void
11132 print_ftrace_graph_addr(unsigned long addr, void *data,
11133 const struct stacktrace_ops *ops,
11134- struct thread_info *tinfo, int *graph)
11135+ struct task_struct *task, int *graph)
11136 {
11137- struct task_struct *task = tinfo->task;
11138 unsigned long ret_addr;
11139 int index = task->curr_ret_stack;
11140
11141@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11142 static inline void
11143 print_ftrace_graph_addr(unsigned long addr, void *data,
11144 const struct stacktrace_ops *ops,
11145- struct thread_info *tinfo, int *graph)
11146+ struct task_struct *task, int *graph)
11147 { }
11148 #endif
11149
11150@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11151 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11152 */
11153
11154-static inline int valid_stack_ptr(struct thread_info *tinfo,
11155- void *p, unsigned int size, void *end)
11156+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11157 {
11158- void *t = tinfo;
11159 if (end) {
11160 if (p < end && p >= (end-THREAD_SIZE))
11161 return 1;
11162@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11163 }
11164
11165 unsigned long
11166-print_context_stack(struct thread_info *tinfo,
11167+print_context_stack(struct task_struct *task, void *stack_start,
11168 unsigned long *stack, unsigned long bp,
11169 const struct stacktrace_ops *ops, void *data,
11170 unsigned long *end, int *graph)
11171 {
11172 struct stack_frame *frame = (struct stack_frame *)bp;
11173
11174- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11175+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11176 unsigned long addr;
11177
11178 addr = *stack;
11179@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11180 } else {
11181 ops->address(data, addr, 0);
11182 }
11183- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11184+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11185 }
11186 stack++;
11187 }
11188@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11189 EXPORT_SYMBOL_GPL(print_context_stack);
11190
11191 unsigned long
11192-print_context_stack_bp(struct thread_info *tinfo,
11193+print_context_stack_bp(struct task_struct *task, void *stack_start,
11194 unsigned long *stack, unsigned long bp,
11195 const struct stacktrace_ops *ops, void *data,
11196 unsigned long *end, int *graph)
11197@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11198 struct stack_frame *frame = (struct stack_frame *)bp;
11199 unsigned long *ret_addr = &frame->return_address;
11200
11201- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11202+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11203 unsigned long addr = *ret_addr;
11204
11205 if (!__kernel_text_address(addr))
11206@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11207 ops->address(data, addr, 1);
11208 frame = frame->next_frame;
11209 ret_addr = &frame->return_address;
11210- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11211+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11212 }
11213
11214 return (unsigned long)frame;
11215@@ -202,7 +202,7 @@ void dump_stack(void)
11216
11217 bp = stack_frame(current, NULL);
11218 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11219- current->pid, current->comm, print_tainted(),
11220+ task_pid_nr(current), current->comm, print_tainted(),
11221 init_utsname()->release,
11222 (int)strcspn(init_utsname()->version, " "),
11223 init_utsname()->version);
11224@@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11225 }
11226 EXPORT_SYMBOL_GPL(oops_begin);
11227
11228+extern void gr_handle_kernel_exploit(void);
11229+
11230 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11231 {
11232 if (regs && kexec_should_crash(current))
11233@@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11234 panic("Fatal exception in interrupt");
11235 if (panic_on_oops)
11236 panic("Fatal exception");
11237- do_exit(signr);
11238+
11239+ gr_handle_kernel_exploit();
11240+
11241+ do_group_exit(signr);
11242 }
11243
11244 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11245@@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11246
11247 show_registers(regs);
11248 #ifdef CONFIG_X86_32
11249- if (user_mode_vm(regs)) {
11250+ if (user_mode(regs)) {
11251 sp = regs->sp;
11252 ss = regs->ss & 0xffff;
11253 } else {
11254@@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11255 unsigned long flags = oops_begin();
11256 int sig = SIGSEGV;
11257
11258- if (!user_mode_vm(regs))
11259+ if (!user_mode(regs))
11260 report_bug(regs->ip, regs);
11261
11262 if (__die(str, regs, err))
11263diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11264--- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11265+++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11266@@ -7,6 +7,7 @@
11267 #include <linux/pci_regs.h>
11268 #include <linux/pci_ids.h>
11269 #include <linux/errno.h>
11270+#include <linux/sched.h>
11271 #include <asm/io.h>
11272 #include <asm/processor.h>
11273 #include <asm/fcntl.h>
11274@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11275 int n;
11276 va_list ap;
11277
11278+ pax_track_stack();
11279+
11280 va_start(ap, fmt);
11281 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11282 early_console->write(early_console, buf, n);
11283diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11284--- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11285+++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11286@@ -185,13 +185,146 @@
11287 /*CFI_REL_OFFSET gs, PT_GS*/
11288 .endm
11289 .macro SET_KERNEL_GS reg
11290+
11291+#ifdef CONFIG_CC_STACKPROTECTOR
11292 movl $(__KERNEL_STACK_CANARY), \reg
11293+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11294+ movl $(__USER_DS), \reg
11295+#else
11296+ xorl \reg, \reg
11297+#endif
11298+
11299 movl \reg, %gs
11300 .endm
11301
11302 #endif /* CONFIG_X86_32_LAZY_GS */
11303
11304-.macro SAVE_ALL
11305+.macro pax_enter_kernel
11306+#ifdef CONFIG_PAX_KERNEXEC
11307+ call pax_enter_kernel
11308+#endif
11309+.endm
11310+
11311+.macro pax_exit_kernel
11312+#ifdef CONFIG_PAX_KERNEXEC
11313+ call pax_exit_kernel
11314+#endif
11315+.endm
11316+
11317+#ifdef CONFIG_PAX_KERNEXEC
11318+ENTRY(pax_enter_kernel)
11319+#ifdef CONFIG_PARAVIRT
11320+ pushl %eax
11321+ pushl %ecx
11322+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11323+ mov %eax, %esi
11324+#else
11325+ mov %cr0, %esi
11326+#endif
11327+ bts $16, %esi
11328+ jnc 1f
11329+ mov %cs, %esi
11330+ cmp $__KERNEL_CS, %esi
11331+ jz 3f
11332+ ljmp $__KERNEL_CS, $3f
11333+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11334+2:
11335+#ifdef CONFIG_PARAVIRT
11336+ mov %esi, %eax
11337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11338+#else
11339+ mov %esi, %cr0
11340+#endif
11341+3:
11342+#ifdef CONFIG_PARAVIRT
11343+ popl %ecx
11344+ popl %eax
11345+#endif
11346+ ret
11347+ENDPROC(pax_enter_kernel)
11348+
11349+ENTRY(pax_exit_kernel)
11350+#ifdef CONFIG_PARAVIRT
11351+ pushl %eax
11352+ pushl %ecx
11353+#endif
11354+ mov %cs, %esi
11355+ cmp $__KERNEXEC_KERNEL_CS, %esi
11356+ jnz 2f
11357+#ifdef CONFIG_PARAVIRT
11358+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11359+ mov %eax, %esi
11360+#else
11361+ mov %cr0, %esi
11362+#endif
11363+ btr $16, %esi
11364+ ljmp $__KERNEL_CS, $1f
11365+1:
11366+#ifdef CONFIG_PARAVIRT
11367+ mov %esi, %eax
11368+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11369+#else
11370+ mov %esi, %cr0
11371+#endif
11372+2:
11373+#ifdef CONFIG_PARAVIRT
11374+ popl %ecx
11375+ popl %eax
11376+#endif
11377+ ret
11378+ENDPROC(pax_exit_kernel)
11379+#endif
11380+
11381+.macro pax_erase_kstack
11382+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11383+ call pax_erase_kstack
11384+#endif
11385+.endm
11386+
11387+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11388+/*
11389+ * ebp: thread_info
11390+ * ecx, edx: can be clobbered
11391+ */
11392+ENTRY(pax_erase_kstack)
11393+ pushl %edi
11394+ pushl %eax
11395+
11396+ mov TI_lowest_stack(%ebp), %edi
11397+ mov $-0xBEEF, %eax
11398+ std
11399+
11400+1: mov %edi, %ecx
11401+ and $THREAD_SIZE_asm - 1, %ecx
11402+ shr $2, %ecx
11403+ repne scasl
11404+ jecxz 2f
11405+
11406+ cmp $2*16, %ecx
11407+ jc 2f
11408+
11409+ mov $2*16, %ecx
11410+ repe scasl
11411+ jecxz 2f
11412+ jne 1b
11413+
11414+2: cld
11415+ mov %esp, %ecx
11416+ sub %edi, %ecx
11417+ shr $2, %ecx
11418+ rep stosl
11419+
11420+ mov TI_task_thread_sp0(%ebp), %edi
11421+ sub $128, %edi
11422+ mov %edi, TI_lowest_stack(%ebp)
11423+
11424+ popl %eax
11425+ popl %edi
11426+ ret
11427+ENDPROC(pax_erase_kstack)
11428+#endif
11429+
11430+.macro __SAVE_ALL _DS
11431 cld
11432 PUSH_GS
11433 pushl_cfi %fs
11434@@ -214,7 +347,7 @@
11435 CFI_REL_OFFSET ecx, 0
11436 pushl_cfi %ebx
11437 CFI_REL_OFFSET ebx, 0
11438- movl $(__USER_DS), %edx
11439+ movl $\_DS, %edx
11440 movl %edx, %ds
11441 movl %edx, %es
11442 movl $(__KERNEL_PERCPU), %edx
11443@@ -222,6 +355,15 @@
11444 SET_KERNEL_GS %edx
11445 .endm
11446
11447+.macro SAVE_ALL
11448+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11449+ __SAVE_ALL __KERNEL_DS
11450+ pax_enter_kernel
11451+#else
11452+ __SAVE_ALL __USER_DS
11453+#endif
11454+.endm
11455+
11456 .macro RESTORE_INT_REGS
11457 popl_cfi %ebx
11458 CFI_RESTORE ebx
11459@@ -332,7 +474,15 @@ check_userspace:
11460 movb PT_CS(%esp), %al
11461 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11462 cmpl $USER_RPL, %eax
11463+
11464+#ifdef CONFIG_PAX_KERNEXEC
11465+ jae resume_userspace
11466+
11467+ PAX_EXIT_KERNEL
11468+ jmp resume_kernel
11469+#else
11470 jb resume_kernel # not returning to v8086 or userspace
11471+#endif
11472
11473 ENTRY(resume_userspace)
11474 LOCKDEP_SYS_EXIT
11475@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11476 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11477 # int/exception return?
11478 jne work_pending
11479- jmp restore_all
11480+ jmp restore_all_pax
11481 END(ret_from_exception)
11482
11483 #ifdef CONFIG_PREEMPT
11484@@ -394,23 +544,34 @@ sysenter_past_esp:
11485 /*CFI_REL_OFFSET cs, 0*/
11486 /*
11487 * Push current_thread_info()->sysenter_return to the stack.
11488- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11489- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11490 */
11491- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11492+ pushl_cfi $0
11493 CFI_REL_OFFSET eip, 0
11494
11495 pushl_cfi %eax
11496 SAVE_ALL
11497+ GET_THREAD_INFO(%ebp)
11498+ movl TI_sysenter_return(%ebp),%ebp
11499+ movl %ebp,PT_EIP(%esp)
11500 ENABLE_INTERRUPTS(CLBR_NONE)
11501
11502 /*
11503 * Load the potential sixth argument from user stack.
11504 * Careful about security.
11505 */
11506+ movl PT_OLDESP(%esp),%ebp
11507+
11508+#ifdef CONFIG_PAX_MEMORY_UDEREF
11509+ mov PT_OLDSS(%esp),%ds
11510+1: movl %ds:(%ebp),%ebp
11511+ push %ss
11512+ pop %ds
11513+#else
11514 cmpl $__PAGE_OFFSET-3,%ebp
11515 jae syscall_fault
11516 1: movl (%ebp),%ebp
11517+#endif
11518+
11519 movl %ebp,PT_EBP(%esp)
11520 .section __ex_table,"a"
11521 .align 4
11522@@ -433,12 +594,23 @@ sysenter_do_call:
11523 testl $_TIF_ALLWORK_MASK, %ecx
11524 jne sysexit_audit
11525 sysenter_exit:
11526+
11527+#ifdef CONFIG_PAX_RANDKSTACK
11528+ pushl_cfi %eax
11529+ call pax_randomize_kstack
11530+ popl_cfi %eax
11531+#endif
11532+
11533+ pax_erase_kstack
11534+
11535 /* if something modifies registers it must also disable sysexit */
11536 movl PT_EIP(%esp), %edx
11537 movl PT_OLDESP(%esp), %ecx
11538 xorl %ebp,%ebp
11539 TRACE_IRQS_ON
11540 1: mov PT_FS(%esp), %fs
11541+2: mov PT_DS(%esp), %ds
11542+3: mov PT_ES(%esp), %es
11543 PTGS_TO_GS
11544 ENABLE_INTERRUPTS_SYSEXIT
11545
11546@@ -455,6 +627,9 @@ sysenter_audit:
11547 movl %eax,%edx /* 2nd arg: syscall number */
11548 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11549 call audit_syscall_entry
11550+
11551+ pax_erase_kstack
11552+
11553 pushl_cfi %ebx
11554 movl PT_EAX(%esp),%eax /* reload syscall number */
11555 jmp sysenter_do_call
11556@@ -481,11 +656,17 @@ sysexit_audit:
11557
11558 CFI_ENDPROC
11559 .pushsection .fixup,"ax"
11560-2: movl $0,PT_FS(%esp)
11561+4: movl $0,PT_FS(%esp)
11562+ jmp 1b
11563+5: movl $0,PT_DS(%esp)
11564+ jmp 1b
11565+6: movl $0,PT_ES(%esp)
11566 jmp 1b
11567 .section __ex_table,"a"
11568 .align 4
11569- .long 1b,2b
11570+ .long 1b,4b
11571+ .long 2b,5b
11572+ .long 3b,6b
11573 .popsection
11574 PTGS_TO_GS_EX
11575 ENDPROC(ia32_sysenter_target)
11576@@ -518,6 +699,14 @@ syscall_exit:
11577 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11578 jne syscall_exit_work
11579
11580+restore_all_pax:
11581+
11582+#ifdef CONFIG_PAX_RANDKSTACK
11583+ call pax_randomize_kstack
11584+#endif
11585+
11586+ pax_erase_kstack
11587+
11588 restore_all:
11589 TRACE_IRQS_IRET
11590 restore_all_notrace:
11591@@ -577,14 +766,21 @@ ldt_ss:
11592 * compensating for the offset by changing to the ESPFIX segment with
11593 * a base address that matches for the difference.
11594 */
11595-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11596+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11597 mov %esp, %edx /* load kernel esp */
11598 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11599 mov %dx, %ax /* eax: new kernel esp */
11600 sub %eax, %edx /* offset (low word is 0) */
11601+#ifdef CONFIG_SMP
11602+ movl PER_CPU_VAR(cpu_number), %ebx
11603+ shll $PAGE_SHIFT_asm, %ebx
11604+ addl $cpu_gdt_table, %ebx
11605+#else
11606+ movl $cpu_gdt_table, %ebx
11607+#endif
11608 shr $16, %edx
11609- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11610- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11611+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11612+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11613 pushl_cfi $__ESPFIX_SS
11614 pushl_cfi %eax /* new kernel esp */
11615 /* Disable interrupts, but do not irqtrace this section: we
11616@@ -613,29 +809,23 @@ work_resched:
11617 movl TI_flags(%ebp), %ecx
11618 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11619 # than syscall tracing?
11620- jz restore_all
11621+ jz restore_all_pax
11622 testb $_TIF_NEED_RESCHED, %cl
11623 jnz work_resched
11624
11625 work_notifysig: # deal with pending signals and
11626 # notify-resume requests
11627+ movl %esp, %eax
11628 #ifdef CONFIG_VM86
11629 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11630- movl %esp, %eax
11631- jne work_notifysig_v86 # returning to kernel-space or
11632+ jz 1f # returning to kernel-space or
11633 # vm86-space
11634- xorl %edx, %edx
11635- call do_notify_resume
11636- jmp resume_userspace_sig
11637
11638- ALIGN
11639-work_notifysig_v86:
11640 pushl_cfi %ecx # save ti_flags for do_notify_resume
11641 call save_v86_state # %eax contains pt_regs pointer
11642 popl_cfi %ecx
11643 movl %eax, %esp
11644-#else
11645- movl %esp, %eax
11646+1:
11647 #endif
11648 xorl %edx, %edx
11649 call do_notify_resume
11650@@ -648,6 +838,9 @@ syscall_trace_entry:
11651 movl $-ENOSYS,PT_EAX(%esp)
11652 movl %esp, %eax
11653 call syscall_trace_enter
11654+
11655+ pax_erase_kstack
11656+
11657 /* What it returned is what we'll actually use. */
11658 cmpl $(nr_syscalls), %eax
11659 jnae syscall_call
11660@@ -670,6 +863,10 @@ END(syscall_exit_work)
11661
11662 RING0_INT_FRAME # can't unwind into user space anyway
11663 syscall_fault:
11664+#ifdef CONFIG_PAX_MEMORY_UDEREF
11665+ push %ss
11666+ pop %ds
11667+#endif
11668 GET_THREAD_INFO(%ebp)
11669 movl $-EFAULT,PT_EAX(%esp)
11670 jmp resume_userspace
11671@@ -752,6 +949,36 @@ ptregs_clone:
11672 CFI_ENDPROC
11673 ENDPROC(ptregs_clone)
11674
11675+ ALIGN;
11676+ENTRY(kernel_execve)
11677+ CFI_STARTPROC
11678+ pushl_cfi %ebp
11679+ sub $PT_OLDSS+4,%esp
11680+ pushl_cfi %edi
11681+ pushl_cfi %ecx
11682+ pushl_cfi %eax
11683+ lea 3*4(%esp),%edi
11684+ mov $PT_OLDSS/4+1,%ecx
11685+ xorl %eax,%eax
11686+ rep stosl
11687+ popl_cfi %eax
11688+ popl_cfi %ecx
11689+ popl_cfi %edi
11690+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11691+ pushl_cfi %esp
11692+ call sys_execve
11693+ add $4,%esp
11694+ CFI_ADJUST_CFA_OFFSET -4
11695+ GET_THREAD_INFO(%ebp)
11696+ test %eax,%eax
11697+ jz syscall_exit
11698+ add $PT_OLDSS+4,%esp
11699+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11700+ popl_cfi %ebp
11701+ ret
11702+ CFI_ENDPROC
11703+ENDPROC(kernel_execve)
11704+
11705 .macro FIXUP_ESPFIX_STACK
11706 /*
11707 * Switch back for ESPFIX stack to the normal zerobased stack
11708@@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11709 * normal stack and adjusts ESP with the matching offset.
11710 */
11711 /* fixup the stack */
11712- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11713- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11714+#ifdef CONFIG_SMP
11715+ movl PER_CPU_VAR(cpu_number), %ebx
11716+ shll $PAGE_SHIFT_asm, %ebx
11717+ addl $cpu_gdt_table, %ebx
11718+#else
11719+ movl $cpu_gdt_table, %ebx
11720+#endif
11721+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11722+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11723 shl $16, %eax
11724 addl %esp, %eax /* the adjusted stack pointer */
11725 pushl_cfi $__KERNEL_DS
11726@@ -1213,7 +1447,6 @@ return_to_handler:
11727 jmp *%ecx
11728 #endif
11729
11730-.section .rodata,"a"
11731 #include "syscall_table_32.S"
11732
11733 syscall_table_size=(.-sys_call_table)
11734@@ -1259,9 +1492,12 @@ error_code:
11735 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11736 REG_TO_PTGS %ecx
11737 SET_KERNEL_GS %ecx
11738- movl $(__USER_DS), %ecx
11739+ movl $(__KERNEL_DS), %ecx
11740 movl %ecx, %ds
11741 movl %ecx, %es
11742+
11743+ pax_enter_kernel
11744+
11745 TRACE_IRQS_OFF
11746 movl %esp,%eax # pt_regs pointer
11747 call *%edi
11748@@ -1346,6 +1582,9 @@ nmi_stack_correct:
11749 xorl %edx,%edx # zero error code
11750 movl %esp,%eax # pt_regs pointer
11751 call do_nmi
11752+
11753+ pax_exit_kernel
11754+
11755 jmp restore_all_notrace
11756 CFI_ENDPROC
11757
11758@@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11759 FIXUP_ESPFIX_STACK # %eax == %esp
11760 xorl %edx,%edx # zero error code
11761 call do_nmi
11762+
11763+ pax_exit_kernel
11764+
11765 RESTORE_REGS
11766 lss 12+4(%esp), %esp # back to espfix stack
11767 CFI_ADJUST_CFA_OFFSET -24
11768diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11769--- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11770+++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11771@@ -53,6 +53,7 @@
11772 #include <asm/paravirt.h>
11773 #include <asm/ftrace.h>
11774 #include <asm/percpu.h>
11775+#include <asm/pgtable.h>
11776
11777 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11778 #include <linux/elf-em.h>
11779@@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11780 ENDPROC(native_usergs_sysret64)
11781 #endif /* CONFIG_PARAVIRT */
11782
11783+ .macro ljmpq sel, off
11784+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11785+ .byte 0x48; ljmp *1234f(%rip)
11786+ .pushsection .rodata
11787+ .align 16
11788+ 1234: .quad \off; .word \sel
11789+ .popsection
11790+#else
11791+ pushq $\sel
11792+ pushq $\off
11793+ lretq
11794+#endif
11795+ .endm
11796+
11797+ .macro pax_enter_kernel
11798+#ifdef CONFIG_PAX_KERNEXEC
11799+ call pax_enter_kernel
11800+#endif
11801+ .endm
11802+
11803+ .macro pax_exit_kernel
11804+#ifdef CONFIG_PAX_KERNEXEC
11805+ call pax_exit_kernel
11806+#endif
11807+ .endm
11808+
11809+#ifdef CONFIG_PAX_KERNEXEC
11810+ENTRY(pax_enter_kernel)
11811+ pushq %rdi
11812+
11813+#ifdef CONFIG_PARAVIRT
11814+ PV_SAVE_REGS(CLBR_RDI)
11815+#endif
11816+
11817+ GET_CR0_INTO_RDI
11818+ bts $16,%rdi
11819+ jnc 1f
11820+ mov %cs,%edi
11821+ cmp $__KERNEL_CS,%edi
11822+ jz 3f
11823+ ljmpq __KERNEL_CS,3f
11824+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11825+2: SET_RDI_INTO_CR0
11826+3:
11827+
11828+#ifdef CONFIG_PARAVIRT
11829+ PV_RESTORE_REGS(CLBR_RDI)
11830+#endif
11831+
11832+ popq %rdi
11833+ retq
11834+ENDPROC(pax_enter_kernel)
11835+
11836+ENTRY(pax_exit_kernel)
11837+ pushq %rdi
11838+
11839+#ifdef CONFIG_PARAVIRT
11840+ PV_SAVE_REGS(CLBR_RDI)
11841+#endif
11842+
11843+ mov %cs,%rdi
11844+ cmp $__KERNEXEC_KERNEL_CS,%edi
11845+ jnz 2f
11846+ GET_CR0_INTO_RDI
11847+ btr $16,%rdi
11848+ ljmpq __KERNEL_CS,1f
11849+1: SET_RDI_INTO_CR0
11850+2:
11851+
11852+#ifdef CONFIG_PARAVIRT
11853+ PV_RESTORE_REGS(CLBR_RDI);
11854+#endif
11855+
11856+ popq %rdi
11857+ retq
11858+ENDPROC(pax_exit_kernel)
11859+#endif
11860+
11861+ .macro pax_enter_kernel_user
11862+#ifdef CONFIG_PAX_MEMORY_UDEREF
11863+ call pax_enter_kernel_user
11864+#endif
11865+ .endm
11866+
11867+ .macro pax_exit_kernel_user
11868+#ifdef CONFIG_PAX_MEMORY_UDEREF
11869+ call pax_exit_kernel_user
11870+#endif
11871+#ifdef CONFIG_PAX_RANDKSTACK
11872+ push %rax
11873+ call pax_randomize_kstack
11874+ pop %rax
11875+#endif
11876+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11877+ call pax_erase_kstack
11878+#endif
11879+ .endm
11880+
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ENTRY(pax_enter_kernel_user)
11883+ pushq %rdi
11884+ pushq %rbx
11885+
11886+#ifdef CONFIG_PARAVIRT
11887+ PV_SAVE_REGS(CLBR_RDI)
11888+#endif
11889+
11890+ GET_CR3_INTO_RDI
11891+ mov %rdi,%rbx
11892+ add $__START_KERNEL_map,%rbx
11893+ sub phys_base(%rip),%rbx
11894+
11895+#ifdef CONFIG_PARAVIRT
11896+ pushq %rdi
11897+ cmpl $0, pv_info+PARAVIRT_enabled
11898+ jz 1f
11899+ i = 0
11900+ .rept USER_PGD_PTRS
11901+ mov i*8(%rbx),%rsi
11902+ mov $0,%sil
11903+ lea i*8(%rbx),%rdi
11904+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11905+ i = i + 1
11906+ .endr
11907+ jmp 2f
11908+1:
11909+#endif
11910+
11911+ i = 0
11912+ .rept USER_PGD_PTRS
11913+ movb $0,i*8(%rbx)
11914+ i = i + 1
11915+ .endr
11916+
11917+#ifdef CONFIG_PARAVIRT
11918+2: popq %rdi
11919+#endif
11920+ SET_RDI_INTO_CR3
11921+
11922+#ifdef CONFIG_PAX_KERNEXEC
11923+ GET_CR0_INTO_RDI
11924+ bts $16,%rdi
11925+ SET_RDI_INTO_CR0
11926+#endif
11927+
11928+#ifdef CONFIG_PARAVIRT
11929+ PV_RESTORE_REGS(CLBR_RDI)
11930+#endif
11931+
11932+ popq %rbx
11933+ popq %rdi
11934+ retq
11935+ENDPROC(pax_enter_kernel_user)
11936+
11937+ENTRY(pax_exit_kernel_user)
11938+ push %rdi
11939+
11940+#ifdef CONFIG_PARAVIRT
11941+ pushq %rbx
11942+ PV_SAVE_REGS(CLBR_RDI)
11943+#endif
11944+
11945+#ifdef CONFIG_PAX_KERNEXEC
11946+ GET_CR0_INTO_RDI
11947+ btr $16,%rdi
11948+ SET_RDI_INTO_CR0
11949+#endif
11950+
11951+ GET_CR3_INTO_RDI
11952+ add $__START_KERNEL_map,%rdi
11953+ sub phys_base(%rip),%rdi
11954+
11955+#ifdef CONFIG_PARAVIRT
11956+ cmpl $0, pv_info+PARAVIRT_enabled
11957+ jz 1f
11958+ mov %rdi,%rbx
11959+ i = 0
11960+ .rept USER_PGD_PTRS
11961+ mov i*8(%rbx),%rsi
11962+ mov $0x67,%sil
11963+ lea i*8(%rbx),%rdi
11964+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11965+ i = i + 1
11966+ .endr
11967+ jmp 2f
11968+1:
11969+#endif
11970+
11971+ i = 0
11972+ .rept USER_PGD_PTRS
11973+ movb $0x67,i*8(%rdi)
11974+ i = i + 1
11975+ .endr
11976+
11977+#ifdef CONFIG_PARAVIRT
11978+2: PV_RESTORE_REGS(CLBR_RDI)
11979+ popq %rbx
11980+#endif
11981+
11982+ popq %rdi
11983+ retq
11984+ENDPROC(pax_exit_kernel_user)
11985+#endif
11986+
11987+ .macro pax_erase_kstack
11988+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11989+ call pax_erase_kstack
11990+#endif
11991+ .endm
11992+
11993+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11994+/*
11995+ * r10: thread_info
11996+ * rcx, rdx: can be clobbered
11997+ */
11998+ENTRY(pax_erase_kstack)
11999+ pushq %rdi
12000+ pushq %rax
12001+
12002+ GET_THREAD_INFO(%r10)
12003+ mov TI_lowest_stack(%r10), %rdi
12004+ mov $-0xBEEF, %rax
12005+ std
12006+
12007+1: mov %edi, %ecx
12008+ and $THREAD_SIZE_asm - 1, %ecx
12009+ shr $3, %ecx
12010+ repne scasq
12011+ jecxz 2f
12012+
12013+ cmp $2*8, %ecx
12014+ jc 2f
12015+
12016+ mov $2*8, %ecx
12017+ repe scasq
12018+ jecxz 2f
12019+ jne 1b
12020+
12021+2: cld
12022+ mov %esp, %ecx
12023+ sub %edi, %ecx
12024+ shr $3, %ecx
12025+ rep stosq
12026+
12027+ mov TI_task_thread_sp0(%r10), %rdi
12028+ sub $256, %rdi
12029+ mov %rdi, TI_lowest_stack(%r10)
12030+
12031+ popq %rax
12032+ popq %rdi
12033+ ret
12034+ENDPROC(pax_erase_kstack)
12035+#endif
12036
12037 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12038 #ifdef CONFIG_TRACE_IRQFLAGS
12039@@ -318,7 +572,7 @@ ENTRY(save_args)
12040 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12041 movq_cfi rbp, 8 /* push %rbp */
12042 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12043- testl $3, CS(%rdi)
12044+ testb $3, CS(%rdi)
12045 je 1f
12046 SWAPGS
12047 /*
12048@@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12049
12050 RESTORE_REST
12051
12052- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12053+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12054 je int_ret_from_sys_call
12055
12056 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12057@@ -455,7 +709,7 @@ END(ret_from_fork)
12058 ENTRY(system_call)
12059 CFI_STARTPROC simple
12060 CFI_SIGNAL_FRAME
12061- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12062+ CFI_DEF_CFA rsp,0
12063 CFI_REGISTER rip,rcx
12064 /*CFI_REGISTER rflags,r11*/
12065 SWAPGS_UNSAFE_STACK
12066@@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12067
12068 movq %rsp,PER_CPU_VAR(old_rsp)
12069 movq PER_CPU_VAR(kernel_stack),%rsp
12070+ pax_enter_kernel_user
12071 /*
12072 * No need to follow this irqs off/on section - it's straight
12073 * and short:
12074 */
12075 ENABLE_INTERRUPTS(CLBR_NONE)
12076- SAVE_ARGS 8,1
12077+ SAVE_ARGS 8*6,1
12078 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12079 movq %rcx,RIP-ARGOFFSET(%rsp)
12080 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12081@@ -502,6 +757,7 @@ sysret_check:
12082 andl %edi,%edx
12083 jnz sysret_careful
12084 CFI_REMEMBER_STATE
12085+ pax_exit_kernel_user
12086 /*
12087 * sysretq will re-enable interrupts:
12088 */
12089@@ -560,6 +816,9 @@ auditsys:
12090 movq %rax,%rsi /* 2nd arg: syscall number */
12091 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12092 call audit_syscall_entry
12093+
12094+ pax_erase_kstack
12095+
12096 LOAD_ARGS 0 /* reload call-clobbered registers */
12097 jmp system_call_fastpath
12098
12099@@ -590,6 +849,9 @@ tracesys:
12100 FIXUP_TOP_OF_STACK %rdi
12101 movq %rsp,%rdi
12102 call syscall_trace_enter
12103+
12104+ pax_erase_kstack
12105+
12106 /*
12107 * Reload arg registers from stack in case ptrace changed them.
12108 * We don't reload %rax because syscall_trace_enter() returned
12109@@ -611,7 +873,7 @@ tracesys:
12110 GLOBAL(int_ret_from_sys_call)
12111 DISABLE_INTERRUPTS(CLBR_NONE)
12112 TRACE_IRQS_OFF
12113- testl $3,CS-ARGOFFSET(%rsp)
12114+ testb $3,CS-ARGOFFSET(%rsp)
12115 je retint_restore_args
12116 movl $_TIF_ALLWORK_MASK,%edi
12117 /* edi: mask to check */
12118@@ -793,6 +1055,16 @@ END(interrupt)
12119 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12120 call save_args
12121 PARTIAL_FRAME 0
12122+#ifdef CONFIG_PAX_MEMORY_UDEREF
12123+ testb $3, CS(%rdi)
12124+ jnz 1f
12125+ pax_enter_kernel
12126+ jmp 2f
12127+1: pax_enter_kernel_user
12128+2:
12129+#else
12130+ pax_enter_kernel
12131+#endif
12132 call \func
12133 .endm
12134
12135@@ -825,7 +1097,7 @@ ret_from_intr:
12136 CFI_ADJUST_CFA_OFFSET -8
12137 exit_intr:
12138 GET_THREAD_INFO(%rcx)
12139- testl $3,CS-ARGOFFSET(%rsp)
12140+ testb $3,CS-ARGOFFSET(%rsp)
12141 je retint_kernel
12142
12143 /* Interrupt came from user space */
12144@@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12145 * The iretq could re-enable interrupts:
12146 */
12147 DISABLE_INTERRUPTS(CLBR_ANY)
12148+ pax_exit_kernel_user
12149 TRACE_IRQS_IRETQ
12150 SWAPGS
12151 jmp restore_args
12152
12153 retint_restore_args: /* return to kernel space */
12154 DISABLE_INTERRUPTS(CLBR_ANY)
12155+ pax_exit_kernel
12156 /*
12157 * The iretq could re-enable interrupts:
12158 */
12159@@ -1027,6 +1301,16 @@ ENTRY(\sym)
12160 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12161 call error_entry
12162 DEFAULT_FRAME 0
12163+#ifdef CONFIG_PAX_MEMORY_UDEREF
12164+ testb $3, CS(%rsp)
12165+ jnz 1f
12166+ pax_enter_kernel
12167+ jmp 2f
12168+1: pax_enter_kernel_user
12169+2:
12170+#else
12171+ pax_enter_kernel
12172+#endif
12173 movq %rsp,%rdi /* pt_regs pointer */
12174 xorl %esi,%esi /* no error code */
12175 call \do_sym
12176@@ -1044,6 +1328,16 @@ ENTRY(\sym)
12177 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12178 call save_paranoid
12179 TRACE_IRQS_OFF
12180+#ifdef CONFIG_PAX_MEMORY_UDEREF
12181+ testb $3, CS(%rsp)
12182+ jnz 1f
12183+ pax_enter_kernel
12184+ jmp 2f
12185+1: pax_enter_kernel_user
12186+2:
12187+#else
12188+ pax_enter_kernel
12189+#endif
12190 movq %rsp,%rdi /* pt_regs pointer */
12191 xorl %esi,%esi /* no error code */
12192 call \do_sym
12193@@ -1052,7 +1346,7 @@ ENTRY(\sym)
12194 END(\sym)
12195 .endm
12196
12197-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12198+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12199 .macro paranoidzeroentry_ist sym do_sym ist
12200 ENTRY(\sym)
12201 INTR_FRAME
12202@@ -1062,8 +1356,24 @@ ENTRY(\sym)
12203 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12204 call save_paranoid
12205 TRACE_IRQS_OFF
12206+#ifdef CONFIG_PAX_MEMORY_UDEREF
12207+ testb $3, CS(%rsp)
12208+ jnz 1f
12209+ pax_enter_kernel
12210+ jmp 2f
12211+1: pax_enter_kernel_user
12212+2:
12213+#else
12214+ pax_enter_kernel
12215+#endif
12216 movq %rsp,%rdi /* pt_regs pointer */
12217 xorl %esi,%esi /* no error code */
12218+#ifdef CONFIG_SMP
12219+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12220+ lea init_tss(%r12), %r12
12221+#else
12222+ lea init_tss(%rip), %r12
12223+#endif
12224 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12225 call \do_sym
12226 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12227@@ -1080,6 +1390,16 @@ ENTRY(\sym)
12228 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12229 call error_entry
12230 DEFAULT_FRAME 0
12231+#ifdef CONFIG_PAX_MEMORY_UDEREF
12232+ testb $3, CS(%rsp)
12233+ jnz 1f
12234+ pax_enter_kernel
12235+ jmp 2f
12236+1: pax_enter_kernel_user
12237+2:
12238+#else
12239+ pax_enter_kernel
12240+#endif
12241 movq %rsp,%rdi /* pt_regs pointer */
12242 movq ORIG_RAX(%rsp),%rsi /* get error code */
12243 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12244@@ -1099,6 +1419,16 @@ ENTRY(\sym)
12245 call save_paranoid
12246 DEFAULT_FRAME 0
12247 TRACE_IRQS_OFF
12248+#ifdef CONFIG_PAX_MEMORY_UDEREF
12249+ testb $3, CS(%rsp)
12250+ jnz 1f
12251+ pax_enter_kernel
12252+ jmp 2f
12253+1: pax_enter_kernel_user
12254+2:
12255+#else
12256+ pax_enter_kernel
12257+#endif
12258 movq %rsp,%rdi /* pt_regs pointer */
12259 movq ORIG_RAX(%rsp),%rsi /* get error code */
12260 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12261@@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12262 TRACE_IRQS_OFF
12263 testl %ebx,%ebx /* swapgs needed? */
12264 jnz paranoid_restore
12265- testl $3,CS(%rsp)
12266+ testb $3,CS(%rsp)
12267 jnz paranoid_userspace
12268+#ifdef CONFIG_PAX_MEMORY_UDEREF
12269+ pax_exit_kernel
12270+ TRACE_IRQS_IRETQ 0
12271+ SWAPGS_UNSAFE_STACK
12272+ RESTORE_ALL 8
12273+ jmp irq_return
12274+#endif
12275 paranoid_swapgs:
12276+#ifdef CONFIG_PAX_MEMORY_UDEREF
12277+ pax_exit_kernel_user
12278+#else
12279+ pax_exit_kernel
12280+#endif
12281 TRACE_IRQS_IRETQ 0
12282 SWAPGS_UNSAFE_STACK
12283 RESTORE_ALL 8
12284 jmp irq_return
12285 paranoid_restore:
12286+ pax_exit_kernel
12287 TRACE_IRQS_IRETQ 0
12288 RESTORE_ALL 8
12289 jmp irq_return
12290@@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12291 movq_cfi r14, R14+8
12292 movq_cfi r15, R15+8
12293 xorl %ebx,%ebx
12294- testl $3,CS+8(%rsp)
12295+ testb $3,CS+8(%rsp)
12296 je error_kernelspace
12297 error_swapgs:
12298 SWAPGS
12299@@ -1490,6 +1833,16 @@ ENTRY(nmi)
12300 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12301 call save_paranoid
12302 DEFAULT_FRAME 0
12303+#ifdef CONFIG_PAX_MEMORY_UDEREF
12304+ testb $3, CS(%rsp)
12305+ jnz 1f
12306+ pax_enter_kernel
12307+ jmp 2f
12308+1: pax_enter_kernel_user
12309+2:
12310+#else
12311+ pax_enter_kernel
12312+#endif
12313 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12314 movq %rsp,%rdi
12315 movq $-1,%rsi
12316@@ -1500,11 +1853,25 @@ ENTRY(nmi)
12317 DISABLE_INTERRUPTS(CLBR_NONE)
12318 testl %ebx,%ebx /* swapgs needed? */
12319 jnz nmi_restore
12320- testl $3,CS(%rsp)
12321+ testb $3,CS(%rsp)
12322 jnz nmi_userspace
12323+#ifdef CONFIG_PAX_MEMORY_UDEREF
12324+ pax_exit_kernel
12325+ SWAPGS_UNSAFE_STACK
12326+ RESTORE_ALL 8
12327+ jmp irq_return
12328+#endif
12329 nmi_swapgs:
12330+#ifdef CONFIG_PAX_MEMORY_UDEREF
12331+ pax_exit_kernel_user
12332+#else
12333+ pax_exit_kernel
12334+#endif
12335 SWAPGS_UNSAFE_STACK
12336+ RESTORE_ALL 8
12337+ jmp irq_return
12338 nmi_restore:
12339+ pax_exit_kernel
12340 RESTORE_ALL 8
12341 jmp irq_return
12342 nmi_userspace:
12343diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12344--- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12345+++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12346@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12347 static void *mod_code_newcode; /* holds the text to write to the IP */
12348
12349 static unsigned nmi_wait_count;
12350-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12351+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12352
12353 int ftrace_arch_read_dyn_info(char *buf, int size)
12354 {
12355@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12356
12357 r = snprintf(buf, size, "%u %u",
12358 nmi_wait_count,
12359- atomic_read(&nmi_update_count));
12360+ atomic_read_unchecked(&nmi_update_count));
12361 return r;
12362 }
12363
12364@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12365
12366 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12367 smp_rmb();
12368+ pax_open_kernel();
12369 ftrace_mod_code();
12370- atomic_inc(&nmi_update_count);
12371+ pax_close_kernel();
12372+ atomic_inc_unchecked(&nmi_update_count);
12373 }
12374 /* Must have previous changes seen before executions */
12375 smp_mb();
12376@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12377 {
12378 unsigned char replaced[MCOUNT_INSN_SIZE];
12379
12380+ ip = ktla_ktva(ip);
12381+
12382 /*
12383 * Note: Due to modules and __init, code can
12384 * disappear and change, we need to protect against faulting
12385@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12386 unsigned char old[MCOUNT_INSN_SIZE], *new;
12387 int ret;
12388
12389- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12390+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12391 new = ftrace_call_replace(ip, (unsigned long)func);
12392 ret = ftrace_modify_code(ip, old, new);
12393
12394@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12395 {
12396 unsigned char code[MCOUNT_INSN_SIZE];
12397
12398+ ip = ktla_ktva(ip);
12399+
12400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12401 return -EFAULT;
12402
12403diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12404--- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12405+++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12406@@ -19,6 +19,7 @@
12407 #include <asm/io_apic.h>
12408 #include <asm/bios_ebda.h>
12409 #include <asm/tlbflush.h>
12410+#include <asm/boot.h>
12411
12412 static void __init i386_default_early_setup(void)
12413 {
12414@@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12415 {
12416 memblock_init();
12417
12418- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12419+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12420
12421 #ifdef CONFIG_BLK_DEV_INITRD
12422 /* Reserve INITRD */
12423diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12424--- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12425+++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12426@@ -25,6 +25,12 @@
12427 /* Physical address */
12428 #define pa(X) ((X) - __PAGE_OFFSET)
12429
12430+#ifdef CONFIG_PAX_KERNEXEC
12431+#define ta(X) (X)
12432+#else
12433+#define ta(X) ((X) - __PAGE_OFFSET)
12434+#endif
12435+
12436 /*
12437 * References to members of the new_cpu_data structure.
12438 */
12439@@ -54,11 +60,7 @@
12440 * and small than max_low_pfn, otherwise will waste some page table entries
12441 */
12442
12443-#if PTRS_PER_PMD > 1
12444-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12445-#else
12446-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12447-#endif
12448+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12449
12450 /* Number of possible pages in the lowmem region */
12451 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12452@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12453 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12454
12455 /*
12456+ * Real beginning of normal "text" segment
12457+ */
12458+ENTRY(stext)
12459+ENTRY(_stext)
12460+
12461+/*
12462 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12463 * %esi points to the real-mode code as a 32-bit pointer.
12464 * CS and DS must be 4 GB flat segments, but we don't depend on
12465@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12466 * can.
12467 */
12468 __HEAD
12469+
12470+#ifdef CONFIG_PAX_KERNEXEC
12471+ jmp startup_32
12472+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12473+.fill PAGE_SIZE-5,1,0xcc
12474+#endif
12475+
12476 ENTRY(startup_32)
12477 movl pa(stack_start),%ecx
12478
12479@@ -105,6 +120,57 @@ ENTRY(startup_32)
12480 2:
12481 leal -__PAGE_OFFSET(%ecx),%esp
12482
12483+#ifdef CONFIG_SMP
12484+ movl $pa(cpu_gdt_table),%edi
12485+ movl $__per_cpu_load,%eax
12486+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12487+ rorl $16,%eax
12488+ movb %al,__KERNEL_PERCPU + 4(%edi)
12489+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12490+ movl $__per_cpu_end - 1,%eax
12491+ subl $__per_cpu_start,%eax
12492+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12493+#endif
12494+
12495+#ifdef CONFIG_PAX_MEMORY_UDEREF
12496+ movl $NR_CPUS,%ecx
12497+ movl $pa(cpu_gdt_table),%edi
12498+1:
12499+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12500+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12501+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12502+ addl $PAGE_SIZE_asm,%edi
12503+ loop 1b
12504+#endif
12505+
12506+#ifdef CONFIG_PAX_KERNEXEC
12507+ movl $pa(boot_gdt),%edi
12508+ movl $__LOAD_PHYSICAL_ADDR,%eax
12509+ movw %ax,__BOOT_CS + 2(%edi)
12510+ rorl $16,%eax
12511+ movb %al,__BOOT_CS + 4(%edi)
12512+ movb %ah,__BOOT_CS + 7(%edi)
12513+ rorl $16,%eax
12514+
12515+ ljmp $(__BOOT_CS),$1f
12516+1:
12517+
12518+ movl $NR_CPUS,%ecx
12519+ movl $pa(cpu_gdt_table),%edi
12520+ addl $__PAGE_OFFSET,%eax
12521+1:
12522+ movw %ax,__KERNEL_CS + 2(%edi)
12523+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12524+ rorl $16,%eax
12525+ movb %al,__KERNEL_CS + 4(%edi)
12526+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12527+ movb %ah,__KERNEL_CS + 7(%edi)
12528+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12529+ rorl $16,%eax
12530+ addl $PAGE_SIZE_asm,%edi
12531+ loop 1b
12532+#endif
12533+
12534 /*
12535 * Clear BSS first so that there are no surprises...
12536 */
12537@@ -195,8 +261,11 @@ ENTRY(startup_32)
12538 movl %eax, pa(max_pfn_mapped)
12539
12540 /* Do early initialization of the fixmap area */
12541- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12543+#ifdef CONFIG_COMPAT_VDSO
12544+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12545+#else
12546+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12547+#endif
12548 #else /* Not PAE */
12549
12550 page_pde_offset = (__PAGE_OFFSET >> 20);
12551@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 movl %eax, pa(max_pfn_mapped)
12553
12554 /* Do early initialization of the fixmap area */
12555- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12556- movl %eax,pa(initial_page_table+0xffc)
12557+#ifdef CONFIG_COMPAT_VDSO
12558+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12559+#else
12560+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12561+#endif
12562 #endif
12563
12564 #ifdef CONFIG_PARAVIRT
12565@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12566 cmpl $num_subarch_entries, %eax
12567 jae bad_subarch
12568
12569- movl pa(subarch_entries)(,%eax,4), %eax
12570- subl $__PAGE_OFFSET, %eax
12571- jmp *%eax
12572+ jmp *pa(subarch_entries)(,%eax,4)
12573
12574 bad_subarch:
12575 WEAK(lguest_entry)
12576@@ -255,10 +325,10 @@ WEAK(xen_entry)
12577 __INITDATA
12578
12579 subarch_entries:
12580- .long default_entry /* normal x86/PC */
12581- .long lguest_entry /* lguest hypervisor */
12582- .long xen_entry /* Xen hypervisor */
12583- .long default_entry /* Moorestown MID */
12584+ .long ta(default_entry) /* normal x86/PC */
12585+ .long ta(lguest_entry) /* lguest hypervisor */
12586+ .long ta(xen_entry) /* Xen hypervisor */
12587+ .long ta(default_entry) /* Moorestown MID */
12588 num_subarch_entries = (. - subarch_entries) / 4
12589 .previous
12590 #else
12591@@ -312,6 +382,7 @@ default_entry:
12592 orl %edx,%eax
12593 movl %eax,%cr4
12594
12595+#ifdef CONFIG_X86_PAE
12596 testb $X86_CR4_PAE, %al # check if PAE is enabled
12597 jz 6f
12598
12599@@ -340,6 +411,9 @@ default_entry:
12600 /* Make changes effective */
12601 wrmsr
12602
12603+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12604+#endif
12605+
12606 6:
12607
12608 /*
12609@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12610 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12611 movl %eax,%ss # after changing gdt.
12612
12613- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12614+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12615 movl %eax,%ds
12616 movl %eax,%es
12617
12618@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12619 */
12620 cmpb $0,ready
12621 jne 1f
12622- movl $gdt_page,%eax
12623+ movl $cpu_gdt_table,%eax
12624 movl $stack_canary,%ecx
12625+#ifdef CONFIG_SMP
12626+ addl $__per_cpu_load,%ecx
12627+#endif
12628 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12629 shrl $16, %ecx
12630 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12631 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12632 1:
12633-#endif
12634 movl $(__KERNEL_STACK_CANARY),%eax
12635+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12636+ movl $(__USER_DS),%eax
12637+#else
12638+ xorl %eax,%eax
12639+#endif
12640 movl %eax,%gs
12641
12642 xorl %eax,%eax # Clear LDT
12643@@ -558,22 +639,22 @@ early_page_fault:
12644 jmp early_fault
12645
12646 early_fault:
12647- cld
12648 #ifdef CONFIG_PRINTK
12649+ cmpl $1,%ss:early_recursion_flag
12650+ je hlt_loop
12651+ incl %ss:early_recursion_flag
12652+ cld
12653 pusha
12654 movl $(__KERNEL_DS),%eax
12655 movl %eax,%ds
12656 movl %eax,%es
12657- cmpl $2,early_recursion_flag
12658- je hlt_loop
12659- incl early_recursion_flag
12660 movl %cr2,%eax
12661 pushl %eax
12662 pushl %edx /* trapno */
12663 pushl $fault_msg
12664 call printk
12665+; call dump_stack
12666 #endif
12667- call dump_stack
12668 hlt_loop:
12669 hlt
12670 jmp hlt_loop
12671@@ -581,8 +662,11 @@ hlt_loop:
12672 /* This is the default interrupt "handler" :-) */
12673 ALIGN
12674 ignore_int:
12675- cld
12676 #ifdef CONFIG_PRINTK
12677+ cmpl $2,%ss:early_recursion_flag
12678+ je hlt_loop
12679+ incl %ss:early_recursion_flag
12680+ cld
12681 pushl %eax
12682 pushl %ecx
12683 pushl %edx
12684@@ -591,9 +675,6 @@ ignore_int:
12685 movl $(__KERNEL_DS),%eax
12686 movl %eax,%ds
12687 movl %eax,%es
12688- cmpl $2,early_recursion_flag
12689- je hlt_loop
12690- incl early_recursion_flag
12691 pushl 16(%esp)
12692 pushl 24(%esp)
12693 pushl 32(%esp)
12694@@ -622,29 +703,43 @@ ENTRY(initial_code)
12695 /*
12696 * BSS section
12697 */
12698-__PAGE_ALIGNED_BSS
12699- .align PAGE_SIZE
12700 #ifdef CONFIG_X86_PAE
12701+.section .initial_pg_pmd,"a",@progbits
12702 initial_pg_pmd:
12703 .fill 1024*KPMDS,4,0
12704 #else
12705+.section .initial_page_table,"a",@progbits
12706 ENTRY(initial_page_table)
12707 .fill 1024,4,0
12708 #endif
12709+.section .initial_pg_fixmap,"a",@progbits
12710 initial_pg_fixmap:
12711 .fill 1024,4,0
12712+.section .empty_zero_page,"a",@progbits
12713 ENTRY(empty_zero_page)
12714 .fill 4096,1,0
12715+.section .swapper_pg_dir,"a",@progbits
12716 ENTRY(swapper_pg_dir)
12717+#ifdef CONFIG_X86_PAE
12718+ .fill 4,8,0
12719+#else
12720 .fill 1024,4,0
12721+#endif
12722+
12723+/*
12724+ * The IDT has to be page-aligned to simplify the Pentium
12725+ * F0 0F bug workaround.. We have a special link segment
12726+ * for this.
12727+ */
12728+.section .idt,"a",@progbits
12729+ENTRY(idt_table)
12730+ .fill 256,8,0
12731
12732 /*
12733 * This starts the data section.
12734 */
12735 #ifdef CONFIG_X86_PAE
12736-__PAGE_ALIGNED_DATA
12737- /* Page-aligned for the benefit of paravirt? */
12738- .align PAGE_SIZE
12739+.section .initial_page_table,"a",@progbits
12740 ENTRY(initial_page_table)
12741 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12742 # if KPMDS == 3
12743@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12744 # error "Kernel PMDs should be 1, 2 or 3"
12745 # endif
12746 .align PAGE_SIZE /* needs to be page-sized too */
12747+
12748+#ifdef CONFIG_PAX_PER_CPU_PGD
12749+ENTRY(cpu_pgd)
12750+ .rept NR_CPUS
12751+ .fill 4,8,0
12752+ .endr
12753+#endif
12754+
12755 #endif
12756
12757 .data
12758 .balign 4
12759 ENTRY(stack_start)
12760- .long init_thread_union+THREAD_SIZE
12761+ .long init_thread_union+THREAD_SIZE-8
12762+
12763+ready: .byte 0
12764
12765+.section .rodata,"a",@progbits
12766 early_recursion_flag:
12767 .long 0
12768
12769-ready: .byte 0
12770-
12771 int_msg:
12772 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12773
12774@@ -707,7 +811,7 @@ fault_msg:
12775 .word 0 # 32 bit align gdt_desc.address
12776 boot_gdt_descr:
12777 .word __BOOT_DS+7
12778- .long boot_gdt - __PAGE_OFFSET
12779+ .long pa(boot_gdt)
12780
12781 .word 0 # 32-bit align idt_desc.address
12782 idt_descr:
12783@@ -718,7 +822,7 @@ idt_descr:
12784 .word 0 # 32 bit align gdt_desc.address
12785 ENTRY(early_gdt_descr)
12786 .word GDT_ENTRIES*8-1
12787- .long gdt_page /* Overwritten for secondary CPUs */
12788+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12789
12790 /*
12791 * The boot_gdt must mirror the equivalent in setup.S and is
12792@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12793 .align L1_CACHE_BYTES
12794 ENTRY(boot_gdt)
12795 .fill GDT_ENTRY_BOOT_CS,8,0
12796- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12797- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12798+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12799+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12800+
12801+ .align PAGE_SIZE_asm
12802+ENTRY(cpu_gdt_table)
12803+ .rept NR_CPUS
12804+ .quad 0x0000000000000000 /* NULL descriptor */
12805+ .quad 0x0000000000000000 /* 0x0b reserved */
12806+ .quad 0x0000000000000000 /* 0x13 reserved */
12807+ .quad 0x0000000000000000 /* 0x1b reserved */
12808+
12809+#ifdef CONFIG_PAX_KERNEXEC
12810+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12811+#else
12812+ .quad 0x0000000000000000 /* 0x20 unused */
12813+#endif
12814+
12815+ .quad 0x0000000000000000 /* 0x28 unused */
12816+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12817+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12818+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12819+ .quad 0x0000000000000000 /* 0x4b reserved */
12820+ .quad 0x0000000000000000 /* 0x53 reserved */
12821+ .quad 0x0000000000000000 /* 0x5b reserved */
12822+
12823+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12824+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12825+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12826+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12827+
12828+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12829+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12830+
12831+ /*
12832+ * Segments used for calling PnP BIOS have byte granularity.
12833+ * The code segments and data segments have fixed 64k limits,
12834+ * the transfer segment sizes are set at run time.
12835+ */
12836+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12837+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12838+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12839+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12840+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12841+
12842+ /*
12843+ * The APM segments have byte granularity and their bases
12844+ * are set at run time. All have 64k limits.
12845+ */
12846+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12847+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12848+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12849+
12850+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12851+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12852+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12853+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12854+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12855+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12856+
12857+ /* Be sure this is zeroed to avoid false validations in Xen */
12858+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12859+ .endr
12860diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12861--- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12862+++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12863@@ -19,6 +19,7 @@
12864 #include <asm/cache.h>
12865 #include <asm/processor-flags.h>
12866 #include <asm/percpu.h>
12867+#include <asm/cpufeature.h>
12868
12869 #ifdef CONFIG_PARAVIRT
12870 #include <asm/asm-offsets.h>
12871@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12872 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12873 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12874 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12875+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12876+L3_VMALLOC_START = pud_index(VMALLOC_START)
12877+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12878+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12879
12880 .text
12881 __HEAD
12882@@ -85,35 +90,22 @@ startup_64:
12883 */
12884 addq %rbp, init_level4_pgt + 0(%rip)
12885 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12886+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12887+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12888 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12889
12890 addq %rbp, level3_ident_pgt + 0(%rip)
12891+#ifndef CONFIG_XEN
12892+ addq %rbp, level3_ident_pgt + 8(%rip)
12893+#endif
12894
12895- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12896- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12897+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12898
12899- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12900+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12901+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12902
12903- /* Add an Identity mapping if I am above 1G */
12904- leaq _text(%rip), %rdi
12905- andq $PMD_PAGE_MASK, %rdi
12906-
12907- movq %rdi, %rax
12908- shrq $PUD_SHIFT, %rax
12909- andq $(PTRS_PER_PUD - 1), %rax
12910- jz ident_complete
12911-
12912- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12913- leaq level3_ident_pgt(%rip), %rbx
12914- movq %rdx, 0(%rbx, %rax, 8)
12915-
12916- movq %rdi, %rax
12917- shrq $PMD_SHIFT, %rax
12918- andq $(PTRS_PER_PMD - 1), %rax
12919- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12920- leaq level2_spare_pgt(%rip), %rbx
12921- movq %rdx, 0(%rbx, %rax, 8)
12922-ident_complete:
12923+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12924+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12925
12926 /*
12927 * Fixup the kernel text+data virtual addresses. Note that
12928@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12929 * after the boot processor executes this code.
12930 */
12931
12932- /* Enable PAE mode and PGE */
12933- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12934+ /* Enable PAE mode and PSE/PGE */
12935+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12936 movq %rax, %cr4
12937
12938 /* Setup early boot stage 4 level pagetables. */
12939@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12940 movl $MSR_EFER, %ecx
12941 rdmsr
12942 btsl $_EFER_SCE, %eax /* Enable System Call */
12943- btl $20,%edi /* No Execute supported? */
12944+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12945 jnc 1f
12946 btsl $_EFER_NX, %eax
12947+ leaq init_level4_pgt(%rip), %rdi
12948+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12949+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12950+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12951+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12952 1: wrmsr /* Make changes effective */
12953
12954 /* Setup cr0 */
12955@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12956 bad_address:
12957 jmp bad_address
12958
12959- .section ".init.text","ax"
12960+ __INIT
12961 #ifdef CONFIG_EARLY_PRINTK
12962 .globl early_idt_handlers
12963 early_idt_handlers:
12964@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12965 #endif /* EARLY_PRINTK */
12966 1: hlt
12967 jmp 1b
12968+ .previous
12969
12970 #ifdef CONFIG_EARLY_PRINTK
12971+ __INITDATA
12972 early_recursion_flag:
12973 .long 0
12974+ .previous
12975
12976+ .section .rodata,"a",@progbits
12977 early_idt_msg:
12978 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12979 early_idt_ripmsg:
12980 .asciz "RIP %s\n"
12981-#endif /* CONFIG_EARLY_PRINTK */
12982 .previous
12983+#endif /* CONFIG_EARLY_PRINTK */
12984
12985+ .section .rodata,"a",@progbits
12986 #define NEXT_PAGE(name) \
12987 .balign PAGE_SIZE; \
12988 ENTRY(name)
12989@@ -338,7 +340,6 @@ ENTRY(name)
12990 i = i + 1 ; \
12991 .endr
12992
12993- .data
12994 /*
12995 * This default setting generates an ident mapping at address 0x100000
12996 * and a mapping for the kernel that precisely maps virtual address
12997@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12998 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12999 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13000 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13001+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13002+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13003+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13004+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13005 .org init_level4_pgt + L4_START_KERNEL*8, 0
13006 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13007 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13008
13009+#ifdef CONFIG_PAX_PER_CPU_PGD
13010+NEXT_PAGE(cpu_pgd)
13011+ .rept NR_CPUS
13012+ .fill 512,8,0
13013+ .endr
13014+#endif
13015+
13016 NEXT_PAGE(level3_ident_pgt)
13017 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13018+#ifdef CONFIG_XEN
13019 .fill 511,8,0
13020+#else
13021+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13022+ .fill 510,8,0
13023+#endif
13024+
13025+NEXT_PAGE(level3_vmalloc_pgt)
13026+ .fill 512,8,0
13027+
13028+NEXT_PAGE(level3_vmemmap_pgt)
13029+ .fill L3_VMEMMAP_START,8,0
13030+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13031
13032 NEXT_PAGE(level3_kernel_pgt)
13033 .fill L3_START_KERNEL,8,0
13034@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13035 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13036 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13037
13038+NEXT_PAGE(level2_vmemmap_pgt)
13039+ .fill 512,8,0
13040+
13041 NEXT_PAGE(level2_fixmap_pgt)
13042- .fill 506,8,0
13043- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13044- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13045- .fill 5,8,0
13046+ .fill 507,8,0
13047+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13048+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13049+ .fill 4,8,0
13050
13051-NEXT_PAGE(level1_fixmap_pgt)
13052+NEXT_PAGE(level1_vsyscall_pgt)
13053 .fill 512,8,0
13054
13055-NEXT_PAGE(level2_ident_pgt)
13056- /* Since I easily can, map the first 1G.
13057+ /* Since I easily can, map the first 2G.
13058 * Don't set NX because code runs from these pages.
13059 */
13060- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13061+NEXT_PAGE(level2_ident_pgt)
13062+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13063
13064 NEXT_PAGE(level2_kernel_pgt)
13065 /*
13066@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13067 * If you want to increase this then increase MODULES_VADDR
13068 * too.)
13069 */
13070- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13071- KERNEL_IMAGE_SIZE/PMD_SIZE)
13072-
13073-NEXT_PAGE(level2_spare_pgt)
13074- .fill 512, 8, 0
13075+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13076
13077 #undef PMDS
13078 #undef NEXT_PAGE
13079
13080- .data
13081+ .align PAGE_SIZE
13082+ENTRY(cpu_gdt_table)
13083+ .rept NR_CPUS
13084+ .quad 0x0000000000000000 /* NULL descriptor */
13085+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13086+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13087+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13088+ .quad 0x00cffb000000ffff /* __USER32_CS */
13089+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13090+ .quad 0x00affb000000ffff /* __USER_CS */
13091+
13092+#ifdef CONFIG_PAX_KERNEXEC
13093+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13094+#else
13095+ .quad 0x0 /* unused */
13096+#endif
13097+
13098+ .quad 0,0 /* TSS */
13099+ .quad 0,0 /* LDT */
13100+ .quad 0,0,0 /* three TLS descriptors */
13101+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13102+ /* asm/segment.h:GDT_ENTRIES must match this */
13103+
13104+ /* zero the remaining page */
13105+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13106+ .endr
13107+
13108 .align 16
13109 .globl early_gdt_descr
13110 early_gdt_descr:
13111 .word GDT_ENTRIES*8-1
13112 early_gdt_descr_base:
13113- .quad INIT_PER_CPU_VAR(gdt_page)
13114+ .quad cpu_gdt_table
13115
13116 ENTRY(phys_base)
13117 /* This must match the first entry in level2_kernel_pgt */
13118 .quad 0x0000000000000000
13119
13120 #include "../../x86/xen/xen-head.S"
13121-
13122- .section .bss, "aw", @nobits
13123+
13124+ .section .rodata,"a",@progbits
13125 .align L1_CACHE_BYTES
13126 ENTRY(idt_table)
13127- .skip IDT_ENTRIES * 16
13128+ .fill 512,8,0
13129
13130 __PAGE_ALIGNED_BSS
13131 .align PAGE_SIZE
13132diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13133--- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13134+++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13135@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13136 EXPORT_SYMBOL(cmpxchg8b_emu);
13137 #endif
13138
13139+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13140+
13141 /* Networking helper routines. */
13142 EXPORT_SYMBOL(csum_partial_copy_generic);
13143+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13144+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13145
13146 EXPORT_SYMBOL(__get_user_1);
13147 EXPORT_SYMBOL(__get_user_2);
13148@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13149
13150 EXPORT_SYMBOL(csum_partial);
13151 EXPORT_SYMBOL(empty_zero_page);
13152+
13153+#ifdef CONFIG_PAX_KERNEXEC
13154+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13155+#endif
13156diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13157--- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13158+++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13159@@ -210,7 +210,7 @@ spurious_8259A_irq:
13160 "spurious 8259A interrupt: IRQ%d.\n", irq);
13161 spurious_irq_mask |= irqmask;
13162 }
13163- atomic_inc(&irq_err_count);
13164+ atomic_inc_unchecked(&irq_err_count);
13165 /*
13166 * Theoretically we do not have to handle this IRQ,
13167 * but in Linux this does not cause problems and is
13168diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13169--- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13170+++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13171@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13172 * way process stacks are handled. This is done by having a special
13173 * "init_task" linker map entry..
13174 */
13175-union thread_union init_thread_union __init_task_data =
13176- { INIT_THREAD_INFO(init_task) };
13177+union thread_union init_thread_union __init_task_data;
13178
13179 /*
13180 * Initial task structure.
13181@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13182 * section. Since TSS's are completely CPU-local, we want them
13183 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13184 */
13185-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13186-
13187+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13188+EXPORT_SYMBOL(init_tss);
13189diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13190--- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13191+++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13192@@ -6,6 +6,7 @@
13193 #include <linux/sched.h>
13194 #include <linux/kernel.h>
13195 #include <linux/capability.h>
13196+#include <linux/security.h>
13197 #include <linux/errno.h>
13198 #include <linux/types.h>
13199 #include <linux/ioport.h>
13200@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13201
13202 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13203 return -EINVAL;
13204+#ifdef CONFIG_GRKERNSEC_IO
13205+ if (turn_on && grsec_disable_privio) {
13206+ gr_handle_ioperm();
13207+ return -EPERM;
13208+ }
13209+#endif
13210 if (turn_on && !capable(CAP_SYS_RAWIO))
13211 return -EPERM;
13212
13213@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13214 * because the ->io_bitmap_max value must match the bitmap
13215 * contents:
13216 */
13217- tss = &per_cpu(init_tss, get_cpu());
13218+ tss = init_tss + get_cpu();
13219
13220 if (turn_on)
13221 bitmap_clear(t->io_bitmap_ptr, from, num);
13222@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13223 return -EINVAL;
13224 /* Trying to gain more privileges? */
13225 if (level > old) {
13226+#ifdef CONFIG_GRKERNSEC_IO
13227+ if (grsec_disable_privio) {
13228+ gr_handle_iopl();
13229+ return -EPERM;
13230+ }
13231+#endif
13232 if (!capable(CAP_SYS_RAWIO))
13233 return -EPERM;
13234 }
13235diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13236--- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13237+++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13238@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13239 __asm__ __volatile__("andl %%esp,%0" :
13240 "=r" (sp) : "0" (THREAD_SIZE - 1));
13241
13242- return sp < (sizeof(struct thread_info) + STACK_WARN);
13243+ return sp < STACK_WARN;
13244 }
13245
13246 static void print_stack_overflow(void)
13247@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13248 * per-CPU IRQ handling contexts (thread information and stack)
13249 */
13250 union irq_ctx {
13251- struct thread_info tinfo;
13252- u32 stack[THREAD_SIZE/sizeof(u32)];
13253+ unsigned long previous_esp;
13254+ u32 stack[THREAD_SIZE/sizeof(u32)];
13255 } __attribute__((aligned(THREAD_SIZE)));
13256
13257 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13258@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13259 static inline int
13260 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13261 {
13262- union irq_ctx *curctx, *irqctx;
13263+ union irq_ctx *irqctx;
13264 u32 *isp, arg1, arg2;
13265
13266- curctx = (union irq_ctx *) current_thread_info();
13267 irqctx = __this_cpu_read(hardirq_ctx);
13268
13269 /*
13270@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13271 * handler) we can't do that and just have to keep using the
13272 * current stack (which is the irq stack already after all)
13273 */
13274- if (unlikely(curctx == irqctx))
13275+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13276 return 0;
13277
13278 /* build the stack frame on the IRQ stack */
13279- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13280- irqctx->tinfo.task = curctx->tinfo.task;
13281- irqctx->tinfo.previous_esp = current_stack_pointer;
13282+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13283+ irqctx->previous_esp = current_stack_pointer;
13284
13285- /*
13286- * Copy the softirq bits in preempt_count so that the
13287- * softirq checks work in the hardirq context.
13288- */
13289- irqctx->tinfo.preempt_count =
13290- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13291- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13292+#ifdef CONFIG_PAX_MEMORY_UDEREF
13293+ __set_fs(MAKE_MM_SEG(0));
13294+#endif
13295
13296 if (unlikely(overflow))
13297 call_on_stack(print_stack_overflow, isp);
13298@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13299 : "0" (irq), "1" (desc), "2" (isp),
13300 "D" (desc->handle_irq)
13301 : "memory", "cc", "ecx");
13302+
13303+#ifdef CONFIG_PAX_MEMORY_UDEREF
13304+ __set_fs(current_thread_info()->addr_limit);
13305+#endif
13306+
13307 return 1;
13308 }
13309
13310@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13311 */
13312 void __cpuinit irq_ctx_init(int cpu)
13313 {
13314- union irq_ctx *irqctx;
13315-
13316 if (per_cpu(hardirq_ctx, cpu))
13317 return;
13318
13319- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13320- THREAD_FLAGS,
13321- THREAD_ORDER));
13322- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13323- irqctx->tinfo.cpu = cpu;
13324- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13325- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13326-
13327- per_cpu(hardirq_ctx, cpu) = irqctx;
13328-
13329- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13330- THREAD_FLAGS,
13331- THREAD_ORDER));
13332- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13333- irqctx->tinfo.cpu = cpu;
13334- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13335-
13336- per_cpu(softirq_ctx, cpu) = irqctx;
13337+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13338+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13339
13340 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13341 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13342@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13343 asmlinkage void do_softirq(void)
13344 {
13345 unsigned long flags;
13346- struct thread_info *curctx;
13347 union irq_ctx *irqctx;
13348 u32 *isp;
13349
13350@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13351 local_irq_save(flags);
13352
13353 if (local_softirq_pending()) {
13354- curctx = current_thread_info();
13355 irqctx = __this_cpu_read(softirq_ctx);
13356- irqctx->tinfo.task = curctx->task;
13357- irqctx->tinfo.previous_esp = current_stack_pointer;
13358+ irqctx->previous_esp = current_stack_pointer;
13359
13360 /* build the stack frame on the softirq stack */
13361- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13362+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13363+
13364+#ifdef CONFIG_PAX_MEMORY_UDEREF
13365+ __set_fs(MAKE_MM_SEG(0));
13366+#endif
13367
13368 call_on_stack(__do_softirq, isp);
13369+
13370+#ifdef CONFIG_PAX_MEMORY_UDEREF
13371+ __set_fs(current_thread_info()->addr_limit);
13372+#endif
13373+
13374 /*
13375 * Shouldn't happen, we returned above if in_interrupt():
13376 */
13377diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13378--- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13379+++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13380@@ -17,7 +17,7 @@
13381 #include <asm/mce.h>
13382 #include <asm/hw_irq.h>
13383
13384-atomic_t irq_err_count;
13385+atomic_unchecked_t irq_err_count;
13386
13387 /* Function pointer for generic interrupt vector handling */
13388 void (*x86_platform_ipi_callback)(void) = NULL;
13389@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13390 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13391 seq_printf(p, " Machine check polls\n");
13392 #endif
13393- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13394+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13395 #if defined(CONFIG_X86_IO_APIC)
13396- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13397+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13398 #endif
13399 return 0;
13400 }
13401@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13402
13403 u64 arch_irq_stat(void)
13404 {
13405- u64 sum = atomic_read(&irq_err_count);
13406+ u64 sum = atomic_read_unchecked(&irq_err_count);
13407
13408 #ifdef CONFIG_X86_IO_APIC
13409- sum += atomic_read(&irq_mis_count);
13410+ sum += atomic_read_unchecked(&irq_mis_count);
13411 #endif
13412 return sum;
13413 }
13414diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13415--- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13416+++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13417@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13418 #ifdef CONFIG_X86_32
13419 switch (regno) {
13420 case GDB_SS:
13421- if (!user_mode_vm(regs))
13422+ if (!user_mode(regs))
13423 *(unsigned long *)mem = __KERNEL_DS;
13424 break;
13425 case GDB_SP:
13426- if (!user_mode_vm(regs))
13427+ if (!user_mode(regs))
13428 *(unsigned long *)mem = kernel_stack_pointer(regs);
13429 break;
13430 case GDB_GS:
13431@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13432 case 'k':
13433 /* clear the trace bit */
13434 linux_regs->flags &= ~X86_EFLAGS_TF;
13435- atomic_set(&kgdb_cpu_doing_single_step, -1);
13436+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13437
13438 /* set the trace bit if we're stepping */
13439 if (remcomInBuffer[0] == 's') {
13440 linux_regs->flags |= X86_EFLAGS_TF;
13441- atomic_set(&kgdb_cpu_doing_single_step,
13442+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13443 raw_smp_processor_id());
13444 }
13445
13446@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13447 return NOTIFY_DONE;
13448
13449 case DIE_DEBUG:
13450- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13451+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13452 if (user_mode(regs))
13453 return single_step_cont(regs, args);
13454 break;
13455diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13456--- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13457+++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13458@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13459 } __attribute__((packed)) *insn;
13460
13461 insn = (struct __arch_relative_insn *)from;
13462+
13463+ pax_open_kernel();
13464 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13465 insn->op = op;
13466+ pax_close_kernel();
13467 }
13468
13469 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13470@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13471 kprobe_opcode_t opcode;
13472 kprobe_opcode_t *orig_opcodes = opcodes;
13473
13474- if (search_exception_tables((unsigned long)opcodes))
13475+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13476 return 0; /* Page fault may occur on this address. */
13477
13478 retry:
13479@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13480 }
13481 }
13482 insn_get_length(&insn);
13483+ pax_open_kernel();
13484 memcpy(dest, insn.kaddr, insn.length);
13485+ pax_close_kernel();
13486
13487 #ifdef CONFIG_X86_64
13488 if (insn_rip_relative(&insn)) {
13489@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13490 (u8 *) dest;
13491 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13492 disp = (u8 *) dest + insn_offset_displacement(&insn);
13493+ pax_open_kernel();
13494 *(s32 *) disp = (s32) newdisp;
13495+ pax_close_kernel();
13496 }
13497 #endif
13498 return insn.length;
13499@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13500 */
13501 __copy_instruction(p->ainsn.insn, p->addr, 0);
13502
13503- if (can_boost(p->addr))
13504+ if (can_boost(ktla_ktva(p->addr)))
13505 p->ainsn.boostable = 0;
13506 else
13507 p->ainsn.boostable = -1;
13508
13509- p->opcode = *p->addr;
13510+ p->opcode = *(ktla_ktva(p->addr));
13511 }
13512
13513 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13514@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13515 * nor set current_kprobe, because it doesn't use single
13516 * stepping.
13517 */
13518- regs->ip = (unsigned long)p->ainsn.insn;
13519+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 preempt_enable_no_resched();
13521 return;
13522 }
13523@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13524 if (p->opcode == BREAKPOINT_INSTRUCTION)
13525 regs->ip = (unsigned long)p->addr;
13526 else
13527- regs->ip = (unsigned long)p->ainsn.insn;
13528+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13529 }
13530
13531 /*
13532@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13533 setup_singlestep(p, regs, kcb, 0);
13534 return 1;
13535 }
13536- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13537+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13538 /*
13539 * The breakpoint instruction was removed right
13540 * after we hit it. Another cpu has removed
13541@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13542 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13543 {
13544 unsigned long *tos = stack_addr(regs);
13545- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13546+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13547 unsigned long orig_ip = (unsigned long)p->addr;
13548 kprobe_opcode_t *insn = p->ainsn.insn;
13549
13550@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13551 struct die_args *args = data;
13552 int ret = NOTIFY_DONE;
13553
13554- if (args->regs && user_mode_vm(args->regs))
13555+ if (args->regs && user_mode(args->regs))
13556 return ret;
13557
13558 switch (val) {
13559@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13560 * Verify if the address gap is in 2GB range, because this uses
13561 * a relative jump.
13562 */
13563- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13564+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13565 if (abs(rel) > 0x7fffffff)
13566 return -ERANGE;
13567
13568@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13569 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13570
13571 /* Set probe function call */
13572- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13573+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13574
13575 /* Set returning jmp instruction at the tail of out-of-line buffer */
13576 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13577- (u8 *)op->kp.addr + op->optinsn.size);
13578+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13579
13580 flush_icache_range((unsigned long) buf,
13581 (unsigned long) buf + TMPL_END_IDX +
13582@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13583 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13584
13585 /* Backup instructions which will be replaced by jump address */
13586- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13587+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13588 RELATIVE_ADDR_SIZE);
13589
13590 insn_buf[0] = RELATIVEJUMP_OPCODE;
13591diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13592--- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13593+++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13594@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13595 if (reload) {
13596 #ifdef CONFIG_SMP
13597 preempt_disable();
13598- load_LDT(pc);
13599+ load_LDT_nolock(pc);
13600 if (!cpumask_equal(mm_cpumask(current->mm),
13601 cpumask_of(smp_processor_id())))
13602 smp_call_function(flush_ldt, current->mm, 1);
13603 preempt_enable();
13604 #else
13605- load_LDT(pc);
13606+ load_LDT_nolock(pc);
13607 #endif
13608 }
13609 if (oldsize) {
13610@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13611 return err;
13612
13613 for (i = 0; i < old->size; i++)
13614- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13615+ write_ldt_entry(new->ldt, i, old->ldt + i);
13616 return 0;
13617 }
13618
13619@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13620 retval = copy_ldt(&mm->context, &old_mm->context);
13621 mutex_unlock(&old_mm->context.lock);
13622 }
13623+
13624+ if (tsk == current) {
13625+ mm->context.vdso = 0;
13626+
13627+#ifdef CONFIG_X86_32
13628+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13629+ mm->context.user_cs_base = 0UL;
13630+ mm->context.user_cs_limit = ~0UL;
13631+
13632+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13633+ cpus_clear(mm->context.cpu_user_cs_mask);
13634+#endif
13635+
13636+#endif
13637+#endif
13638+
13639+ }
13640+
13641 return retval;
13642 }
13643
13644@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13645 }
13646 }
13647
13648+#ifdef CONFIG_PAX_SEGMEXEC
13649+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13650+ error = -EINVAL;
13651+ goto out_unlock;
13652+ }
13653+#endif
13654+
13655 fill_ldt(&ldt, &ldt_info);
13656 if (oldmode)
13657 ldt.avl = 0;
13658diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13659--- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13660+++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13661@@ -27,7 +27,7 @@
13662 #include <asm/cacheflush.h>
13663 #include <asm/debugreg.h>
13664
13665-static void set_idt(void *newidt, __u16 limit)
13666+static void set_idt(struct desc_struct *newidt, __u16 limit)
13667 {
13668 struct desc_ptr curidt;
13669
13670@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13671 }
13672
13673
13674-static void set_gdt(void *newgdt, __u16 limit)
13675+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13676 {
13677 struct desc_ptr curgdt;
13678
13679@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13680 }
13681
13682 control_page = page_address(image->control_code_page);
13683- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13684+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13685
13686 relocate_kernel_ptr = control_page;
13687 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13688diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13689--- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13690+++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13691@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13692
13693 static int get_ucode_user(void *to, const void *from, size_t n)
13694 {
13695- return copy_from_user(to, from, n);
13696+ return copy_from_user(to, (__force const void __user *)from, n);
13697 }
13698
13699 static enum ucode_state
13700 request_microcode_user(int cpu, const void __user *buf, size_t size)
13701 {
13702- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13703+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13704 }
13705
13706 static void microcode_fini_cpu(int cpu)
13707diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13708--- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13709+++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13710@@ -35,21 +35,66 @@
13711 #define DEBUGP(fmt...)
13712 #endif
13713
13714-void *module_alloc(unsigned long size)
13715+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13716 {
13717 if (PAGE_ALIGN(size) > MODULES_LEN)
13718 return NULL;
13719 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13720- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13721+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13722 -1, __builtin_return_address(0));
13723 }
13724
13725+void *module_alloc(unsigned long size)
13726+{
13727+
13728+#ifdef CONFIG_PAX_KERNEXEC
13729+ return __module_alloc(size, PAGE_KERNEL);
13730+#else
13731+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13732+#endif
13733+
13734+}
13735+
13736 /* Free memory returned from module_alloc */
13737 void module_free(struct module *mod, void *module_region)
13738 {
13739 vfree(module_region);
13740 }
13741
13742+#ifdef CONFIG_PAX_KERNEXEC
13743+#ifdef CONFIG_X86_32
13744+void *module_alloc_exec(unsigned long size)
13745+{
13746+ struct vm_struct *area;
13747+
13748+ if (size == 0)
13749+ return NULL;
13750+
13751+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13752+ return area ? area->addr : NULL;
13753+}
13754+EXPORT_SYMBOL(module_alloc_exec);
13755+
13756+void module_free_exec(struct module *mod, void *module_region)
13757+{
13758+ vunmap(module_region);
13759+}
13760+EXPORT_SYMBOL(module_free_exec);
13761+#else
13762+void module_free_exec(struct module *mod, void *module_region)
13763+{
13764+ module_free(mod, module_region);
13765+}
13766+EXPORT_SYMBOL(module_free_exec);
13767+
13768+void *module_alloc_exec(unsigned long size)
13769+{
13770+ return __module_alloc(size, PAGE_KERNEL_RX);
13771+}
13772+EXPORT_SYMBOL(module_alloc_exec);
13773+#endif
13774+#endif
13775+
13776 /* We don't need anything special. */
13777 int module_frob_arch_sections(Elf_Ehdr *hdr,
13778 Elf_Shdr *sechdrs,
13779@@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13780 unsigned int i;
13781 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13782 Elf32_Sym *sym;
13783- uint32_t *location;
13784+ uint32_t *plocation, location;
13785
13786 DEBUGP("Applying relocate section %u to %u\n", relsec,
13787 sechdrs[relsec].sh_info);
13788 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13789 /* This is where to make the change */
13790- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13791- + rel[i].r_offset;
13792+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13793+ location = (uint32_t)plocation;
13794+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13795+ plocation = ktla_ktva((void *)plocation);
13796 /* This is the symbol it is referring to. Note that all
13797 undefined symbols have been resolved. */
13798 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13799@@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13800 switch (ELF32_R_TYPE(rel[i].r_info)) {
13801 case R_386_32:
13802 /* We add the value into the location given */
13803- *location += sym->st_value;
13804+ pax_open_kernel();
13805+ *plocation += sym->st_value;
13806+ pax_close_kernel();
13807 break;
13808 case R_386_PC32:
13809 /* Add the value, subtract its postition */
13810- *location += sym->st_value - (uint32_t)location;
13811+ pax_open_kernel();
13812+ *plocation += sym->st_value - location;
13813+ pax_close_kernel();
13814 break;
13815 default:
13816 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13817@@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13818 case R_X86_64_NONE:
13819 break;
13820 case R_X86_64_64:
13821+ pax_open_kernel();
13822 *(u64 *)loc = val;
13823+ pax_close_kernel();
13824 break;
13825 case R_X86_64_32:
13826+ pax_open_kernel();
13827 *(u32 *)loc = val;
13828+ pax_close_kernel();
13829 if (val != *(u32 *)loc)
13830 goto overflow;
13831 break;
13832 case R_X86_64_32S:
13833+ pax_open_kernel();
13834 *(s32 *)loc = val;
13835+ pax_close_kernel();
13836 if ((s64)val != *(s32 *)loc)
13837 goto overflow;
13838 break;
13839 case R_X86_64_PC32:
13840 val -= (u64)loc;
13841+ pax_open_kernel();
13842 *(u32 *)loc = val;
13843+ pax_close_kernel();
13844+
13845 #if 0
13846 if ((s64)val != *(s32 *)loc)
13847 goto overflow;
13848diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13849--- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13850+++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13851@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13852 {
13853 return x;
13854 }
13855+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13856+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13857+#endif
13858
13859 void __init default_banner(void)
13860 {
13861@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13862 * corresponding structure. */
13863 static void *get_call_destination(u8 type)
13864 {
13865- struct paravirt_patch_template tmpl = {
13866+ const struct paravirt_patch_template tmpl = {
13867 .pv_init_ops = pv_init_ops,
13868 .pv_time_ops = pv_time_ops,
13869 .pv_cpu_ops = pv_cpu_ops,
13870@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13871 .pv_lock_ops = pv_lock_ops,
13872 #endif
13873 };
13874+
13875+ pax_track_stack();
13876+
13877 return *((void **)&tmpl + type);
13878 }
13879
13880@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13881 if (opfunc == NULL)
13882 /* If there's no function, patch it with a ud2a (BUG) */
13883 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13884- else if (opfunc == _paravirt_nop)
13885+ else if (opfunc == (void *)_paravirt_nop)
13886 /* If the operation is a nop, then nop the callsite */
13887 ret = paravirt_patch_nop();
13888
13889 /* identity functions just return their single argument */
13890- else if (opfunc == _paravirt_ident_32)
13891+ else if (opfunc == (void *)_paravirt_ident_32)
13892 ret = paravirt_patch_ident_32(insnbuf, len);
13893- else if (opfunc == _paravirt_ident_64)
13894+ else if (opfunc == (void *)_paravirt_ident_64)
13895 ret = paravirt_patch_ident_64(insnbuf, len);
13896+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13897+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13898+ ret = paravirt_patch_ident_64(insnbuf, len);
13899+#endif
13900
13901 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13902 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13903@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13904 if (insn_len > len || start == NULL)
13905 insn_len = len;
13906 else
13907- memcpy(insnbuf, start, insn_len);
13908+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13909
13910 return insn_len;
13911 }
13912@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13913 preempt_enable();
13914 }
13915
13916-struct pv_info pv_info = {
13917+struct pv_info pv_info __read_only = {
13918 .name = "bare hardware",
13919 .paravirt_enabled = 0,
13920 .kernel_rpl = 0,
13921 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13922 };
13923
13924-struct pv_init_ops pv_init_ops = {
13925+struct pv_init_ops pv_init_ops __read_only = {
13926 .patch = native_patch,
13927 };
13928
13929-struct pv_time_ops pv_time_ops = {
13930+struct pv_time_ops pv_time_ops __read_only = {
13931 .sched_clock = native_sched_clock,
13932 };
13933
13934-struct pv_irq_ops pv_irq_ops = {
13935+struct pv_irq_ops pv_irq_ops __read_only = {
13936 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13937 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13938 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13939@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13940 #endif
13941 };
13942
13943-struct pv_cpu_ops pv_cpu_ops = {
13944+struct pv_cpu_ops pv_cpu_ops __read_only = {
13945 .cpuid = native_cpuid,
13946 .get_debugreg = native_get_debugreg,
13947 .set_debugreg = native_set_debugreg,
13948@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13949 .end_context_switch = paravirt_nop,
13950 };
13951
13952-struct pv_apic_ops pv_apic_ops = {
13953+struct pv_apic_ops pv_apic_ops __read_only = {
13954 #ifdef CONFIG_X86_LOCAL_APIC
13955 .startup_ipi_hook = paravirt_nop,
13956 #endif
13957 };
13958
13959-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13960+#ifdef CONFIG_X86_32
13961+#ifdef CONFIG_X86_PAE
13962+/* 64-bit pagetable entries */
13963+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13964+#else
13965 /* 32-bit pagetable entries */
13966 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13967+#endif
13968 #else
13969 /* 64-bit pagetable entries */
13970 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13971 #endif
13972
13973-struct pv_mmu_ops pv_mmu_ops = {
13974+struct pv_mmu_ops pv_mmu_ops __read_only = {
13975
13976 .read_cr2 = native_read_cr2,
13977 .write_cr2 = native_write_cr2,
13978@@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13979 },
13980
13981 .set_fixmap = native_set_fixmap,
13982+
13983+#ifdef CONFIG_PAX_KERNEXEC
13984+ .pax_open_kernel = native_pax_open_kernel,
13985+ .pax_close_kernel = native_pax_close_kernel,
13986+#endif
13987+
13988 };
13989
13990 EXPORT_SYMBOL_GPL(pv_time_ops);
13991diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
13992--- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
13993+++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
13994@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13995 arch_spin_lock(lock);
13996 }
13997
13998-struct pv_lock_ops pv_lock_ops = {
13999+struct pv_lock_ops pv_lock_ops __read_only = {
14000 #ifdef CONFIG_SMP
14001 .spin_is_locked = __ticket_spin_is_locked,
14002 .spin_is_contended = __ticket_spin_is_contended,
14003diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14004--- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14005+++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14006@@ -2,7 +2,7 @@
14007 #include <asm/iommu_table.h>
14008 #include <linux/string.h>
14009 #include <linux/kallsyms.h>
14010-
14011+#include <linux/sched.h>
14012
14013 #define DEBUG 1
14014
14015@@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14016 char sym_p[KSYM_SYMBOL_LEN];
14017 char sym_q[KSYM_SYMBOL_LEN];
14018
14019+ pax_track_stack();
14020+
14021 /* Simple cyclic dependency checker. */
14022 for (p = start; p < finish; p++) {
14023 q = find_dependents_of(start, finish, p);
14024diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14025--- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14026+++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14027@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14028 unsigned long thread_saved_pc(struct task_struct *tsk)
14029 {
14030 return ((unsigned long *)tsk->thread.sp)[3];
14031+//XXX return tsk->thread.eip;
14032 }
14033
14034 #ifndef CONFIG_SMP
14035@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14036 unsigned long sp;
14037 unsigned short ss, gs;
14038
14039- if (user_mode_vm(regs)) {
14040+ if (user_mode(regs)) {
14041 sp = regs->sp;
14042 ss = regs->ss & 0xffff;
14043- gs = get_user_gs(regs);
14044 } else {
14045 sp = kernel_stack_pointer(regs);
14046 savesegment(ss, ss);
14047- savesegment(gs, gs);
14048 }
14049+ gs = get_user_gs(regs);
14050
14051 show_regs_common();
14052
14053@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14054 struct task_struct *tsk;
14055 int err;
14056
14057- childregs = task_pt_regs(p);
14058+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14059 *childregs = *regs;
14060 childregs->ax = 0;
14061 childregs->sp = sp;
14062
14063 p->thread.sp = (unsigned long) childregs;
14064 p->thread.sp0 = (unsigned long) (childregs+1);
14065+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14066
14067 p->thread.ip = (unsigned long) ret_from_fork;
14068
14069@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14070 struct thread_struct *prev = &prev_p->thread,
14071 *next = &next_p->thread;
14072 int cpu = smp_processor_id();
14073- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14074+ struct tss_struct *tss = init_tss + cpu;
14075 bool preload_fpu;
14076
14077 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14078@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14079 */
14080 lazy_save_gs(prev->gs);
14081
14082+#ifdef CONFIG_PAX_MEMORY_UDEREF
14083+ __set_fs(task_thread_info(next_p)->addr_limit);
14084+#endif
14085+
14086 /*
14087 * Load the per-thread Thread-Local Storage descriptor.
14088 */
14089@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14090 */
14091 arch_end_context_switch(next_p);
14092
14093+ percpu_write(current_task, next_p);
14094+ percpu_write(current_tinfo, &next_p->tinfo);
14095+
14096 if (preload_fpu)
14097 __math_state_restore();
14098
14099@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14100 if (prev->gs | next->gs)
14101 lazy_load_gs(next->gs);
14102
14103- percpu_write(current_task, next_p);
14104-
14105 return prev_p;
14106 }
14107
14108@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14109 } while (count++ < 16);
14110 return 0;
14111 }
14112-
14113diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14114--- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14115+++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14116@@ -87,7 +87,7 @@ static void __exit_idle(void)
14117 void exit_idle(void)
14118 {
14119 /* idle loop has pid 0 */
14120- if (current->pid)
14121+ if (task_pid_nr(current))
14122 return;
14123 __exit_idle();
14124 }
14125@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14126 struct pt_regs *childregs;
14127 struct task_struct *me = current;
14128
14129- childregs = ((struct pt_regs *)
14130- (THREAD_SIZE + task_stack_page(p))) - 1;
14131+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14132 *childregs = *regs;
14133
14134 childregs->ax = 0;
14135@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14136 p->thread.sp = (unsigned long) childregs;
14137 p->thread.sp0 = (unsigned long) (childregs+1);
14138 p->thread.usersp = me->thread.usersp;
14139+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14140
14141 set_tsk_thread_flag(p, TIF_FORK);
14142
14143@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14144 struct thread_struct *prev = &prev_p->thread;
14145 struct thread_struct *next = &next_p->thread;
14146 int cpu = smp_processor_id();
14147- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14148+ struct tss_struct *tss = init_tss + cpu;
14149 unsigned fsindex, gsindex;
14150 bool preload_fpu;
14151
14152@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14153 prev->usersp = percpu_read(old_rsp);
14154 percpu_write(old_rsp, next->usersp);
14155 percpu_write(current_task, next_p);
14156+ percpu_write(current_tinfo, &next_p->tinfo);
14157
14158- percpu_write(kernel_stack,
14159- (unsigned long)task_stack_page(next_p) +
14160- THREAD_SIZE - KERNEL_STACK_OFFSET);
14161+ percpu_write(kernel_stack, next->sp0);
14162
14163 /*
14164 * Now maybe reload the debug registers and handle I/O bitmaps
14165@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14166 if (!p || p == current || p->state == TASK_RUNNING)
14167 return 0;
14168 stack = (unsigned long)task_stack_page(p);
14169- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14170+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14171 return 0;
14172 fp = *(u64 *)(p->thread.sp);
14173 do {
14174- if (fp < (unsigned long)stack ||
14175- fp >= (unsigned long)stack+THREAD_SIZE)
14176+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14177 return 0;
14178 ip = *(u64 *)(fp+8);
14179 if (!in_sched_functions(ip))
14180diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14181--- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14182+++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14183@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14184
14185 void free_thread_info(struct thread_info *ti)
14186 {
14187- free_thread_xstate(ti->task);
14188 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14189 }
14190
14191+static struct kmem_cache *task_struct_cachep;
14192+
14193 void arch_task_cache_init(void)
14194 {
14195- task_xstate_cachep =
14196- kmem_cache_create("task_xstate", xstate_size,
14197+ /* create a slab on which task_structs can be allocated */
14198+ task_struct_cachep =
14199+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14200+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14201+
14202+ task_xstate_cachep =
14203+ kmem_cache_create("task_xstate", xstate_size,
14204 __alignof__(union thread_xstate),
14205- SLAB_PANIC | SLAB_NOTRACK, NULL);
14206+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14207+}
14208+
14209+struct task_struct *alloc_task_struct_node(int node)
14210+{
14211+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14212+}
14213+
14214+void free_task_struct(struct task_struct *task)
14215+{
14216+ free_thread_xstate(task);
14217+ kmem_cache_free(task_struct_cachep, task);
14218 }
14219
14220 /*
14221@@ -70,7 +87,7 @@ void exit_thread(void)
14222 unsigned long *bp = t->io_bitmap_ptr;
14223
14224 if (bp) {
14225- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14226+ struct tss_struct *tss = init_tss + get_cpu();
14227
14228 t->io_bitmap_ptr = NULL;
14229 clear_thread_flag(TIF_IO_BITMAP);
14230@@ -106,7 +123,7 @@ void show_regs_common(void)
14231
14232 printk(KERN_CONT "\n");
14233 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14234- current->pid, current->comm, print_tainted(),
14235+ task_pid_nr(current), current->comm, print_tainted(),
14236 init_utsname()->release,
14237 (int)strcspn(init_utsname()->version, " "),
14238 init_utsname()->version);
14239@@ -120,6 +137,9 @@ void flush_thread(void)
14240 {
14241 struct task_struct *tsk = current;
14242
14243+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14244+ loadsegment(gs, 0);
14245+#endif
14246 flush_ptrace_hw_breakpoint(tsk);
14247 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14248 /*
14249@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14250 regs.di = (unsigned long) arg;
14251
14252 #ifdef CONFIG_X86_32
14253- regs.ds = __USER_DS;
14254- regs.es = __USER_DS;
14255+ regs.ds = __KERNEL_DS;
14256+ regs.es = __KERNEL_DS;
14257 regs.fs = __KERNEL_PERCPU;
14258- regs.gs = __KERNEL_STACK_CANARY;
14259+ savesegment(gs, regs.gs);
14260 #else
14261 regs.ss = __KERNEL_DS;
14262 #endif
14263@@ -401,7 +421,7 @@ void default_idle(void)
14264 EXPORT_SYMBOL(default_idle);
14265 #endif
14266
14267-void stop_this_cpu(void *dummy)
14268+__noreturn void stop_this_cpu(void *dummy)
14269 {
14270 local_irq_disable();
14271 /*
14272@@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14273 }
14274 early_param("idle", idle_setup);
14275
14276-unsigned long arch_align_stack(unsigned long sp)
14277+#ifdef CONFIG_PAX_RANDKSTACK
14278+asmlinkage void pax_randomize_kstack(void)
14279 {
14280- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14281- sp -= get_random_int() % 8192;
14282- return sp & ~0xf;
14283-}
14284+ struct thread_struct *thread = &current->thread;
14285+ unsigned long time;
14286
14287-unsigned long arch_randomize_brk(struct mm_struct *mm)
14288-{
14289- unsigned long range_end = mm->brk + 0x02000000;
14290- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14291-}
14292+ if (!randomize_va_space)
14293+ return;
14294+
14295+ rdtscl(time);
14296+
14297+ /* P4 seems to return a 0 LSB, ignore it */
14298+#ifdef CONFIG_MPENTIUM4
14299+ time &= 0x3EUL;
14300+ time <<= 2;
14301+#elif defined(CONFIG_X86_64)
14302+ time &= 0xFUL;
14303+ time <<= 4;
14304+#else
14305+ time &= 0x1FUL;
14306+ time <<= 3;
14307+#endif
14308+
14309+ thread->sp0 ^= time;
14310+ load_sp0(init_tss + smp_processor_id(), thread);
14311
14312+#ifdef CONFIG_X86_64
14313+ percpu_write(kernel_stack, thread->sp0);
14314+#endif
14315+}
14316+#endif
14317diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14318--- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14319+++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14320@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14321 unsigned long addr, unsigned long data)
14322 {
14323 int ret;
14324- unsigned long __user *datap = (unsigned long __user *)data;
14325+ unsigned long __user *datap = (__force unsigned long __user *)data;
14326
14327 switch (request) {
14328 /* read the word at location addr in the USER area. */
14329@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14330 if ((int) addr < 0)
14331 return -EIO;
14332 ret = do_get_thread_area(child, addr,
14333- (struct user_desc __user *)data);
14334+ (__force struct user_desc __user *) data);
14335 break;
14336
14337 case PTRACE_SET_THREAD_AREA:
14338 if ((int) addr < 0)
14339 return -EIO;
14340 ret = do_set_thread_area(child, addr,
14341- (struct user_desc __user *)data, 0);
14342+ (__force struct user_desc __user *) data, 0);
14343 break;
14344 #endif
14345
14346@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14347 memset(info, 0, sizeof(*info));
14348 info->si_signo = SIGTRAP;
14349 info->si_code = si_code;
14350- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14351+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14352 }
14353
14354 void user_single_step_siginfo(struct task_struct *tsk,
14355@@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14356 * We must return the syscall number to actually look up in the table.
14357 * This can be -1L to skip running any syscall at all.
14358 */
14359-asmregparm long syscall_trace_enter(struct pt_regs *regs)
14360+long syscall_trace_enter(struct pt_regs *regs)
14361 {
14362 long ret = 0;
14363
14364@@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14365 return ret ?: regs->orig_ax;
14366 }
14367
14368-asmregparm void syscall_trace_leave(struct pt_regs *regs)
14369+void syscall_trace_leave(struct pt_regs *regs)
14370 {
14371 bool step;
14372
14373diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14374--- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14375+++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14376@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14377 return pv_tsc_khz;
14378 }
14379
14380-static atomic64_t last_value = ATOMIC64_INIT(0);
14381+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14382
14383 void pvclock_resume(void)
14384 {
14385- atomic64_set(&last_value, 0);
14386+ atomic64_set_unchecked(&last_value, 0);
14387 }
14388
14389 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14390@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14391 * updating at the same time, and one of them could be slightly behind,
14392 * making the assumption that last_value always go forward fail to hold.
14393 */
14394- last = atomic64_read(&last_value);
14395+ last = atomic64_read_unchecked(&last_value);
14396 do {
14397 if (ret < last)
14398 return last;
14399- last = atomic64_cmpxchg(&last_value, last, ret);
14400+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14401 } while (unlikely(last != ret));
14402
14403 return ret;
14404diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14405--- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14406+++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14407@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14408 EXPORT_SYMBOL(pm_power_off);
14409
14410 static const struct desc_ptr no_idt = {};
14411-static int reboot_mode;
14412+static unsigned short reboot_mode;
14413 enum reboot_type reboot_type = BOOT_KBD;
14414 int reboot_force;
14415
14416@@ -307,13 +307,17 @@ core_initcall(reboot_init);
14417 extern const unsigned char machine_real_restart_asm[];
14418 extern const u64 machine_real_restart_gdt[3];
14419
14420-void machine_real_restart(unsigned int type)
14421+__noreturn void machine_real_restart(unsigned int type)
14422 {
14423 void *restart_va;
14424 unsigned long restart_pa;
14425- void (*restart_lowmem)(unsigned int);
14426+ void (* __noreturn restart_lowmem)(unsigned int);
14427 u64 *lowmem_gdt;
14428
14429+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14430+ struct desc_struct *gdt;
14431+#endif
14432+
14433 local_irq_disable();
14434
14435 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14436@@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14437 boot)". This seems like a fairly standard thing that gets set by
14438 REBOOT.COM programs, and the previous reset routine did this
14439 too. */
14440- *((unsigned short *)0x472) = reboot_mode;
14441+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14442
14443 /* Patch the GDT in the low memory trampoline */
14444 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14445
14446 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14447 restart_pa = virt_to_phys(restart_va);
14448- restart_lowmem = (void (*)(unsigned int))restart_pa;
14449+ restart_lowmem = (void *)restart_pa;
14450
14451 /* GDT[0]: GDT self-pointer */
14452 lowmem_gdt[0] =
14453@@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14454 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14455
14456 /* Jump to the identity-mapped low memory code */
14457+
14458+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14459+ gdt = get_cpu_gdt_table(smp_processor_id());
14460+ pax_open_kernel();
14461+#ifdef CONFIG_PAX_MEMORY_UDEREF
14462+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14463+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14464+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14465+#endif
14466+#ifdef CONFIG_PAX_KERNEXEC
14467+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14468+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14469+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14470+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14471+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14472+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14473+#endif
14474+ pax_close_kernel();
14475+#endif
14476+
14477+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14478+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14479+ unreachable();
14480+#else
14481 restart_lowmem(type);
14482+#endif
14483+
14484 }
14485 #ifdef CONFIG_APM_MODULE
14486 EXPORT_SYMBOL(machine_real_restart);
14487@@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14488 {
14489 }
14490
14491-static void native_machine_emergency_restart(void)
14492+__noreturn static void native_machine_emergency_restart(void)
14493 {
14494 int i;
14495
14496@@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14497 #endif
14498 }
14499
14500-static void __machine_emergency_restart(int emergency)
14501+static __noreturn void __machine_emergency_restart(int emergency)
14502 {
14503 reboot_emergency = emergency;
14504 machine_ops.emergency_restart();
14505 }
14506
14507-static void native_machine_restart(char *__unused)
14508+static __noreturn void native_machine_restart(char *__unused)
14509 {
14510 printk("machine restart\n");
14511
14512@@ -616,7 +646,7 @@ static void native_machine_restart(char
14513 __machine_emergency_restart(0);
14514 }
14515
14516-static void native_machine_halt(void)
14517+static __noreturn void native_machine_halt(void)
14518 {
14519 /* stop other cpus and apics */
14520 machine_shutdown();
14521@@ -627,7 +657,7 @@ static void native_machine_halt(void)
14522 stop_this_cpu(NULL);
14523 }
14524
14525-static void native_machine_power_off(void)
14526+__noreturn static void native_machine_power_off(void)
14527 {
14528 if (pm_power_off) {
14529 if (!reboot_force)
14530@@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14531 }
14532 /* a fallback in case there is no PM info available */
14533 tboot_shutdown(TB_SHUTDOWN_HALT);
14534+ unreachable();
14535 }
14536
14537 struct machine_ops machine_ops = {
14538diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14539--- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14540+++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14541@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14542 * area (640->1Mb) as ram even though it is not.
14543 * take them out.
14544 */
14545- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14546+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14547 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14548 }
14549
14550@@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14551
14552 if (!boot_params.hdr.root_flags)
14553 root_mountflags &= ~MS_RDONLY;
14554- init_mm.start_code = (unsigned long) _text;
14555- init_mm.end_code = (unsigned long) _etext;
14556+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14557+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14558 init_mm.end_data = (unsigned long) _edata;
14559 init_mm.brk = _brk_end;
14560
14561- code_resource.start = virt_to_phys(_text);
14562- code_resource.end = virt_to_phys(_etext)-1;
14563- data_resource.start = virt_to_phys(_etext);
14564+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14565+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14566+ data_resource.start = virt_to_phys(_sdata);
14567 data_resource.end = virt_to_phys(_edata)-1;
14568 bss_resource.start = virt_to_phys(&__bss_start);
14569 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14570diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14571--- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14572+++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14573@@ -21,19 +21,17 @@
14574 #include <asm/cpu.h>
14575 #include <asm/stackprotector.h>
14576
14577-DEFINE_PER_CPU(int, cpu_number);
14578+#ifdef CONFIG_SMP
14579+DEFINE_PER_CPU(unsigned int, cpu_number);
14580 EXPORT_PER_CPU_SYMBOL(cpu_number);
14581+#endif
14582
14583-#ifdef CONFIG_X86_64
14584 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14585-#else
14586-#define BOOT_PERCPU_OFFSET 0
14587-#endif
14588
14589 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14590 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14591
14592-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14593+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14594 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14595 };
14596 EXPORT_SYMBOL(__per_cpu_offset);
14597@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14598 {
14599 #ifdef CONFIG_X86_32
14600 struct desc_struct gdt;
14601+ unsigned long base = per_cpu_offset(cpu);
14602
14603- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14604- 0x2 | DESCTYPE_S, 0x8);
14605- gdt.s = 1;
14606+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14607+ 0x83 | DESCTYPE_S, 0xC);
14608 write_gdt_entry(get_cpu_gdt_table(cpu),
14609 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14610 #endif
14611@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14612 /* alrighty, percpu areas up and running */
14613 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14614 for_each_possible_cpu(cpu) {
14615+#ifdef CONFIG_CC_STACKPROTECTOR
14616+#ifdef CONFIG_X86_32
14617+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14618+#endif
14619+#endif
14620 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14621 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14622 per_cpu(cpu_number, cpu) = cpu;
14623@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14624 */
14625 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14626 #endif
14627+#ifdef CONFIG_CC_STACKPROTECTOR
14628+#ifdef CONFIG_X86_32
14629+ if (!cpu)
14630+ per_cpu(stack_canary.canary, cpu) = canary;
14631+#endif
14632+#endif
14633 /*
14634 * Up to this point, the boot CPU has been using .init.data
14635 * area. Reload any changed state for the boot CPU.
14636diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14637--- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14638+++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14639@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14640 * Align the stack pointer according to the i386 ABI,
14641 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14642 */
14643- sp = ((sp + 4) & -16ul) - 4;
14644+ sp = ((sp - 12) & -16ul) - 4;
14645 #else /* !CONFIG_X86_32 */
14646 sp = round_down(sp, 16) - 8;
14647 #endif
14648@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14649 * Return an always-bogus address instead so we will die with SIGSEGV.
14650 */
14651 if (onsigstack && !likely(on_sig_stack(sp)))
14652- return (void __user *)-1L;
14653+ return (__force void __user *)-1L;
14654
14655 /* save i387 state */
14656 if (used_math() && save_i387_xstate(*fpstate) < 0)
14657- return (void __user *)-1L;
14658+ return (__force void __user *)-1L;
14659
14660 return (void __user *)sp;
14661 }
14662@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14663 }
14664
14665 if (current->mm->context.vdso)
14666- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14667+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14668 else
14669- restorer = &frame->retcode;
14670+ restorer = (void __user *)&frame->retcode;
14671 if (ka->sa.sa_flags & SA_RESTORER)
14672 restorer = ka->sa.sa_restorer;
14673
14674@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14675 * reasons and because gdb uses it as a signature to notice
14676 * signal handler stack frames.
14677 */
14678- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14679+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14680
14681 if (err)
14682 return -EFAULT;
14683@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14684 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14685
14686 /* Set up to return from userspace. */
14687- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14688+ if (current->mm->context.vdso)
14689+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14690+ else
14691+ restorer = (void __user *)&frame->retcode;
14692 if (ka->sa.sa_flags & SA_RESTORER)
14693 restorer = ka->sa.sa_restorer;
14694 put_user_ex(restorer, &frame->pretcode);
14695@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14696 * reasons and because gdb uses it as a signature to notice
14697 * signal handler stack frames.
14698 */
14699- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14700+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14701 } put_user_catch(err);
14702
14703 if (err)
14704@@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14705 int signr;
14706 sigset_t *oldset;
14707
14708+ pax_track_stack();
14709+
14710 /*
14711 * We want the common case to go fast, which is why we may in certain
14712 * cases get here from kernel mode. Just return without doing anything
14713@@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14714 * X86_32: vm86 regs switched out by assembly code before reaching
14715 * here, so testing against kernel CS suffices.
14716 */
14717- if (!user_mode(regs))
14718+ if (!user_mode_novm(regs))
14719 return;
14720
14721 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14722diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14723--- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14724+++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14725@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14726 set_idle_for_cpu(cpu, c_idle.idle);
14727 do_rest:
14728 per_cpu(current_task, cpu) = c_idle.idle;
14729+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14730 #ifdef CONFIG_X86_32
14731 /* Stack for startup_32 can be just as for start_secondary onwards */
14732 irq_ctx_init(cpu);
14733 #else
14734 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14735 initial_gs = per_cpu_offset(cpu);
14736- per_cpu(kernel_stack, cpu) =
14737- (unsigned long)task_stack_page(c_idle.idle) -
14738- KERNEL_STACK_OFFSET + THREAD_SIZE;
14739+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14740 #endif
14741+
14742+ pax_open_kernel();
14743 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14744+ pax_close_kernel();
14745+
14746 initial_code = (unsigned long)start_secondary;
14747 stack_start = c_idle.idle->thread.sp;
14748
14749@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14750
14751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14752
14753+#ifdef CONFIG_PAX_PER_CPU_PGD
14754+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14755+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14756+ KERNEL_PGD_PTRS);
14757+#endif
14758+
14759 err = do_boot_cpu(apicid, cpu);
14760 if (err) {
14761 pr_debug("do_boot_cpu failed %d\n", err);
14762diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14763--- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14764+++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14765@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14766 struct desc_struct *desc;
14767 unsigned long base;
14768
14769- seg &= ~7UL;
14770+ seg >>= 3;
14771
14772 mutex_lock(&child->mm->context.lock);
14773- if (unlikely((seg >> 3) >= child->mm->context.size))
14774+ if (unlikely(seg >= child->mm->context.size))
14775 addr = -1L; /* bogus selector, access would fault */
14776 else {
14777 desc = child->mm->context.ldt + seg;
14778@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14779 addr += base;
14780 }
14781 mutex_unlock(&child->mm->context.lock);
14782- }
14783+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14784+ addr = ktla_ktva(addr);
14785
14786 return addr;
14787 }
14788@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14789 unsigned char opcode[15];
14790 unsigned long addr = convert_ip_to_linear(child, regs);
14791
14792+ if (addr == -EINVAL)
14793+ return 0;
14794+
14795 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14796 for (i = 0; i < copied; i++) {
14797 switch (opcode[i]) {
14798@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14799
14800 #ifdef CONFIG_X86_64
14801 case 0x40 ... 0x4f:
14802- if (regs->cs != __USER_CS)
14803+ if ((regs->cs & 0xffff) != __USER_CS)
14804 /* 32-bit mode: register increment */
14805 return 0;
14806 /* 64-bit mode: REX prefix */
14807diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14808--- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14809+++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14810@@ -1,3 +1,4 @@
14811+.section .rodata,"a",@progbits
14812 ENTRY(sys_call_table)
14813 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14814 .long sys_exit
14815diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14816--- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14817+++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14818@@ -24,17 +24,224 @@
14819
14820 #include <asm/syscalls.h>
14821
14822-/*
14823- * Do a system call from kernel instead of calling sys_execve so we
14824- * end up with proper pt_regs.
14825- */
14826-int kernel_execve(const char *filename,
14827- const char *const argv[],
14828- const char *const envp[])
14829+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14830 {
14831- long __res;
14832- asm volatile ("int $0x80"
14833- : "=a" (__res)
14834- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14835- return __res;
14836+ unsigned long pax_task_size = TASK_SIZE;
14837+
14838+#ifdef CONFIG_PAX_SEGMEXEC
14839+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14840+ pax_task_size = SEGMEXEC_TASK_SIZE;
14841+#endif
14842+
14843+ if (len > pax_task_size || addr > pax_task_size - len)
14844+ return -EINVAL;
14845+
14846+ return 0;
14847+}
14848+
14849+unsigned long
14850+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14851+ unsigned long len, unsigned long pgoff, unsigned long flags)
14852+{
14853+ struct mm_struct *mm = current->mm;
14854+ struct vm_area_struct *vma;
14855+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14856+
14857+#ifdef CONFIG_PAX_SEGMEXEC
14858+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14859+ pax_task_size = SEGMEXEC_TASK_SIZE;
14860+#endif
14861+
14862+ pax_task_size -= PAGE_SIZE;
14863+
14864+ if (len > pax_task_size)
14865+ return -ENOMEM;
14866+
14867+ if (flags & MAP_FIXED)
14868+ return addr;
14869+
14870+#ifdef CONFIG_PAX_RANDMMAP
14871+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14872+#endif
14873+
14874+ if (addr) {
14875+ addr = PAGE_ALIGN(addr);
14876+ if (pax_task_size - len >= addr) {
14877+ vma = find_vma(mm, addr);
14878+ if (check_heap_stack_gap(vma, addr, len))
14879+ return addr;
14880+ }
14881+ }
14882+ if (len > mm->cached_hole_size) {
14883+ start_addr = addr = mm->free_area_cache;
14884+ } else {
14885+ start_addr = addr = mm->mmap_base;
14886+ mm->cached_hole_size = 0;
14887+ }
14888+
14889+#ifdef CONFIG_PAX_PAGEEXEC
14890+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14891+ start_addr = 0x00110000UL;
14892+
14893+#ifdef CONFIG_PAX_RANDMMAP
14894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14895+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14896+#endif
14897+
14898+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14899+ start_addr = addr = mm->mmap_base;
14900+ else
14901+ addr = start_addr;
14902+ }
14903+#endif
14904+
14905+full_search:
14906+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14907+ /* At this point: (!vma || addr < vma->vm_end). */
14908+ if (pax_task_size - len < addr) {
14909+ /*
14910+ * Start a new search - just in case we missed
14911+ * some holes.
14912+ */
14913+ if (start_addr != mm->mmap_base) {
14914+ start_addr = addr = mm->mmap_base;
14915+ mm->cached_hole_size = 0;
14916+ goto full_search;
14917+ }
14918+ return -ENOMEM;
14919+ }
14920+ if (check_heap_stack_gap(vma, addr, len))
14921+ break;
14922+ if (addr + mm->cached_hole_size < vma->vm_start)
14923+ mm->cached_hole_size = vma->vm_start - addr;
14924+ addr = vma->vm_end;
14925+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14926+ start_addr = addr = mm->mmap_base;
14927+ mm->cached_hole_size = 0;
14928+ goto full_search;
14929+ }
14930+ }
14931+
14932+ /*
14933+ * Remember the place where we stopped the search:
14934+ */
14935+ mm->free_area_cache = addr + len;
14936+ return addr;
14937+}
14938+
14939+unsigned long
14940+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14941+ const unsigned long len, const unsigned long pgoff,
14942+ const unsigned long flags)
14943+{
14944+ struct vm_area_struct *vma;
14945+ struct mm_struct *mm = current->mm;
14946+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14947+
14948+#ifdef CONFIG_PAX_SEGMEXEC
14949+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14950+ pax_task_size = SEGMEXEC_TASK_SIZE;
14951+#endif
14952+
14953+ pax_task_size -= PAGE_SIZE;
14954+
14955+ /* requested length too big for entire address space */
14956+ if (len > pax_task_size)
14957+ return -ENOMEM;
14958+
14959+ if (flags & MAP_FIXED)
14960+ return addr;
14961+
14962+#ifdef CONFIG_PAX_PAGEEXEC
14963+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14964+ goto bottomup;
14965+#endif
14966+
14967+#ifdef CONFIG_PAX_RANDMMAP
14968+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14969+#endif
14970+
14971+ /* requesting a specific address */
14972+ if (addr) {
14973+ addr = PAGE_ALIGN(addr);
14974+ if (pax_task_size - len >= addr) {
14975+ vma = find_vma(mm, addr);
14976+ if (check_heap_stack_gap(vma, addr, len))
14977+ return addr;
14978+ }
14979+ }
14980+
14981+ /* check if free_area_cache is useful for us */
14982+ if (len <= mm->cached_hole_size) {
14983+ mm->cached_hole_size = 0;
14984+ mm->free_area_cache = mm->mmap_base;
14985+ }
14986+
14987+ /* either no address requested or can't fit in requested address hole */
14988+ addr = mm->free_area_cache;
14989+
14990+ /* make sure it can fit in the remaining address space */
14991+ if (addr > len) {
14992+ vma = find_vma(mm, addr-len);
14993+ if (check_heap_stack_gap(vma, addr - len, len))
14994+ /* remember the address as a hint for next time */
14995+ return (mm->free_area_cache = addr-len);
14996+ }
14997+
14998+ if (mm->mmap_base < len)
14999+ goto bottomup;
15000+
15001+ addr = mm->mmap_base-len;
15002+
15003+ do {
15004+ /*
15005+ * Lookup failure means no vma is above this address,
15006+ * else if new region fits below vma->vm_start,
15007+ * return with success:
15008+ */
15009+ vma = find_vma(mm, addr);
15010+ if (check_heap_stack_gap(vma, addr, len))
15011+ /* remember the address as a hint for next time */
15012+ return (mm->free_area_cache = addr);
15013+
15014+ /* remember the largest hole we saw so far */
15015+ if (addr + mm->cached_hole_size < vma->vm_start)
15016+ mm->cached_hole_size = vma->vm_start - addr;
15017+
15018+ /* try just below the current vma->vm_start */
15019+ addr = skip_heap_stack_gap(vma, len);
15020+ } while (!IS_ERR_VALUE(addr));
15021+
15022+bottomup:
15023+ /*
15024+ * A failed mmap() very likely causes application failure,
15025+ * so fall back to the bottom-up function here. This scenario
15026+ * can happen with large stack limits and large mmap()
15027+ * allocations.
15028+ */
15029+
15030+#ifdef CONFIG_PAX_SEGMEXEC
15031+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15032+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15033+ else
15034+#endif
15035+
15036+ mm->mmap_base = TASK_UNMAPPED_BASE;
15037+
15038+#ifdef CONFIG_PAX_RANDMMAP
15039+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15040+ mm->mmap_base += mm->delta_mmap;
15041+#endif
15042+
15043+ mm->free_area_cache = mm->mmap_base;
15044+ mm->cached_hole_size = ~0UL;
15045+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15046+ /*
15047+ * Restore the topdown base:
15048+ */
15049+ mm->mmap_base = base;
15050+ mm->free_area_cache = base;
15051+ mm->cached_hole_size = ~0UL;
15052+
15053+ return addr;
15054 }
15055diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15056--- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15057+++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15058@@ -32,8 +32,8 @@ out:
15059 return error;
15060 }
15061
15062-static void find_start_end(unsigned long flags, unsigned long *begin,
15063- unsigned long *end)
15064+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15065+ unsigned long *begin, unsigned long *end)
15066 {
15067 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15068 unsigned long new_begin;
15069@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15070 *begin = new_begin;
15071 }
15072 } else {
15073- *begin = TASK_UNMAPPED_BASE;
15074+ *begin = mm->mmap_base;
15075 *end = TASK_SIZE;
15076 }
15077 }
15078@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15079 if (flags & MAP_FIXED)
15080 return addr;
15081
15082- find_start_end(flags, &begin, &end);
15083+ find_start_end(mm, flags, &begin, &end);
15084
15085 if (len > end)
15086 return -ENOMEM;
15087
15088+#ifdef CONFIG_PAX_RANDMMAP
15089+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15090+#endif
15091+
15092 if (addr) {
15093 addr = PAGE_ALIGN(addr);
15094 vma = find_vma(mm, addr);
15095- if (end - len >= addr &&
15096- (!vma || addr + len <= vma->vm_start))
15097+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15098 return addr;
15099 }
15100 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15101@@ -106,7 +109,7 @@ full_search:
15102 }
15103 return -ENOMEM;
15104 }
15105- if (!vma || addr + len <= vma->vm_start) {
15106+ if (check_heap_stack_gap(vma, addr, len)) {
15107 /*
15108 * Remember the place where we stopped the search:
15109 */
15110@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15111 {
15112 struct vm_area_struct *vma;
15113 struct mm_struct *mm = current->mm;
15114- unsigned long addr = addr0;
15115+ unsigned long base = mm->mmap_base, addr = addr0;
15116
15117 /* requested length too big for entire address space */
15118 if (len > TASK_SIZE)
15119@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15120 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15121 goto bottomup;
15122
15123+#ifdef CONFIG_PAX_RANDMMAP
15124+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15125+#endif
15126+
15127 /* requesting a specific address */
15128 if (addr) {
15129 addr = PAGE_ALIGN(addr);
15130- vma = find_vma(mm, addr);
15131- if (TASK_SIZE - len >= addr &&
15132- (!vma || addr + len <= vma->vm_start))
15133- return addr;
15134+ if (TASK_SIZE - len >= addr) {
15135+ vma = find_vma(mm, addr);
15136+ if (check_heap_stack_gap(vma, addr, len))
15137+ return addr;
15138+ }
15139 }
15140
15141 /* check if free_area_cache is useful for us */
15142@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15143 /* make sure it can fit in the remaining address space */
15144 if (addr > len) {
15145 vma = find_vma(mm, addr-len);
15146- if (!vma || addr <= vma->vm_start)
15147+ if (check_heap_stack_gap(vma, addr - len, len))
15148 /* remember the address as a hint for next time */
15149 return mm->free_area_cache = addr-len;
15150 }
15151@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15152 * return with success:
15153 */
15154 vma = find_vma(mm, addr);
15155- if (!vma || addr+len <= vma->vm_start)
15156+ if (check_heap_stack_gap(vma, addr, len))
15157 /* remember the address as a hint for next time */
15158 return mm->free_area_cache = addr;
15159
15160@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15161 mm->cached_hole_size = vma->vm_start - addr;
15162
15163 /* try just below the current vma->vm_start */
15164- addr = vma->vm_start-len;
15165- } while (len < vma->vm_start);
15166+ addr = skip_heap_stack_gap(vma, len);
15167+ } while (!IS_ERR_VALUE(addr));
15168
15169 bottomup:
15170 /*
15171@@ -198,13 +206,21 @@ bottomup:
15172 * can happen with large stack limits and large mmap()
15173 * allocations.
15174 */
15175+ mm->mmap_base = TASK_UNMAPPED_BASE;
15176+
15177+#ifdef CONFIG_PAX_RANDMMAP
15178+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15179+ mm->mmap_base += mm->delta_mmap;
15180+#endif
15181+
15182+ mm->free_area_cache = mm->mmap_base;
15183 mm->cached_hole_size = ~0UL;
15184- mm->free_area_cache = TASK_UNMAPPED_BASE;
15185 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15186 /*
15187 * Restore the topdown base:
15188 */
15189- mm->free_area_cache = mm->mmap_base;
15190+ mm->mmap_base = base;
15191+ mm->free_area_cache = base;
15192 mm->cached_hole_size = ~0UL;
15193
15194 return addr;
15195diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15196--- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15197+++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15198@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15199
15200 void tboot_shutdown(u32 shutdown_type)
15201 {
15202- void (*shutdown)(void);
15203+ void (* __noreturn shutdown)(void);
15204
15205 if (!tboot_enabled())
15206 return;
15207@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15208
15209 switch_to_tboot_pt();
15210
15211- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15212+ shutdown = (void *)tboot->shutdown_entry;
15213 shutdown();
15214
15215 /* should not reach here */
15216@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15217 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15218 }
15219
15220-static atomic_t ap_wfs_count;
15221+static atomic_unchecked_t ap_wfs_count;
15222
15223 static int tboot_wait_for_aps(int num_aps)
15224 {
15225@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15226 {
15227 switch (action) {
15228 case CPU_DYING:
15229- atomic_inc(&ap_wfs_count);
15230+ atomic_inc_unchecked(&ap_wfs_count);
15231 if (num_online_cpus() == 1)
15232- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15233+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15234 return NOTIFY_BAD;
15235 break;
15236 }
15237@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15238
15239 tboot_create_trampoline();
15240
15241- atomic_set(&ap_wfs_count, 0);
15242+ atomic_set_unchecked(&ap_wfs_count, 0);
15243 register_hotcpu_notifier(&tboot_cpu_notifier);
15244 return 0;
15245 }
15246diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15247--- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15248+++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15249@@ -22,17 +22,13 @@
15250 #include <asm/hpet.h>
15251 #include <asm/time.h>
15252
15253-#ifdef CONFIG_X86_64
15254-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15255-#endif
15256-
15257 unsigned long profile_pc(struct pt_regs *regs)
15258 {
15259 unsigned long pc = instruction_pointer(regs);
15260
15261- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15262+ if (!user_mode(regs) && in_lock_functions(pc)) {
15263 #ifdef CONFIG_FRAME_POINTER
15264- return *(unsigned long *)(regs->bp + sizeof(long));
15265+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15266 #else
15267 unsigned long *sp =
15268 (unsigned long *)kernel_stack_pointer(regs);
15269@@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15270 * or above a saved flags. Eflags has bits 22-31 zero,
15271 * kernel addresses don't.
15272 */
15273+
15274+#ifdef CONFIG_PAX_KERNEXEC
15275+ return ktla_ktva(sp[0]);
15276+#else
15277 if (sp[0] >> 22)
15278 return sp[0];
15279 if (sp[1] >> 22)
15280 return sp[1];
15281 #endif
15282+
15283+#endif
15284 }
15285 return pc;
15286 }
15287diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15288--- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15289+++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15290@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15291 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15292 return -EINVAL;
15293
15294+#ifdef CONFIG_PAX_SEGMEXEC
15295+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15296+ return -EINVAL;
15297+#endif
15298+
15299 set_tls_desc(p, idx, &info, 1);
15300
15301 return 0;
15302diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15303--- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15304+++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15305@@ -32,6 +32,12 @@
15306 #include <asm/segment.h>
15307 #include <asm/page_types.h>
15308
15309+#ifdef CONFIG_PAX_KERNEXEC
15310+#define ta(X) (X)
15311+#else
15312+#define ta(X) ((X) - __PAGE_OFFSET)
15313+#endif
15314+
15315 #ifdef CONFIG_SMP
15316
15317 .section ".x86_trampoline","a"
15318@@ -62,7 +68,7 @@ r_base = .
15319 inc %ax # protected mode (PE) bit
15320 lmsw %ax # into protected mode
15321 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15322- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15323+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15324
15325 # These need to be in the same 64K segment as the above;
15326 # hence we don't use the boot_gdt_descr defined in head.S
15327diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15328--- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15329+++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15330@@ -90,7 +90,7 @@ startup_32:
15331 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15332 movl %eax, %ds
15333
15334- movl $X86_CR4_PAE, %eax
15335+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15336 movl %eax, %cr4 # Enable PAE mode
15337
15338 # Setup trampoline 4 level pagetables
15339@@ -138,7 +138,7 @@ tidt:
15340 # so the kernel can live anywhere
15341 .balign 4
15342 tgdt:
15343- .short tgdt_end - tgdt # gdt limit
15344+ .short tgdt_end - tgdt - 1 # gdt limit
15345 .long tgdt - r_base
15346 .short 0
15347 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15348diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15349--- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15350+++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15351@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15352
15353 /* Do we ignore FPU interrupts ? */
15354 char ignore_fpu_irq;
15355-
15356-/*
15357- * The IDT has to be page-aligned to simplify the Pentium
15358- * F0 0F bug workaround.
15359- */
15360-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15361 #endif
15362
15363 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15364@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15365 }
15366
15367 static void __kprobes
15368-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15369+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15370 long error_code, siginfo_t *info)
15371 {
15372 struct task_struct *tsk = current;
15373
15374 #ifdef CONFIG_X86_32
15375- if (regs->flags & X86_VM_MASK) {
15376+ if (v8086_mode(regs)) {
15377 /*
15378 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15379 * On nmi (interrupt 2), do_trap should not be called.
15380@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15381 }
15382 #endif
15383
15384- if (!user_mode(regs))
15385+ if (!user_mode_novm(regs))
15386 goto kernel_trap;
15387
15388 #ifdef CONFIG_X86_32
15389@@ -157,7 +151,7 @@ trap_signal:
15390 printk_ratelimit()) {
15391 printk(KERN_INFO
15392 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15393- tsk->comm, tsk->pid, str,
15394+ tsk->comm, task_pid_nr(tsk), str,
15395 regs->ip, regs->sp, error_code);
15396 print_vma_addr(" in ", regs->ip);
15397 printk("\n");
15398@@ -174,8 +168,20 @@ kernel_trap:
15399 if (!fixup_exception(regs)) {
15400 tsk->thread.error_code = error_code;
15401 tsk->thread.trap_no = trapnr;
15402+
15403+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15404+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15405+ str = "PAX: suspicious stack segment fault";
15406+#endif
15407+
15408 die(str, regs, error_code);
15409 }
15410+
15411+#ifdef CONFIG_PAX_REFCOUNT
15412+ if (trapnr == 4)
15413+ pax_report_refcount_overflow(regs);
15414+#endif
15415+
15416 return;
15417
15418 #ifdef CONFIG_X86_32
15419@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15420 conditional_sti(regs);
15421
15422 #ifdef CONFIG_X86_32
15423- if (regs->flags & X86_VM_MASK)
15424+ if (v8086_mode(regs))
15425 goto gp_in_vm86;
15426 #endif
15427
15428 tsk = current;
15429- if (!user_mode(regs))
15430+ if (!user_mode_novm(regs))
15431 goto gp_in_kernel;
15432
15433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15434+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15435+ struct mm_struct *mm = tsk->mm;
15436+ unsigned long limit;
15437+
15438+ down_write(&mm->mmap_sem);
15439+ limit = mm->context.user_cs_limit;
15440+ if (limit < TASK_SIZE) {
15441+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15442+ up_write(&mm->mmap_sem);
15443+ return;
15444+ }
15445+ up_write(&mm->mmap_sem);
15446+ }
15447+#endif
15448+
15449 tsk->thread.error_code = error_code;
15450 tsk->thread.trap_no = 13;
15451
15452@@ -304,6 +326,13 @@ gp_in_kernel:
15453 if (notify_die(DIE_GPF, "general protection fault", regs,
15454 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15455 return;
15456+
15457+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15458+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15459+ die("PAX: suspicious general protection fault", regs, error_code);
15460+ else
15461+#endif
15462+
15463 die("general protection fault", regs, error_code);
15464 }
15465
15466@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15467 dotraplinkage notrace __kprobes void
15468 do_nmi(struct pt_regs *regs, long error_code)
15469 {
15470+
15471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15472+ if (!user_mode(regs)) {
15473+ unsigned long cs = regs->cs & 0xFFFF;
15474+ unsigned long ip = ktva_ktla(regs->ip);
15475+
15476+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15477+ regs->ip = ip;
15478+ }
15479+#endif
15480+
15481 nmi_enter();
15482
15483 inc_irq_stat(__nmi_count);
15484@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15485 /* It's safe to allow irq's after DR6 has been saved */
15486 preempt_conditional_sti(regs);
15487
15488- if (regs->flags & X86_VM_MASK) {
15489+ if (v8086_mode(regs)) {
15490 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15491 error_code, 1);
15492 preempt_conditional_cli(regs);
15493@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15494 * We already checked v86 mode above, so we can check for kernel mode
15495 * by just checking the CPL of CS.
15496 */
15497- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15498+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15499 tsk->thread.debugreg6 &= ~DR_STEP;
15500 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15501 regs->flags &= ~X86_EFLAGS_TF;
15502@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15503 return;
15504 conditional_sti(regs);
15505
15506- if (!user_mode_vm(regs))
15507+ if (!user_mode(regs))
15508 {
15509 if (!fixup_exception(regs)) {
15510 task->thread.error_code = error_code;
15511@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15512 void __math_state_restore(void)
15513 {
15514 struct thread_info *thread = current_thread_info();
15515- struct task_struct *tsk = thread->task;
15516+ struct task_struct *tsk = current;
15517
15518 /*
15519 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15520@@ -750,8 +790,7 @@ void __math_state_restore(void)
15521 */
15522 asmlinkage void math_state_restore(void)
15523 {
15524- struct thread_info *thread = current_thread_info();
15525- struct task_struct *tsk = thread->task;
15526+ struct task_struct *tsk = current;
15527
15528 if (!tsk_used_math(tsk)) {
15529 local_irq_enable();
15530diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15531--- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15532+++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15533@@ -20,6 +20,7 @@
15534 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15535 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15536 * arch/x86/kernel/head_32.S: processor startup
15537+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15538 *
15539 * verify_cpu, returns the status of longmode and SSE in register %eax.
15540 * 0: Success 1: Failure
15541diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15542--- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15543+++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15544@@ -41,6 +41,7 @@
15545 #include <linux/ptrace.h>
15546 #include <linux/audit.h>
15547 #include <linux/stddef.h>
15548+#include <linux/grsecurity.h>
15549
15550 #include <asm/uaccess.h>
15551 #include <asm/io.h>
15552@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15553 do_exit(SIGSEGV);
15554 }
15555
15556- tss = &per_cpu(init_tss, get_cpu());
15557+ tss = init_tss + get_cpu();
15558 current->thread.sp0 = current->thread.saved_sp0;
15559 current->thread.sysenter_cs = __KERNEL_CS;
15560 load_sp0(tss, &current->thread);
15561@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15562 struct task_struct *tsk;
15563 int tmp, ret = -EPERM;
15564
15565+#ifdef CONFIG_GRKERNSEC_VM86
15566+ if (!capable(CAP_SYS_RAWIO)) {
15567+ gr_handle_vm86();
15568+ goto out;
15569+ }
15570+#endif
15571+
15572 tsk = current;
15573 if (tsk->thread.saved_sp0)
15574 goto out;
15575@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15576 int tmp, ret;
15577 struct vm86plus_struct __user *v86;
15578
15579+#ifdef CONFIG_GRKERNSEC_VM86
15580+ if (!capable(CAP_SYS_RAWIO)) {
15581+ gr_handle_vm86();
15582+ ret = -EPERM;
15583+ goto out;
15584+ }
15585+#endif
15586+
15587 tsk = current;
15588 switch (cmd) {
15589 case VM86_REQUEST_IRQ:
15590@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15591 tsk->thread.saved_fs = info->regs32->fs;
15592 tsk->thread.saved_gs = get_user_gs(info->regs32);
15593
15594- tss = &per_cpu(init_tss, get_cpu());
15595+ tss = init_tss + get_cpu();
15596 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15597 if (cpu_has_sep)
15598 tsk->thread.sysenter_cs = 0;
15599@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15600 goto cannot_handle;
15601 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15602 goto cannot_handle;
15603- intr_ptr = (unsigned long __user *) (i << 2);
15604+ intr_ptr = (__force unsigned long __user *) (i << 2);
15605 if (get_user(segoffs, intr_ptr))
15606 goto cannot_handle;
15607 if ((segoffs >> 16) == BIOSSEG)
15608diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15609--- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15610+++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15611@@ -26,6 +26,13 @@
15612 #include <asm/page_types.h>
15613 #include <asm/cache.h>
15614 #include <asm/boot.h>
15615+#include <asm/segment.h>
15616+
15617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15618+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15619+#else
15620+#define __KERNEL_TEXT_OFFSET 0
15621+#endif
15622
15623 #undef i386 /* in case the preprocessor is a 32bit one */
15624
15625@@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15626 #ifdef CONFIG_X86_32
15627 OUTPUT_ARCH(i386)
15628 ENTRY(phys_startup_32)
15629-jiffies = jiffies_64;
15630 #else
15631 OUTPUT_ARCH(i386:x86-64)
15632 ENTRY(phys_startup_64)
15633-jiffies_64 = jiffies;
15634 #endif
15635
15636 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15637@@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15638
15639 PHDRS {
15640 text PT_LOAD FLAGS(5); /* R_E */
15641+#ifdef CONFIG_X86_32
15642+ module PT_LOAD FLAGS(5); /* R_E */
15643+#endif
15644+#ifdef CONFIG_XEN
15645+ rodata PT_LOAD FLAGS(5); /* R_E */
15646+#else
15647+ rodata PT_LOAD FLAGS(4); /* R__ */
15648+#endif
15649 data PT_LOAD FLAGS(6); /* RW_ */
15650 #ifdef CONFIG_X86_64
15651 user PT_LOAD FLAGS(5); /* R_E */
15652+#endif
15653+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15654 #ifdef CONFIG_SMP
15655 percpu PT_LOAD FLAGS(6); /* RW_ */
15656 #endif
15657+ text.init PT_LOAD FLAGS(5); /* R_E */
15658+ text.exit PT_LOAD FLAGS(5); /* R_E */
15659 init PT_LOAD FLAGS(7); /* RWE */
15660-#endif
15661 note PT_NOTE FLAGS(0); /* ___ */
15662 }
15663
15664 SECTIONS
15665 {
15666 #ifdef CONFIG_X86_32
15667- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15668- phys_startup_32 = startup_32 - LOAD_OFFSET;
15669+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15670 #else
15671- . = __START_KERNEL;
15672- phys_startup_64 = startup_64 - LOAD_OFFSET;
15673+ . = __START_KERNEL;
15674 #endif
15675
15676 /* Text and read-only data */
15677- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15678- _text = .;
15679+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15680 /* bootstrapping code */
15681+#ifdef CONFIG_X86_32
15682+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15683+#else
15684+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15685+#endif
15686+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15687+ _text = .;
15688 HEAD_TEXT
15689 #ifdef CONFIG_X86_32
15690 . = ALIGN(PAGE_SIZE);
15691@@ -109,13 +129,47 @@ SECTIONS
15692 IRQENTRY_TEXT
15693 *(.fixup)
15694 *(.gnu.warning)
15695- /* End of text section */
15696- _etext = .;
15697 } :text = 0x9090
15698
15699- NOTES :text :note
15700+ . += __KERNEL_TEXT_OFFSET;
15701+
15702+#ifdef CONFIG_X86_32
15703+ . = ALIGN(PAGE_SIZE);
15704+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15705+
15706+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15707+ MODULES_EXEC_VADDR = .;
15708+ BYTE(0)
15709+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15710+ . = ALIGN(HPAGE_SIZE);
15711+ MODULES_EXEC_END = . - 1;
15712+#endif
15713+
15714+ } :module
15715+#endif
15716+
15717+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15718+ /* End of text section */
15719+ _etext = . - __KERNEL_TEXT_OFFSET;
15720+ }
15721
15722- EXCEPTION_TABLE(16) :text = 0x9090
15723+#ifdef CONFIG_X86_32
15724+ . = ALIGN(PAGE_SIZE);
15725+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15726+ *(.idt)
15727+ . = ALIGN(PAGE_SIZE);
15728+ *(.empty_zero_page)
15729+ *(.initial_pg_fixmap)
15730+ *(.initial_pg_pmd)
15731+ *(.initial_page_table)
15732+ *(.swapper_pg_dir)
15733+ } :rodata
15734+#endif
15735+
15736+ . = ALIGN(PAGE_SIZE);
15737+ NOTES :rodata :note
15738+
15739+ EXCEPTION_TABLE(16) :rodata
15740
15741 #if defined(CONFIG_DEBUG_RODATA)
15742 /* .text should occupy whole number of pages */
15743@@ -127,16 +181,20 @@ SECTIONS
15744
15745 /* Data */
15746 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15747+
15748+#ifdef CONFIG_PAX_KERNEXEC
15749+ . = ALIGN(HPAGE_SIZE);
15750+#else
15751+ . = ALIGN(PAGE_SIZE);
15752+#endif
15753+
15754 /* Start of data section */
15755 _sdata = .;
15756
15757 /* init_task */
15758 INIT_TASK_DATA(THREAD_SIZE)
15759
15760-#ifdef CONFIG_X86_32
15761- /* 32 bit has nosave before _edata */
15762 NOSAVE_DATA
15763-#endif
15764
15765 PAGE_ALIGNED_DATA(PAGE_SIZE)
15766
15767@@ -145,6 +203,8 @@ SECTIONS
15768 DATA_DATA
15769 CONSTRUCTORS
15770
15771+ jiffies = jiffies_64;
15772+
15773 /* rarely changed data like cpu maps */
15774 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15775
15776@@ -199,12 +259,6 @@ SECTIONS
15777 }
15778 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15779
15780- . = ALIGN(L1_CACHE_BYTES);
15781- .jiffies : AT(VLOAD(.jiffies)) {
15782- *(.jiffies)
15783- }
15784- jiffies = VVIRT(.jiffies);
15785-
15786 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15787 *(.vsyscall_3)
15788 }
15789@@ -220,12 +274,19 @@ SECTIONS
15790 #endif /* CONFIG_X86_64 */
15791
15792 /* Init code and data - will be freed after init */
15793- . = ALIGN(PAGE_SIZE);
15794 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15795+ BYTE(0)
15796+
15797+#ifdef CONFIG_PAX_KERNEXEC
15798+ . = ALIGN(HPAGE_SIZE);
15799+#else
15800+ . = ALIGN(PAGE_SIZE);
15801+#endif
15802+
15803 __init_begin = .; /* paired with __init_end */
15804- }
15805+ } :init.begin
15806
15807-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15808+#ifdef CONFIG_SMP
15809 /*
15810 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15811 * output PHDR, so the next output section - .init.text - should
15812@@ -234,12 +295,27 @@ SECTIONS
15813 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15814 #endif
15815
15816- INIT_TEXT_SECTION(PAGE_SIZE)
15817-#ifdef CONFIG_X86_64
15818- :init
15819-#endif
15820+ . = ALIGN(PAGE_SIZE);
15821+ init_begin = .;
15822+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15823+ VMLINUX_SYMBOL(_sinittext) = .;
15824+ INIT_TEXT
15825+ VMLINUX_SYMBOL(_einittext) = .;
15826+ . = ALIGN(PAGE_SIZE);
15827+ } :text.init
15828
15829- INIT_DATA_SECTION(16)
15830+ /*
15831+ * .exit.text is discard at runtime, not link time, to deal with
15832+ * references from .altinstructions and .eh_frame
15833+ */
15834+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15835+ EXIT_TEXT
15836+ . = ALIGN(16);
15837+ } :text.exit
15838+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15839+
15840+ . = ALIGN(PAGE_SIZE);
15841+ INIT_DATA_SECTION(16) :init
15842
15843 /*
15844 * Code and data for a variety of lowlevel trampolines, to be
15845@@ -306,19 +382,12 @@ SECTIONS
15846 }
15847
15848 . = ALIGN(8);
15849- /*
15850- * .exit.text is discard at runtime, not link time, to deal with
15851- * references from .altinstructions and .eh_frame
15852- */
15853- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15854- EXIT_TEXT
15855- }
15856
15857 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15858 EXIT_DATA
15859 }
15860
15861-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15862+#ifndef CONFIG_SMP
15863 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15864 #endif
15865
15866@@ -337,16 +406,10 @@ SECTIONS
15867 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15868 __smp_locks = .;
15869 *(.smp_locks)
15870- . = ALIGN(PAGE_SIZE);
15871 __smp_locks_end = .;
15872+ . = ALIGN(PAGE_SIZE);
15873 }
15874
15875-#ifdef CONFIG_X86_64
15876- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15877- NOSAVE_DATA
15878- }
15879-#endif
15880-
15881 /* BSS */
15882 . = ALIGN(PAGE_SIZE);
15883 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15884@@ -362,6 +425,7 @@ SECTIONS
15885 __brk_base = .;
15886 . += 64 * 1024; /* 64k alignment slop space */
15887 *(.brk_reservation) /* areas brk users have reserved */
15888+ . = ALIGN(HPAGE_SIZE);
15889 __brk_limit = .;
15890 }
15891
15892@@ -388,13 +452,12 @@ SECTIONS
15893 * for the boot processor.
15894 */
15895 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15896-INIT_PER_CPU(gdt_page);
15897 INIT_PER_CPU(irq_stack_union);
15898
15899 /*
15900 * Build-time check on the image size:
15901 */
15902-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15903+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15904 "kernel image bigger than KERNEL_IMAGE_SIZE");
15905
15906 #ifdef CONFIG_SMP
15907diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15908--- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15909+++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15910@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15911
15912 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15913 /* copy vsyscall data */
15914+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15915 vsyscall_gtod_data.clock.vread = clock->vread;
15916 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15917 vsyscall_gtod_data.clock.mask = clock->mask;
15918@@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15919 We do this here because otherwise user space would do it on
15920 its own in a likely inferior way (no access to jiffies).
15921 If you don't like it pass NULL. */
15922- if (tcache && tcache->blob[0] == (j = __jiffies)) {
15923+ if (tcache && tcache->blob[0] == (j = jiffies)) {
15924 p = tcache->blob[1];
15925 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15926 /* Load per CPU data from RDTSCP */
15927diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15928--- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15929+++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15930@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15931 EXPORT_SYMBOL(copy_user_generic_string);
15932 EXPORT_SYMBOL(copy_user_generic_unrolled);
15933 EXPORT_SYMBOL(__copy_user_nocache);
15934-EXPORT_SYMBOL(_copy_from_user);
15935-EXPORT_SYMBOL(_copy_to_user);
15936
15937 EXPORT_SYMBOL(copy_page);
15938 EXPORT_SYMBOL(clear_page);
15939diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15940--- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15941+++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15942@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15943 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15944 return -EINVAL;
15945
15946- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15947+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15948 fx_sw_user->extended_size -
15949 FP_XSTATE_MAGIC2_SIZE));
15950 if (err)
15951@@ -267,7 +267,7 @@ fx_only:
15952 * the other extended state.
15953 */
15954 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15955- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15956+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15957 }
15958
15959 /*
15960@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15961 if (use_xsave())
15962 err = restore_user_xstate(buf);
15963 else
15964- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15965+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15966 buf);
15967 if (unlikely(err)) {
15968 /*
15969diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
15970--- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
15971+++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
15972@@ -89,7 +89,7 @@
15973 #define Src2ImmByte (2<<29)
15974 #define Src2One (3<<29)
15975 #define Src2Imm (4<<29)
15976-#define Src2Mask (7<<29)
15977+#define Src2Mask (7U<<29)
15978
15979 #define X2(x...) x, x
15980 #define X3(x...) X2(x), x
15981@@ -190,6 +190,7 @@ struct group_dual {
15982
15983 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15984 do { \
15985+ unsigned long _tmp; \
15986 __asm__ __volatile__ ( \
15987 _PRE_EFLAGS("0", "4", "2") \
15988 _op _suffix " %"_x"3,%1; " \
15989@@ -203,8 +204,6 @@ struct group_dual {
15990 /* Raw emulation: instruction has two explicit operands. */
15991 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15992 do { \
15993- unsigned long _tmp; \
15994- \
15995 switch ((_dst).bytes) { \
15996 case 2: \
15997 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15998@@ -220,7 +219,6 @@ struct group_dual {
15999
16000 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16001 do { \
16002- unsigned long _tmp; \
16003 switch ((_dst).bytes) { \
16004 case 1: \
16005 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16006diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16007--- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16008+++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16009@@ -53,7 +53,7 @@
16010 #define APIC_BUS_CYCLE_NS 1
16011
16012 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16013-#define apic_debug(fmt, arg...)
16014+#define apic_debug(fmt, arg...) do {} while (0)
16015
16016 #define APIC_LVT_NUM 6
16017 /* 14 is the version for Xeon and Pentium 8.4.8*/
16018diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16019--- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16020+++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16021@@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16022
16023 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16024
16025- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16026+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16027
16028 /*
16029 * Assume that the pte write on a page table of the same type
16030@@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16031 smp_rmb();
16032
16033 spin_lock(&vcpu->kvm->mmu_lock);
16034- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16035+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16036 gentry = 0;
16037 kvm_mmu_free_some_pages(vcpu);
16038 ++vcpu->kvm->stat.mmu_pte_write;
16039diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16040--- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16041+++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16042@@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16043 unsigned long mmu_seq;
16044 bool map_writable;
16045
16046+ pax_track_stack();
16047+
16048 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16049
16050 r = mmu_topup_memory_caches(vcpu);
16051@@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16052 if (need_flush)
16053 kvm_flush_remote_tlbs(vcpu->kvm);
16054
16055- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16056+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16057
16058 spin_unlock(&vcpu->kvm->mmu_lock);
16059
16060diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16061--- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16062+++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16063@@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16064 int cpu = raw_smp_processor_id();
16065
16066 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16067+
16068+ pax_open_kernel();
16069 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16070+ pax_close_kernel();
16071+
16072 load_TR_desc();
16073 }
16074
16075@@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16076 #endif
16077 #endif
16078
16079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16080+ __set_fs(current_thread_info()->addr_limit);
16081+#endif
16082+
16083 reload_tss(vcpu);
16084
16085 local_irq_disable();
16086diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16087--- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16088+++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16089@@ -725,7 +725,11 @@ static void reload_tss(void)
16090 struct desc_struct *descs;
16091
16092 descs = (void *)gdt->address;
16093+
16094+ pax_open_kernel();
16095 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16096+ pax_close_kernel();
16097+
16098 load_TR_desc();
16099 }
16100
16101@@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16102 if (!cpu_has_vmx_flexpriority())
16103 flexpriority_enabled = 0;
16104
16105- if (!cpu_has_vmx_tpr_shadow())
16106- kvm_x86_ops->update_cr8_intercept = NULL;
16107+ if (!cpu_has_vmx_tpr_shadow()) {
16108+ pax_open_kernel();
16109+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16110+ pax_close_kernel();
16111+ }
16112
16113 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16114 kvm_disable_largepages();
16115@@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16116 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16117
16118 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16119- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16120+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16121 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16122 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16123 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16124@@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16125 "jmp .Lkvm_vmx_return \n\t"
16126 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16127 ".Lkvm_vmx_return: "
16128+
16129+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16130+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16131+ ".Lkvm_vmx_return2: "
16132+#endif
16133+
16134 /* Save guest registers, load host registers, keep flags */
16135 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16136 "pop %0 \n\t"
16137@@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16138 #endif
16139 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16140 [wordsize]"i"(sizeof(ulong))
16141+
16142+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16143+ ,[cs]"i"(__KERNEL_CS)
16144+#endif
16145+
16146 : "cc", "memory"
16147 , R"ax", R"bx", R"di", R"si"
16148 #ifdef CONFIG_X86_64
16149@@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16150
16151 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16152
16153- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16154+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16155+
16156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16157+ loadsegment(fs, __KERNEL_PERCPU);
16158+#endif
16159+
16160+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16161+ __set_fs(current_thread_info()->addr_limit);
16162+#endif
16163+
16164 vmx->launched = 1;
16165
16166 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16167diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16168--- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16169+++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16170@@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16171 if (n < msr_list.nmsrs)
16172 goto out;
16173 r = -EFAULT;
16174+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16175+ goto out;
16176 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16177 num_msrs_to_save * sizeof(u32)))
16178 goto out;
16179@@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16180 struct kvm_cpuid2 *cpuid,
16181 struct kvm_cpuid_entry2 __user *entries)
16182 {
16183- int r;
16184+ int r, i;
16185
16186 r = -E2BIG;
16187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16188 goto out;
16189 r = -EFAULT;
16190- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16191- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16192+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16193 goto out;
16194+ for (i = 0; i < cpuid->nent; ++i) {
16195+ struct kvm_cpuid_entry2 cpuid_entry;
16196+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16197+ goto out;
16198+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16199+ }
16200 vcpu->arch.cpuid_nent = cpuid->nent;
16201 kvm_apic_set_version(vcpu);
16202 kvm_x86_ops->cpuid_update(vcpu);
16203@@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16204 struct kvm_cpuid2 *cpuid,
16205 struct kvm_cpuid_entry2 __user *entries)
16206 {
16207- int r;
16208+ int r, i;
16209
16210 r = -E2BIG;
16211 if (cpuid->nent < vcpu->arch.cpuid_nent)
16212 goto out;
16213 r = -EFAULT;
16214- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16215- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16216+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16217 goto out;
16218+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16219+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16220+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16221+ goto out;
16222+ }
16223 return 0;
16224
16225 out:
16226@@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16227 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16228 struct kvm_interrupt *irq)
16229 {
16230- if (irq->irq < 0 || irq->irq >= 256)
16231+ if (irq->irq >= 256)
16232 return -EINVAL;
16233 if (irqchip_in_kernel(vcpu->kvm))
16234 return -ENXIO;
16235@@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16236 }
16237 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16238
16239-int kvm_arch_init(void *opaque)
16240+int kvm_arch_init(const void *opaque)
16241 {
16242 int r;
16243 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16244diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16245--- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16246+++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16247@@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16248 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16249 * Launcher to reboot us.
16250 */
16251-static void lguest_restart(char *reason)
16252+static __noreturn void lguest_restart(char *reason)
16253 {
16254 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16255+ BUG();
16256 }
16257
16258 /*G:050
16259diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16260--- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16261+++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16262@@ -8,18 +8,30 @@
16263
16264 long long atomic64_read_cx8(long long, const atomic64_t *v);
16265 EXPORT_SYMBOL(atomic64_read_cx8);
16266+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16267+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16268 long long atomic64_set_cx8(long long, const atomic64_t *v);
16269 EXPORT_SYMBOL(atomic64_set_cx8);
16270+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16271+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16272 long long atomic64_xchg_cx8(long long, unsigned high);
16273 EXPORT_SYMBOL(atomic64_xchg_cx8);
16274 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_add_return_cx8);
16276+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16277+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16278 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16279 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16280+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16281+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16282 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16283 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16284+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16285+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16286 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16287 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16288+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16289+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16290 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16291 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16292 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16293@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16294 #ifndef CONFIG_X86_CMPXCHG64
16295 long long atomic64_read_386(long long, const atomic64_t *v);
16296 EXPORT_SYMBOL(atomic64_read_386);
16297+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16298+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16299 long long atomic64_set_386(long long, const atomic64_t *v);
16300 EXPORT_SYMBOL(atomic64_set_386);
16301+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16302+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16303 long long atomic64_xchg_386(long long, unsigned high);
16304 EXPORT_SYMBOL(atomic64_xchg_386);
16305 long long atomic64_add_return_386(long long a, atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_add_return_386);
16307+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16308+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16309 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16310 EXPORT_SYMBOL(atomic64_sub_return_386);
16311+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16312+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16313 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16314 EXPORT_SYMBOL(atomic64_inc_return_386);
16315+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16316+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16317 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16318 EXPORT_SYMBOL(atomic64_dec_return_386);
16319+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16320+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16321 long long atomic64_add_386(long long a, atomic64_t *v);
16322 EXPORT_SYMBOL(atomic64_add_386);
16323+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16324+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16325 long long atomic64_sub_386(long long a, atomic64_t *v);
16326 EXPORT_SYMBOL(atomic64_sub_386);
16327+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16328+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16329 long long atomic64_inc_386(long long a, atomic64_t *v);
16330 EXPORT_SYMBOL(atomic64_inc_386);
16331+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16332+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16333 long long atomic64_dec_386(long long a, atomic64_t *v);
16334 EXPORT_SYMBOL(atomic64_dec_386);
16335+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16336+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16337 long long atomic64_dec_if_positive_386(atomic64_t *v);
16338 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16339 int atomic64_inc_not_zero_386(atomic64_t *v);
16340diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16341--- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16342+++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16343@@ -48,6 +48,10 @@ BEGIN(read)
16344 movl (v), %eax
16345 movl 4(v), %edx
16346 RET_ENDP
16347+BEGIN(read_unchecked)
16348+ movl (v), %eax
16349+ movl 4(v), %edx
16350+RET_ENDP
16351 #undef v
16352
16353 #define v %esi
16354@@ -55,6 +59,10 @@ BEGIN(set)
16355 movl %ebx, (v)
16356 movl %ecx, 4(v)
16357 RET_ENDP
16358+BEGIN(set_unchecked)
16359+ movl %ebx, (v)
16360+ movl %ecx, 4(v)
16361+RET_ENDP
16362 #undef v
16363
16364 #define v %esi
16365@@ -70,6 +78,20 @@ RET_ENDP
16366 BEGIN(add)
16367 addl %eax, (v)
16368 adcl %edx, 4(v)
16369+
16370+#ifdef CONFIG_PAX_REFCOUNT
16371+ jno 0f
16372+ subl %eax, (v)
16373+ sbbl %edx, 4(v)
16374+ int $4
16375+0:
16376+ _ASM_EXTABLE(0b, 0b)
16377+#endif
16378+
16379+RET_ENDP
16380+BEGIN(add_unchecked)
16381+ addl %eax, (v)
16382+ adcl %edx, 4(v)
16383 RET_ENDP
16384 #undef v
16385
16386@@ -77,6 +99,24 @@ RET_ENDP
16387 BEGIN(add_return)
16388 addl (v), %eax
16389 adcl 4(v), %edx
16390+
16391+#ifdef CONFIG_PAX_REFCOUNT
16392+ into
16393+1234:
16394+ _ASM_EXTABLE(1234b, 2f)
16395+#endif
16396+
16397+ movl %eax, (v)
16398+ movl %edx, 4(v)
16399+
16400+#ifdef CONFIG_PAX_REFCOUNT
16401+2:
16402+#endif
16403+
16404+RET_ENDP
16405+BEGIN(add_return_unchecked)
16406+ addl (v), %eax
16407+ adcl 4(v), %edx
16408 movl %eax, (v)
16409 movl %edx, 4(v)
16410 RET_ENDP
16411@@ -86,6 +126,20 @@ RET_ENDP
16412 BEGIN(sub)
16413 subl %eax, (v)
16414 sbbl %edx, 4(v)
16415+
16416+#ifdef CONFIG_PAX_REFCOUNT
16417+ jno 0f
16418+ addl %eax, (v)
16419+ adcl %edx, 4(v)
16420+ int $4
16421+0:
16422+ _ASM_EXTABLE(0b, 0b)
16423+#endif
16424+
16425+RET_ENDP
16426+BEGIN(sub_unchecked)
16427+ subl %eax, (v)
16428+ sbbl %edx, 4(v)
16429 RET_ENDP
16430 #undef v
16431
16432@@ -96,6 +150,27 @@ BEGIN(sub_return)
16433 sbbl $0, %edx
16434 addl (v), %eax
16435 adcl 4(v), %edx
16436+
16437+#ifdef CONFIG_PAX_REFCOUNT
16438+ into
16439+1234:
16440+ _ASM_EXTABLE(1234b, 2f)
16441+#endif
16442+
16443+ movl %eax, (v)
16444+ movl %edx, 4(v)
16445+
16446+#ifdef CONFIG_PAX_REFCOUNT
16447+2:
16448+#endif
16449+
16450+RET_ENDP
16451+BEGIN(sub_return_unchecked)
16452+ negl %edx
16453+ negl %eax
16454+ sbbl $0, %edx
16455+ addl (v), %eax
16456+ adcl 4(v), %edx
16457 movl %eax, (v)
16458 movl %edx, 4(v)
16459 RET_ENDP
16460@@ -105,6 +180,20 @@ RET_ENDP
16461 BEGIN(inc)
16462 addl $1, (v)
16463 adcl $0, 4(v)
16464+
16465+#ifdef CONFIG_PAX_REFCOUNT
16466+ jno 0f
16467+ subl $1, (v)
16468+ sbbl $0, 4(v)
16469+ int $4
16470+0:
16471+ _ASM_EXTABLE(0b, 0b)
16472+#endif
16473+
16474+RET_ENDP
16475+BEGIN(inc_unchecked)
16476+ addl $1, (v)
16477+ adcl $0, 4(v)
16478 RET_ENDP
16479 #undef v
16480
16481@@ -114,6 +203,26 @@ BEGIN(inc_return)
16482 movl 4(v), %edx
16483 addl $1, %eax
16484 adcl $0, %edx
16485+
16486+#ifdef CONFIG_PAX_REFCOUNT
16487+ into
16488+1234:
16489+ _ASM_EXTABLE(1234b, 2f)
16490+#endif
16491+
16492+ movl %eax, (v)
16493+ movl %edx, 4(v)
16494+
16495+#ifdef CONFIG_PAX_REFCOUNT
16496+2:
16497+#endif
16498+
16499+RET_ENDP
16500+BEGIN(inc_return_unchecked)
16501+ movl (v), %eax
16502+ movl 4(v), %edx
16503+ addl $1, %eax
16504+ adcl $0, %edx
16505 movl %eax, (v)
16506 movl %edx, 4(v)
16507 RET_ENDP
16508@@ -123,6 +232,20 @@ RET_ENDP
16509 BEGIN(dec)
16510 subl $1, (v)
16511 sbbl $0, 4(v)
16512+
16513+#ifdef CONFIG_PAX_REFCOUNT
16514+ jno 0f
16515+ addl $1, (v)
16516+ adcl $0, 4(v)
16517+ int $4
16518+0:
16519+ _ASM_EXTABLE(0b, 0b)
16520+#endif
16521+
16522+RET_ENDP
16523+BEGIN(dec_unchecked)
16524+ subl $1, (v)
16525+ sbbl $0, 4(v)
16526 RET_ENDP
16527 #undef v
16528
16529@@ -132,6 +255,26 @@ BEGIN(dec_return)
16530 movl 4(v), %edx
16531 subl $1, %eax
16532 sbbl $0, %edx
16533+
16534+#ifdef CONFIG_PAX_REFCOUNT
16535+ into
16536+1234:
16537+ _ASM_EXTABLE(1234b, 2f)
16538+#endif
16539+
16540+ movl %eax, (v)
16541+ movl %edx, 4(v)
16542+
16543+#ifdef CONFIG_PAX_REFCOUNT
16544+2:
16545+#endif
16546+
16547+RET_ENDP
16548+BEGIN(dec_return_unchecked)
16549+ movl (v), %eax
16550+ movl 4(v), %edx
16551+ subl $1, %eax
16552+ sbbl $0, %edx
16553 movl %eax, (v)
16554 movl %edx, 4(v)
16555 RET_ENDP
16556@@ -143,6 +286,13 @@ BEGIN(add_unless)
16557 adcl %edx, %edi
16558 addl (v), %eax
16559 adcl 4(v), %edx
16560+
16561+#ifdef CONFIG_PAX_REFCOUNT
16562+ into
16563+1234:
16564+ _ASM_EXTABLE(1234b, 2f)
16565+#endif
16566+
16567 cmpl %eax, %esi
16568 je 3f
16569 1:
16570@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16571 1:
16572 addl $1, %eax
16573 adcl $0, %edx
16574+
16575+#ifdef CONFIG_PAX_REFCOUNT
16576+ into
16577+1234:
16578+ _ASM_EXTABLE(1234b, 2f)
16579+#endif
16580+
16581 movl %eax, (v)
16582 movl %edx, 4(v)
16583 movl $1, %eax
16584@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16585 movl 4(v), %edx
16586 subl $1, %eax
16587 sbbl $0, %edx
16588+
16589+#ifdef CONFIG_PAX_REFCOUNT
16590+ into
16591+1234:
16592+ _ASM_EXTABLE(1234b, 1f)
16593+#endif
16594+
16595 js 1f
16596 movl %eax, (v)
16597 movl %edx, 4(v)
16598diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16599--- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16600+++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16601@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16602 CFI_ENDPROC
16603 ENDPROC(atomic64_read_cx8)
16604
16605+ENTRY(atomic64_read_unchecked_cx8)
16606+ CFI_STARTPROC
16607+
16608+ read64 %ecx
16609+ ret
16610+ CFI_ENDPROC
16611+ENDPROC(atomic64_read_unchecked_cx8)
16612+
16613 ENTRY(atomic64_set_cx8)
16614 CFI_STARTPROC
16615
16616@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16617 CFI_ENDPROC
16618 ENDPROC(atomic64_set_cx8)
16619
16620+ENTRY(atomic64_set_unchecked_cx8)
16621+ CFI_STARTPROC
16622+
16623+1:
16624+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16625+ * are atomic on 586 and newer */
16626+ cmpxchg8b (%esi)
16627+ jne 1b
16628+
16629+ ret
16630+ CFI_ENDPROC
16631+ENDPROC(atomic64_set_unchecked_cx8)
16632+
16633 ENTRY(atomic64_xchg_cx8)
16634 CFI_STARTPROC
16635
16636@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16637 CFI_ENDPROC
16638 ENDPROC(atomic64_xchg_cx8)
16639
16640-.macro addsub_return func ins insc
16641-ENTRY(atomic64_\func\()_return_cx8)
16642+.macro addsub_return func ins insc unchecked=""
16643+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16644 CFI_STARTPROC
16645 SAVE ebp
16646 SAVE ebx
16647@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16648 movl %edx, %ecx
16649 \ins\()l %esi, %ebx
16650 \insc\()l %edi, %ecx
16651+
16652+.ifb \unchecked
16653+#ifdef CONFIG_PAX_REFCOUNT
16654+ into
16655+2:
16656+ _ASM_EXTABLE(2b, 3f)
16657+#endif
16658+.endif
16659+
16660 LOCK_PREFIX
16661 cmpxchg8b (%ebp)
16662 jne 1b
16663-
16664-10:
16665 movl %ebx, %eax
16666 movl %ecx, %edx
16667+
16668+.ifb \unchecked
16669+#ifdef CONFIG_PAX_REFCOUNT
16670+3:
16671+#endif
16672+.endif
16673+
16674 RESTORE edi
16675 RESTORE esi
16676 RESTORE ebx
16677 RESTORE ebp
16678 ret
16679 CFI_ENDPROC
16680-ENDPROC(atomic64_\func\()_return_cx8)
16681+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16682 .endm
16683
16684 addsub_return add add adc
16685 addsub_return sub sub sbb
16686+addsub_return add add adc _unchecked
16687+addsub_return sub sub sbb _unchecked
16688
16689-.macro incdec_return func ins insc
16690-ENTRY(atomic64_\func\()_return_cx8)
16691+.macro incdec_return func ins insc unchecked
16692+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16693 CFI_STARTPROC
16694 SAVE ebx
16695
16696@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16697 movl %edx, %ecx
16698 \ins\()l $1, %ebx
16699 \insc\()l $0, %ecx
16700+
16701+.ifb \unchecked
16702+#ifdef CONFIG_PAX_REFCOUNT
16703+ into
16704+2:
16705+ _ASM_EXTABLE(2b, 3f)
16706+#endif
16707+.endif
16708+
16709 LOCK_PREFIX
16710 cmpxchg8b (%esi)
16711 jne 1b
16712
16713-10:
16714 movl %ebx, %eax
16715 movl %ecx, %edx
16716+
16717+.ifb \unchecked
16718+#ifdef CONFIG_PAX_REFCOUNT
16719+3:
16720+#endif
16721+.endif
16722+
16723 RESTORE ebx
16724 ret
16725 CFI_ENDPROC
16726-ENDPROC(atomic64_\func\()_return_cx8)
16727+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16728 .endm
16729
16730 incdec_return inc add adc
16731 incdec_return dec sub sbb
16732+incdec_return inc add adc _unchecked
16733+incdec_return dec sub sbb _unchecked
16734
16735 ENTRY(atomic64_dec_if_positive_cx8)
16736 CFI_STARTPROC
16737@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16738 movl %edx, %ecx
16739 subl $1, %ebx
16740 sbb $0, %ecx
16741+
16742+#ifdef CONFIG_PAX_REFCOUNT
16743+ into
16744+1234:
16745+ _ASM_EXTABLE(1234b, 2f)
16746+#endif
16747+
16748 js 2f
16749 LOCK_PREFIX
16750 cmpxchg8b (%esi)
16751@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16752 movl %edx, %ecx
16753 addl %esi, %ebx
16754 adcl %edi, %ecx
16755+
16756+#ifdef CONFIG_PAX_REFCOUNT
16757+ into
16758+1234:
16759+ _ASM_EXTABLE(1234b, 3f)
16760+#endif
16761+
16762 LOCK_PREFIX
16763 cmpxchg8b (%ebp)
16764 jne 1b
16765@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16766 movl %edx, %ecx
16767 addl $1, %ebx
16768 adcl $0, %ecx
16769+
16770+#ifdef CONFIG_PAX_REFCOUNT
16771+ into
16772+1234:
16773+ _ASM_EXTABLE(1234b, 3f)
16774+#endif
16775+
16776 LOCK_PREFIX
16777 cmpxchg8b (%esi)
16778 jne 1b
16779diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16780--- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16781+++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16782@@ -28,7 +28,8 @@
16783 #include <linux/linkage.h>
16784 #include <asm/dwarf2.h>
16785 #include <asm/errno.h>
16786-
16787+#include <asm/segment.h>
16788+
16789 /*
16790 * computes a partial checksum, e.g. for TCP/UDP fragments
16791 */
16792@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16793
16794 #define ARGBASE 16
16795 #define FP 12
16796-
16797-ENTRY(csum_partial_copy_generic)
16798+
16799+ENTRY(csum_partial_copy_generic_to_user)
16800 CFI_STARTPROC
16801+
16802+#ifdef CONFIG_PAX_MEMORY_UDEREF
16803+ pushl_cfi %gs
16804+ popl_cfi %es
16805+ jmp csum_partial_copy_generic
16806+#endif
16807+
16808+ENTRY(csum_partial_copy_generic_from_user)
16809+
16810+#ifdef CONFIG_PAX_MEMORY_UDEREF
16811+ pushl_cfi %gs
16812+ popl_cfi %ds
16813+#endif
16814+
16815+ENTRY(csum_partial_copy_generic)
16816 subl $4,%esp
16817 CFI_ADJUST_CFA_OFFSET 4
16818 pushl_cfi %edi
16819@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16820 jmp 4f
16821 SRC(1: movw (%esi), %bx )
16822 addl $2, %esi
16823-DST( movw %bx, (%edi) )
16824+DST( movw %bx, %es:(%edi) )
16825 addl $2, %edi
16826 addw %bx, %ax
16827 adcl $0, %eax
16828@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16829 SRC(1: movl (%esi), %ebx )
16830 SRC( movl 4(%esi), %edx )
16831 adcl %ebx, %eax
16832-DST( movl %ebx, (%edi) )
16833+DST( movl %ebx, %es:(%edi) )
16834 adcl %edx, %eax
16835-DST( movl %edx, 4(%edi) )
16836+DST( movl %edx, %es:4(%edi) )
16837
16838 SRC( movl 8(%esi), %ebx )
16839 SRC( movl 12(%esi), %edx )
16840 adcl %ebx, %eax
16841-DST( movl %ebx, 8(%edi) )
16842+DST( movl %ebx, %es:8(%edi) )
16843 adcl %edx, %eax
16844-DST( movl %edx, 12(%edi) )
16845+DST( movl %edx, %es:12(%edi) )
16846
16847 SRC( movl 16(%esi), %ebx )
16848 SRC( movl 20(%esi), %edx )
16849 adcl %ebx, %eax
16850-DST( movl %ebx, 16(%edi) )
16851+DST( movl %ebx, %es:16(%edi) )
16852 adcl %edx, %eax
16853-DST( movl %edx, 20(%edi) )
16854+DST( movl %edx, %es:20(%edi) )
16855
16856 SRC( movl 24(%esi), %ebx )
16857 SRC( movl 28(%esi), %edx )
16858 adcl %ebx, %eax
16859-DST( movl %ebx, 24(%edi) )
16860+DST( movl %ebx, %es:24(%edi) )
16861 adcl %edx, %eax
16862-DST( movl %edx, 28(%edi) )
16863+DST( movl %edx, %es:28(%edi) )
16864
16865 lea 32(%esi), %esi
16866 lea 32(%edi), %edi
16867@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16868 shrl $2, %edx # This clears CF
16869 SRC(3: movl (%esi), %ebx )
16870 adcl %ebx, %eax
16871-DST( movl %ebx, (%edi) )
16872+DST( movl %ebx, %es:(%edi) )
16873 lea 4(%esi), %esi
16874 lea 4(%edi), %edi
16875 dec %edx
16876@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16877 jb 5f
16878 SRC( movw (%esi), %cx )
16879 leal 2(%esi), %esi
16880-DST( movw %cx, (%edi) )
16881+DST( movw %cx, %es:(%edi) )
16882 leal 2(%edi), %edi
16883 je 6f
16884 shll $16,%ecx
16885 SRC(5: movb (%esi), %cl )
16886-DST( movb %cl, (%edi) )
16887+DST( movb %cl, %es:(%edi) )
16888 6: addl %ecx, %eax
16889 adcl $0, %eax
16890 7:
16891@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16892
16893 6001:
16894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16895- movl $-EFAULT, (%ebx)
16896+ movl $-EFAULT, %ss:(%ebx)
16897
16898 # zero the complete destination - computing the rest
16899 # is too much work
16900@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16901
16902 6002:
16903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16904- movl $-EFAULT,(%ebx)
16905+ movl $-EFAULT,%ss:(%ebx)
16906 jmp 5000b
16907
16908 .previous
16909
16910+ pushl_cfi %ss
16911+ popl_cfi %ds
16912+ pushl_cfi %ss
16913+ popl_cfi %es
16914 popl_cfi %ebx
16915 CFI_RESTORE ebx
16916 popl_cfi %esi
16917@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16918 popl_cfi %ecx # equivalent to addl $4,%esp
16919 ret
16920 CFI_ENDPROC
16921-ENDPROC(csum_partial_copy_generic)
16922+ENDPROC(csum_partial_copy_generic_to_user)
16923
16924 #else
16925
16926 /* Version for PentiumII/PPro */
16927
16928 #define ROUND1(x) \
16929+ nop; nop; nop; \
16930 SRC(movl x(%esi), %ebx ) ; \
16931 addl %ebx, %eax ; \
16932- DST(movl %ebx, x(%edi) ) ;
16933+ DST(movl %ebx, %es:x(%edi)) ;
16934
16935 #define ROUND(x) \
16936+ nop; nop; nop; \
16937 SRC(movl x(%esi), %ebx ) ; \
16938 adcl %ebx, %eax ; \
16939- DST(movl %ebx, x(%edi) ) ;
16940+ DST(movl %ebx, %es:x(%edi)) ;
16941
16942 #define ARGBASE 12
16943-
16944-ENTRY(csum_partial_copy_generic)
16945+
16946+ENTRY(csum_partial_copy_generic_to_user)
16947 CFI_STARTPROC
16948+
16949+#ifdef CONFIG_PAX_MEMORY_UDEREF
16950+ pushl_cfi %gs
16951+ popl_cfi %es
16952+ jmp csum_partial_copy_generic
16953+#endif
16954+
16955+ENTRY(csum_partial_copy_generic_from_user)
16956+
16957+#ifdef CONFIG_PAX_MEMORY_UDEREF
16958+ pushl_cfi %gs
16959+ popl_cfi %ds
16960+#endif
16961+
16962+ENTRY(csum_partial_copy_generic)
16963 pushl_cfi %ebx
16964 CFI_REL_OFFSET ebx, 0
16965 pushl_cfi %edi
16966@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16967 subl %ebx, %edi
16968 lea -1(%esi),%edx
16969 andl $-32,%edx
16970- lea 3f(%ebx,%ebx), %ebx
16971+ lea 3f(%ebx,%ebx,2), %ebx
16972 testl %esi, %esi
16973 jmp *%ebx
16974 1: addl $64,%esi
16975@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16976 jb 5f
16977 SRC( movw (%esi), %dx )
16978 leal 2(%esi), %esi
16979-DST( movw %dx, (%edi) )
16980+DST( movw %dx, %es:(%edi) )
16981 leal 2(%edi), %edi
16982 je 6f
16983 shll $16,%edx
16984 5:
16985 SRC( movb (%esi), %dl )
16986-DST( movb %dl, (%edi) )
16987+DST( movb %dl, %es:(%edi) )
16988 6: addl %edx, %eax
16989 adcl $0, %eax
16990 7:
16991 .section .fixup, "ax"
16992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16993- movl $-EFAULT, (%ebx)
16994+ movl $-EFAULT, %ss:(%ebx)
16995 # zero the complete destination (computing the rest is too much work)
16996 movl ARGBASE+8(%esp),%edi # dst
16997 movl ARGBASE+12(%esp),%ecx # len
16998@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16999 rep; stosb
17000 jmp 7b
17001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17002- movl $-EFAULT, (%ebx)
17003+ movl $-EFAULT, %ss:(%ebx)
17004 jmp 7b
17005 .previous
17006
17007+#ifdef CONFIG_PAX_MEMORY_UDEREF
17008+ pushl_cfi %ss
17009+ popl_cfi %ds
17010+ pushl_cfi %ss
17011+ popl_cfi %es
17012+#endif
17013+
17014 popl_cfi %esi
17015 CFI_RESTORE esi
17016 popl_cfi %edi
17017@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17018 CFI_RESTORE ebx
17019 ret
17020 CFI_ENDPROC
17021-ENDPROC(csum_partial_copy_generic)
17022+ENDPROC(csum_partial_copy_generic_to_user)
17023
17024 #undef ROUND
17025 #undef ROUND1
17026diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17027--- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17028+++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17029@@ -43,7 +43,7 @@ ENDPROC(clear_page)
17030
17031 #include <asm/cpufeature.h>
17032
17033- .section .altinstr_replacement,"ax"
17034+ .section .altinstr_replacement,"a"
17035 1: .byte 0xeb /* jmp <disp8> */
17036 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17037 2:
17038diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17039--- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17040+++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17041@@ -104,7 +104,7 @@ ENDPROC(copy_page)
17042
17043 #include <asm/cpufeature.h>
17044
17045- .section .altinstr_replacement,"ax"
17046+ .section .altinstr_replacement,"a"
17047 1: .byte 0xeb /* jmp <disp8> */
17048 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17049 2:
17050diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17051--- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17052+++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17053@@ -15,13 +15,14 @@
17054 #include <asm/asm-offsets.h>
17055 #include <asm/thread_info.h>
17056 #include <asm/cpufeature.h>
17057+#include <asm/pgtable.h>
17058
17059 .macro ALTERNATIVE_JUMP feature,orig,alt
17060 0:
17061 .byte 0xe9 /* 32bit jump */
17062 .long \orig-1f /* by default jump to orig */
17063 1:
17064- .section .altinstr_replacement,"ax"
17065+ .section .altinstr_replacement,"a"
17066 2: .byte 0xe9 /* near jump with 32bit immediate */
17067 .long \alt-1b /* offset */ /* or alternatively to alt */
17068 .previous
17069@@ -64,37 +65,13 @@
17070 #endif
17071 .endm
17072
17073-/* Standard copy_to_user with segment limit checking */
17074-ENTRY(_copy_to_user)
17075- CFI_STARTPROC
17076- GET_THREAD_INFO(%rax)
17077- movq %rdi,%rcx
17078- addq %rdx,%rcx
17079- jc bad_to_user
17080- cmpq TI_addr_limit(%rax),%rcx
17081- ja bad_to_user
17082- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17083- CFI_ENDPROC
17084-ENDPROC(_copy_to_user)
17085-
17086-/* Standard copy_from_user with segment limit checking */
17087-ENTRY(_copy_from_user)
17088- CFI_STARTPROC
17089- GET_THREAD_INFO(%rax)
17090- movq %rsi,%rcx
17091- addq %rdx,%rcx
17092- jc bad_from_user
17093- cmpq TI_addr_limit(%rax),%rcx
17094- ja bad_from_user
17095- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17096- CFI_ENDPROC
17097-ENDPROC(_copy_from_user)
17098-
17099 .section .fixup,"ax"
17100 /* must zero dest */
17101 ENTRY(bad_from_user)
17102 bad_from_user:
17103 CFI_STARTPROC
17104+ testl %edx,%edx
17105+ js bad_to_user
17106 movl %edx,%ecx
17107 xorl %eax,%eax
17108 rep
17109diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17110--- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17111+++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17112@@ -14,6 +14,7 @@
17113 #include <asm/current.h>
17114 #include <asm/asm-offsets.h>
17115 #include <asm/thread_info.h>
17116+#include <asm/pgtable.h>
17117
17118 .macro ALIGN_DESTINATION
17119 #ifdef FIX_ALIGNMENT
17120@@ -50,6 +51,15 @@
17121 */
17122 ENTRY(__copy_user_nocache)
17123 CFI_STARTPROC
17124+
17125+#ifdef CONFIG_PAX_MEMORY_UDEREF
17126+ mov $PAX_USER_SHADOW_BASE,%rcx
17127+ cmp %rcx,%rsi
17128+ jae 1f
17129+ add %rcx,%rsi
17130+1:
17131+#endif
17132+
17133 cmpl $8,%edx
17134 jb 20f /* less then 8 bytes, go to byte copy loop */
17135 ALIGN_DESTINATION
17136diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17137--- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17138+++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17139@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17140 len -= 2;
17141 }
17142 }
17143+
17144+#ifdef CONFIG_PAX_MEMORY_UDEREF
17145+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17146+ src += PAX_USER_SHADOW_BASE;
17147+#endif
17148+
17149 isum = csum_partial_copy_generic((__force const void *)src,
17150 dst, len, isum, errp, NULL);
17151 if (unlikely(*errp))
17152@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17153 }
17154
17155 *errp = 0;
17156+
17157+#ifdef CONFIG_PAX_MEMORY_UDEREF
17158+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17159+ dst += PAX_USER_SHADOW_BASE;
17160+#endif
17161+
17162 return csum_partial_copy_generic(src, (void __force *)dst,
17163 len, isum, NULL, errp);
17164 }
17165diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17166--- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17167+++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17168@@ -33,14 +33,35 @@
17169 #include <asm/asm-offsets.h>
17170 #include <asm/thread_info.h>
17171 #include <asm/asm.h>
17172+#include <asm/segment.h>
17173+#include <asm/pgtable.h>
17174+
17175+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17176+#define __copyuser_seg gs;
17177+#else
17178+#define __copyuser_seg
17179+#endif
17180
17181 .text
17182 ENTRY(__get_user_1)
17183 CFI_STARTPROC
17184+
17185+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17186 GET_THREAD_INFO(%_ASM_DX)
17187 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17188 jae bad_get_user
17189-1: movzb (%_ASM_AX),%edx
17190+
17191+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17192+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17193+ cmp %_ASM_DX,%_ASM_AX
17194+ jae 1234f
17195+ add %_ASM_DX,%_ASM_AX
17196+1234:
17197+#endif
17198+
17199+#endif
17200+
17201+1: __copyuser_seg movzb (%_ASM_AX),%edx
17202 xor %eax,%eax
17203 ret
17204 CFI_ENDPROC
17205@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17206 ENTRY(__get_user_2)
17207 CFI_STARTPROC
17208 add $1,%_ASM_AX
17209+
17210+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17211 jc bad_get_user
17212 GET_THREAD_INFO(%_ASM_DX)
17213 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17214 jae bad_get_user
17215-2: movzwl -1(%_ASM_AX),%edx
17216+
17217+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17218+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17219+ cmp %_ASM_DX,%_ASM_AX
17220+ jae 1234f
17221+ add %_ASM_DX,%_ASM_AX
17222+1234:
17223+#endif
17224+
17225+#endif
17226+
17227+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17228 xor %eax,%eax
17229 ret
17230 CFI_ENDPROC
17231@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17232 ENTRY(__get_user_4)
17233 CFI_STARTPROC
17234 add $3,%_ASM_AX
17235+
17236+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17237 jc bad_get_user
17238 GET_THREAD_INFO(%_ASM_DX)
17239 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17240 jae bad_get_user
17241-3: mov -3(%_ASM_AX),%edx
17242+
17243+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17244+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17245+ cmp %_ASM_DX,%_ASM_AX
17246+ jae 1234f
17247+ add %_ASM_DX,%_ASM_AX
17248+1234:
17249+#endif
17250+
17251+#endif
17252+
17253+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17254 xor %eax,%eax
17255 ret
17256 CFI_ENDPROC
17257@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17258 GET_THREAD_INFO(%_ASM_DX)
17259 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17260 jae bad_get_user
17261+
17262+#ifdef CONFIG_PAX_MEMORY_UDEREF
17263+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17264+ cmp %_ASM_DX,%_ASM_AX
17265+ jae 1234f
17266+ add %_ASM_DX,%_ASM_AX
17267+1234:
17268+#endif
17269+
17270 4: movq -7(%_ASM_AX),%_ASM_DX
17271 xor %eax,%eax
17272 ret
17273diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17274--- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17275+++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17276@@ -21,6 +21,11 @@
17277 #include <linux/string.h>
17278 #include <asm/inat.h>
17279 #include <asm/insn.h>
17280+#ifdef __KERNEL__
17281+#include <asm/pgtable_types.h>
17282+#else
17283+#define ktla_ktva(addr) addr
17284+#endif
17285
17286 #define get_next(t, insn) \
17287 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17288@@ -40,8 +45,8 @@
17289 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17290 {
17291 memset(insn, 0, sizeof(*insn));
17292- insn->kaddr = kaddr;
17293- insn->next_byte = kaddr;
17294+ insn->kaddr = ktla_ktva(kaddr);
17295+ insn->next_byte = ktla_ktva(kaddr);
17296 insn->x86_64 = x86_64 ? 1 : 0;
17297 insn->opnd_bytes = 4;
17298 if (x86_64)
17299diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17300--- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17301+++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17302@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17303 {
17304 void *p;
17305 int i;
17306+ unsigned long cr0;
17307
17308 if (unlikely(in_interrupt()))
17309 return __memcpy(to, from, len);
17310@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17311 kernel_fpu_begin();
17312
17313 __asm__ __volatile__ (
17314- "1: prefetch (%0)\n" /* This set is 28 bytes */
17315- " prefetch 64(%0)\n"
17316- " prefetch 128(%0)\n"
17317- " prefetch 192(%0)\n"
17318- " prefetch 256(%0)\n"
17319+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17320+ " prefetch 64(%1)\n"
17321+ " prefetch 128(%1)\n"
17322+ " prefetch 192(%1)\n"
17323+ " prefetch 256(%1)\n"
17324 "2: \n"
17325 ".section .fixup, \"ax\"\n"
17326- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17327+ "3: \n"
17328+
17329+#ifdef CONFIG_PAX_KERNEXEC
17330+ " movl %%cr0, %0\n"
17331+ " movl %0, %%eax\n"
17332+ " andl $0xFFFEFFFF, %%eax\n"
17333+ " movl %%eax, %%cr0\n"
17334+#endif
17335+
17336+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17337+
17338+#ifdef CONFIG_PAX_KERNEXEC
17339+ " movl %0, %%cr0\n"
17340+#endif
17341+
17342 " jmp 2b\n"
17343 ".previous\n"
17344 _ASM_EXTABLE(1b, 3b)
17345- : : "r" (from));
17346+ : "=&r" (cr0) : "r" (from) : "ax");
17347
17348 for ( ; i > 5; i--) {
17349 __asm__ __volatile__ (
17350- "1: prefetch 320(%0)\n"
17351- "2: movq (%0), %%mm0\n"
17352- " movq 8(%0), %%mm1\n"
17353- " movq 16(%0), %%mm2\n"
17354- " movq 24(%0), %%mm3\n"
17355- " movq %%mm0, (%1)\n"
17356- " movq %%mm1, 8(%1)\n"
17357- " movq %%mm2, 16(%1)\n"
17358- " movq %%mm3, 24(%1)\n"
17359- " movq 32(%0), %%mm0\n"
17360- " movq 40(%0), %%mm1\n"
17361- " movq 48(%0), %%mm2\n"
17362- " movq 56(%0), %%mm3\n"
17363- " movq %%mm0, 32(%1)\n"
17364- " movq %%mm1, 40(%1)\n"
17365- " movq %%mm2, 48(%1)\n"
17366- " movq %%mm3, 56(%1)\n"
17367+ "1: prefetch 320(%1)\n"
17368+ "2: movq (%1), %%mm0\n"
17369+ " movq 8(%1), %%mm1\n"
17370+ " movq 16(%1), %%mm2\n"
17371+ " movq 24(%1), %%mm3\n"
17372+ " movq %%mm0, (%2)\n"
17373+ " movq %%mm1, 8(%2)\n"
17374+ " movq %%mm2, 16(%2)\n"
17375+ " movq %%mm3, 24(%2)\n"
17376+ " movq 32(%1), %%mm0\n"
17377+ " movq 40(%1), %%mm1\n"
17378+ " movq 48(%1), %%mm2\n"
17379+ " movq 56(%1), %%mm3\n"
17380+ " movq %%mm0, 32(%2)\n"
17381+ " movq %%mm1, 40(%2)\n"
17382+ " movq %%mm2, 48(%2)\n"
17383+ " movq %%mm3, 56(%2)\n"
17384 ".section .fixup, \"ax\"\n"
17385- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17386+ "3:\n"
17387+
17388+#ifdef CONFIG_PAX_KERNEXEC
17389+ " movl %%cr0, %0\n"
17390+ " movl %0, %%eax\n"
17391+ " andl $0xFFFEFFFF, %%eax\n"
17392+ " movl %%eax, %%cr0\n"
17393+#endif
17394+
17395+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17396+
17397+#ifdef CONFIG_PAX_KERNEXEC
17398+ " movl %0, %%cr0\n"
17399+#endif
17400+
17401 " jmp 2b\n"
17402 ".previous\n"
17403 _ASM_EXTABLE(1b, 3b)
17404- : : "r" (from), "r" (to) : "memory");
17405+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17406
17407 from += 64;
17408 to += 64;
17409@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17410 static void fast_copy_page(void *to, void *from)
17411 {
17412 int i;
17413+ unsigned long cr0;
17414
17415 kernel_fpu_begin();
17416
17417@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17418 * but that is for later. -AV
17419 */
17420 __asm__ __volatile__(
17421- "1: prefetch (%0)\n"
17422- " prefetch 64(%0)\n"
17423- " prefetch 128(%0)\n"
17424- " prefetch 192(%0)\n"
17425- " prefetch 256(%0)\n"
17426+ "1: prefetch (%1)\n"
17427+ " prefetch 64(%1)\n"
17428+ " prefetch 128(%1)\n"
17429+ " prefetch 192(%1)\n"
17430+ " prefetch 256(%1)\n"
17431 "2: \n"
17432 ".section .fixup, \"ax\"\n"
17433- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17434+ "3: \n"
17435+
17436+#ifdef CONFIG_PAX_KERNEXEC
17437+ " movl %%cr0, %0\n"
17438+ " movl %0, %%eax\n"
17439+ " andl $0xFFFEFFFF, %%eax\n"
17440+ " movl %%eax, %%cr0\n"
17441+#endif
17442+
17443+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17444+
17445+#ifdef CONFIG_PAX_KERNEXEC
17446+ " movl %0, %%cr0\n"
17447+#endif
17448+
17449 " jmp 2b\n"
17450 ".previous\n"
17451- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17452+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17453
17454 for (i = 0; i < (4096-320)/64; i++) {
17455 __asm__ __volatile__ (
17456- "1: prefetch 320(%0)\n"
17457- "2: movq (%0), %%mm0\n"
17458- " movntq %%mm0, (%1)\n"
17459- " movq 8(%0), %%mm1\n"
17460- " movntq %%mm1, 8(%1)\n"
17461- " movq 16(%0), %%mm2\n"
17462- " movntq %%mm2, 16(%1)\n"
17463- " movq 24(%0), %%mm3\n"
17464- " movntq %%mm3, 24(%1)\n"
17465- " movq 32(%0), %%mm4\n"
17466- " movntq %%mm4, 32(%1)\n"
17467- " movq 40(%0), %%mm5\n"
17468- " movntq %%mm5, 40(%1)\n"
17469- " movq 48(%0), %%mm6\n"
17470- " movntq %%mm6, 48(%1)\n"
17471- " movq 56(%0), %%mm7\n"
17472- " movntq %%mm7, 56(%1)\n"
17473+ "1: prefetch 320(%1)\n"
17474+ "2: movq (%1), %%mm0\n"
17475+ " movntq %%mm0, (%2)\n"
17476+ " movq 8(%1), %%mm1\n"
17477+ " movntq %%mm1, 8(%2)\n"
17478+ " movq 16(%1), %%mm2\n"
17479+ " movntq %%mm2, 16(%2)\n"
17480+ " movq 24(%1), %%mm3\n"
17481+ " movntq %%mm3, 24(%2)\n"
17482+ " movq 32(%1), %%mm4\n"
17483+ " movntq %%mm4, 32(%2)\n"
17484+ " movq 40(%1), %%mm5\n"
17485+ " movntq %%mm5, 40(%2)\n"
17486+ " movq 48(%1), %%mm6\n"
17487+ " movntq %%mm6, 48(%2)\n"
17488+ " movq 56(%1), %%mm7\n"
17489+ " movntq %%mm7, 56(%2)\n"
17490 ".section .fixup, \"ax\"\n"
17491- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17492+ "3:\n"
17493+
17494+#ifdef CONFIG_PAX_KERNEXEC
17495+ " movl %%cr0, %0\n"
17496+ " movl %0, %%eax\n"
17497+ " andl $0xFFFEFFFF, %%eax\n"
17498+ " movl %%eax, %%cr0\n"
17499+#endif
17500+
17501+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17502+
17503+#ifdef CONFIG_PAX_KERNEXEC
17504+ " movl %0, %%cr0\n"
17505+#endif
17506+
17507 " jmp 2b\n"
17508 ".previous\n"
17509- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17510+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17511
17512 from += 64;
17513 to += 64;
17514@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17515 static void fast_copy_page(void *to, void *from)
17516 {
17517 int i;
17518+ unsigned long cr0;
17519
17520 kernel_fpu_begin();
17521
17522 __asm__ __volatile__ (
17523- "1: prefetch (%0)\n"
17524- " prefetch 64(%0)\n"
17525- " prefetch 128(%0)\n"
17526- " prefetch 192(%0)\n"
17527- " prefetch 256(%0)\n"
17528+ "1: prefetch (%1)\n"
17529+ " prefetch 64(%1)\n"
17530+ " prefetch 128(%1)\n"
17531+ " prefetch 192(%1)\n"
17532+ " prefetch 256(%1)\n"
17533 "2: \n"
17534 ".section .fixup, \"ax\"\n"
17535- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17536+ "3: \n"
17537+
17538+#ifdef CONFIG_PAX_KERNEXEC
17539+ " movl %%cr0, %0\n"
17540+ " movl %0, %%eax\n"
17541+ " andl $0xFFFEFFFF, %%eax\n"
17542+ " movl %%eax, %%cr0\n"
17543+#endif
17544+
17545+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17546+
17547+#ifdef CONFIG_PAX_KERNEXEC
17548+ " movl %0, %%cr0\n"
17549+#endif
17550+
17551 " jmp 2b\n"
17552 ".previous\n"
17553- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17554+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17555
17556 for (i = 0; i < 4096/64; i++) {
17557 __asm__ __volatile__ (
17558- "1: prefetch 320(%0)\n"
17559- "2: movq (%0), %%mm0\n"
17560- " movq 8(%0), %%mm1\n"
17561- " movq 16(%0), %%mm2\n"
17562- " movq 24(%0), %%mm3\n"
17563- " movq %%mm0, (%1)\n"
17564- " movq %%mm1, 8(%1)\n"
17565- " movq %%mm2, 16(%1)\n"
17566- " movq %%mm3, 24(%1)\n"
17567- " movq 32(%0), %%mm0\n"
17568- " movq 40(%0), %%mm1\n"
17569- " movq 48(%0), %%mm2\n"
17570- " movq 56(%0), %%mm3\n"
17571- " movq %%mm0, 32(%1)\n"
17572- " movq %%mm1, 40(%1)\n"
17573- " movq %%mm2, 48(%1)\n"
17574- " movq %%mm3, 56(%1)\n"
17575+ "1: prefetch 320(%1)\n"
17576+ "2: movq (%1), %%mm0\n"
17577+ " movq 8(%1), %%mm1\n"
17578+ " movq 16(%1), %%mm2\n"
17579+ " movq 24(%1), %%mm3\n"
17580+ " movq %%mm0, (%2)\n"
17581+ " movq %%mm1, 8(%2)\n"
17582+ " movq %%mm2, 16(%2)\n"
17583+ " movq %%mm3, 24(%2)\n"
17584+ " movq 32(%1), %%mm0\n"
17585+ " movq 40(%1), %%mm1\n"
17586+ " movq 48(%1), %%mm2\n"
17587+ " movq 56(%1), %%mm3\n"
17588+ " movq %%mm0, 32(%2)\n"
17589+ " movq %%mm1, 40(%2)\n"
17590+ " movq %%mm2, 48(%2)\n"
17591+ " movq %%mm3, 56(%2)\n"
17592 ".section .fixup, \"ax\"\n"
17593- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17594+ "3:\n"
17595+
17596+#ifdef CONFIG_PAX_KERNEXEC
17597+ " movl %%cr0, %0\n"
17598+ " movl %0, %%eax\n"
17599+ " andl $0xFFFEFFFF, %%eax\n"
17600+ " movl %%eax, %%cr0\n"
17601+#endif
17602+
17603+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17604+
17605+#ifdef CONFIG_PAX_KERNEXEC
17606+ " movl %0, %%cr0\n"
17607+#endif
17608+
17609 " jmp 2b\n"
17610 ".previous\n"
17611 _ASM_EXTABLE(1b, 3b)
17612- : : "r" (from), "r" (to) : "memory");
17613+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17614
17615 from += 64;
17616 to += 64;
17617diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17618--- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17619+++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17620@@ -15,7 +15,8 @@
17621 #include <asm/thread_info.h>
17622 #include <asm/errno.h>
17623 #include <asm/asm.h>
17624-
17625+#include <asm/segment.h>
17626+#include <asm/pgtable.h>
17627
17628 /*
17629 * __put_user_X
17630@@ -29,52 +30,119 @@
17631 * as they get called from within inline assembly.
17632 */
17633
17634-#define ENTER CFI_STARTPROC ; \
17635- GET_THREAD_INFO(%_ASM_BX)
17636+#define ENTER CFI_STARTPROC
17637 #define EXIT ret ; \
17638 CFI_ENDPROC
17639
17640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641+#define _DEST %_ASM_CX,%_ASM_BX
17642+#else
17643+#define _DEST %_ASM_CX
17644+#endif
17645+
17646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17647+#define __copyuser_seg gs;
17648+#else
17649+#define __copyuser_seg
17650+#endif
17651+
17652 .text
17653 ENTRY(__put_user_1)
17654 ENTER
17655+
17656+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17657+ GET_THREAD_INFO(%_ASM_BX)
17658 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17659 jae bad_put_user
17660-1: movb %al,(%_ASM_CX)
17661+
17662+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17663+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17664+ cmp %_ASM_BX,%_ASM_CX
17665+ jb 1234f
17666+ xor %ebx,%ebx
17667+1234:
17668+#endif
17669+
17670+#endif
17671+
17672+1: __copyuser_seg movb %al,(_DEST)
17673 xor %eax,%eax
17674 EXIT
17675 ENDPROC(__put_user_1)
17676
17677 ENTRY(__put_user_2)
17678 ENTER
17679+
17680+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17681+ GET_THREAD_INFO(%_ASM_BX)
17682 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17683 sub $1,%_ASM_BX
17684 cmp %_ASM_BX,%_ASM_CX
17685 jae bad_put_user
17686-2: movw %ax,(%_ASM_CX)
17687+
17688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17689+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17690+ cmp %_ASM_BX,%_ASM_CX
17691+ jb 1234f
17692+ xor %ebx,%ebx
17693+1234:
17694+#endif
17695+
17696+#endif
17697+
17698+2: __copyuser_seg movw %ax,(_DEST)
17699 xor %eax,%eax
17700 EXIT
17701 ENDPROC(__put_user_2)
17702
17703 ENTRY(__put_user_4)
17704 ENTER
17705+
17706+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17707+ GET_THREAD_INFO(%_ASM_BX)
17708 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17709 sub $3,%_ASM_BX
17710 cmp %_ASM_BX,%_ASM_CX
17711 jae bad_put_user
17712-3: movl %eax,(%_ASM_CX)
17713+
17714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17715+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17716+ cmp %_ASM_BX,%_ASM_CX
17717+ jb 1234f
17718+ xor %ebx,%ebx
17719+1234:
17720+#endif
17721+
17722+#endif
17723+
17724+3: __copyuser_seg movl %eax,(_DEST)
17725 xor %eax,%eax
17726 EXIT
17727 ENDPROC(__put_user_4)
17728
17729 ENTRY(__put_user_8)
17730 ENTER
17731+
17732+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17733+ GET_THREAD_INFO(%_ASM_BX)
17734 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17735 sub $7,%_ASM_BX
17736 cmp %_ASM_BX,%_ASM_CX
17737 jae bad_put_user
17738-4: mov %_ASM_AX,(%_ASM_CX)
17739+
17740+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17741+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17742+ cmp %_ASM_BX,%_ASM_CX
17743+ jb 1234f
17744+ xor %ebx,%ebx
17745+1234:
17746+#endif
17747+
17748+#endif
17749+
17750+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17751 #ifdef CONFIG_X86_32
17752-5: movl %edx,4(%_ASM_CX)
17753+5: __copyuser_seg movl %edx,4(_DEST)
17754 #endif
17755 xor %eax,%eax
17756 EXIT
17757diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17758--- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17759+++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17760@@ -43,7 +43,7 @@ do { \
17761 __asm__ __volatile__( \
17762 " testl %1,%1\n" \
17763 " jz 2f\n" \
17764- "0: lodsb\n" \
17765+ "0: "__copyuser_seg"lodsb\n" \
17766 " stosb\n" \
17767 " testb %%al,%%al\n" \
17768 " jz 1f\n" \
17769@@ -128,10 +128,12 @@ do { \
17770 int __d0; \
17771 might_fault(); \
17772 __asm__ __volatile__( \
17773+ __COPYUSER_SET_ES \
17774 "0: rep; stosl\n" \
17775 " movl %2,%0\n" \
17776 "1: rep; stosb\n" \
17777 "2:\n" \
17778+ __COPYUSER_RESTORE_ES \
17779 ".section .fixup,\"ax\"\n" \
17780 "3: lea 0(%2,%0,4),%0\n" \
17781 " jmp 2b\n" \
17782@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17783 might_fault();
17784
17785 __asm__ __volatile__(
17786+ __COPYUSER_SET_ES
17787 " testl %0, %0\n"
17788 " jz 3f\n"
17789 " andl %0,%%ecx\n"
17790@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17791 " subl %%ecx,%0\n"
17792 " addl %0,%%eax\n"
17793 "1:\n"
17794+ __COPYUSER_RESTORE_ES
17795 ".section .fixup,\"ax\"\n"
17796 "2: xorl %%eax,%%eax\n"
17797 " jmp 1b\n"
17798@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17799
17800 #ifdef CONFIG_X86_INTEL_USERCOPY
17801 static unsigned long
17802-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17803+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17804 {
17805 int d0, d1;
17806 __asm__ __volatile__(
17807@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17808 " .align 2,0x90\n"
17809 "3: movl 0(%4), %%eax\n"
17810 "4: movl 4(%4), %%edx\n"
17811- "5: movl %%eax, 0(%3)\n"
17812- "6: movl %%edx, 4(%3)\n"
17813+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17814+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17815 "7: movl 8(%4), %%eax\n"
17816 "8: movl 12(%4),%%edx\n"
17817- "9: movl %%eax, 8(%3)\n"
17818- "10: movl %%edx, 12(%3)\n"
17819+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17820+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17821 "11: movl 16(%4), %%eax\n"
17822 "12: movl 20(%4), %%edx\n"
17823- "13: movl %%eax, 16(%3)\n"
17824- "14: movl %%edx, 20(%3)\n"
17825+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17826+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17827 "15: movl 24(%4), %%eax\n"
17828 "16: movl 28(%4), %%edx\n"
17829- "17: movl %%eax, 24(%3)\n"
17830- "18: movl %%edx, 28(%3)\n"
17831+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17832+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17833 "19: movl 32(%4), %%eax\n"
17834 "20: movl 36(%4), %%edx\n"
17835- "21: movl %%eax, 32(%3)\n"
17836- "22: movl %%edx, 36(%3)\n"
17837+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17838+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17839 "23: movl 40(%4), %%eax\n"
17840 "24: movl 44(%4), %%edx\n"
17841- "25: movl %%eax, 40(%3)\n"
17842- "26: movl %%edx, 44(%3)\n"
17843+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17844+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17845 "27: movl 48(%4), %%eax\n"
17846 "28: movl 52(%4), %%edx\n"
17847- "29: movl %%eax, 48(%3)\n"
17848- "30: movl %%edx, 52(%3)\n"
17849+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17850+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17851 "31: movl 56(%4), %%eax\n"
17852 "32: movl 60(%4), %%edx\n"
17853- "33: movl %%eax, 56(%3)\n"
17854- "34: movl %%edx, 60(%3)\n"
17855+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17856+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17857 " addl $-64, %0\n"
17858 " addl $64, %4\n"
17859 " addl $64, %3\n"
17860@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17861 " shrl $2, %0\n"
17862 " andl $3, %%eax\n"
17863 " cld\n"
17864+ __COPYUSER_SET_ES
17865 "99: rep; movsl\n"
17866 "36: movl %%eax, %0\n"
17867 "37: rep; movsb\n"
17868 "100:\n"
17869+ __COPYUSER_RESTORE_ES
17870+ ".section .fixup,\"ax\"\n"
17871+ "101: lea 0(%%eax,%0,4),%0\n"
17872+ " jmp 100b\n"
17873+ ".previous\n"
17874+ ".section __ex_table,\"a\"\n"
17875+ " .align 4\n"
17876+ " .long 1b,100b\n"
17877+ " .long 2b,100b\n"
17878+ " .long 3b,100b\n"
17879+ " .long 4b,100b\n"
17880+ " .long 5b,100b\n"
17881+ " .long 6b,100b\n"
17882+ " .long 7b,100b\n"
17883+ " .long 8b,100b\n"
17884+ " .long 9b,100b\n"
17885+ " .long 10b,100b\n"
17886+ " .long 11b,100b\n"
17887+ " .long 12b,100b\n"
17888+ " .long 13b,100b\n"
17889+ " .long 14b,100b\n"
17890+ " .long 15b,100b\n"
17891+ " .long 16b,100b\n"
17892+ " .long 17b,100b\n"
17893+ " .long 18b,100b\n"
17894+ " .long 19b,100b\n"
17895+ " .long 20b,100b\n"
17896+ " .long 21b,100b\n"
17897+ " .long 22b,100b\n"
17898+ " .long 23b,100b\n"
17899+ " .long 24b,100b\n"
17900+ " .long 25b,100b\n"
17901+ " .long 26b,100b\n"
17902+ " .long 27b,100b\n"
17903+ " .long 28b,100b\n"
17904+ " .long 29b,100b\n"
17905+ " .long 30b,100b\n"
17906+ " .long 31b,100b\n"
17907+ " .long 32b,100b\n"
17908+ " .long 33b,100b\n"
17909+ " .long 34b,100b\n"
17910+ " .long 35b,100b\n"
17911+ " .long 36b,100b\n"
17912+ " .long 37b,100b\n"
17913+ " .long 99b,101b\n"
17914+ ".previous"
17915+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17916+ : "1"(to), "2"(from), "0"(size)
17917+ : "eax", "edx", "memory");
17918+ return size;
17919+}
17920+
17921+static unsigned long
17922+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17923+{
17924+ int d0, d1;
17925+ __asm__ __volatile__(
17926+ " .align 2,0x90\n"
17927+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17928+ " cmpl $67, %0\n"
17929+ " jbe 3f\n"
17930+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17931+ " .align 2,0x90\n"
17932+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17933+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17934+ "5: movl %%eax, 0(%3)\n"
17935+ "6: movl %%edx, 4(%3)\n"
17936+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17937+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17938+ "9: movl %%eax, 8(%3)\n"
17939+ "10: movl %%edx, 12(%3)\n"
17940+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17941+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17942+ "13: movl %%eax, 16(%3)\n"
17943+ "14: movl %%edx, 20(%3)\n"
17944+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17945+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17946+ "17: movl %%eax, 24(%3)\n"
17947+ "18: movl %%edx, 28(%3)\n"
17948+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17949+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17950+ "21: movl %%eax, 32(%3)\n"
17951+ "22: movl %%edx, 36(%3)\n"
17952+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17953+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17954+ "25: movl %%eax, 40(%3)\n"
17955+ "26: movl %%edx, 44(%3)\n"
17956+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17957+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17958+ "29: movl %%eax, 48(%3)\n"
17959+ "30: movl %%edx, 52(%3)\n"
17960+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17961+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17962+ "33: movl %%eax, 56(%3)\n"
17963+ "34: movl %%edx, 60(%3)\n"
17964+ " addl $-64, %0\n"
17965+ " addl $64, %4\n"
17966+ " addl $64, %3\n"
17967+ " cmpl $63, %0\n"
17968+ " ja 1b\n"
17969+ "35: movl %0, %%eax\n"
17970+ " shrl $2, %0\n"
17971+ " andl $3, %%eax\n"
17972+ " cld\n"
17973+ "99: rep; "__copyuser_seg" movsl\n"
17974+ "36: movl %%eax, %0\n"
17975+ "37: rep; "__copyuser_seg" movsb\n"
17976+ "100:\n"
17977 ".section .fixup,\"ax\"\n"
17978 "101: lea 0(%%eax,%0,4),%0\n"
17979 " jmp 100b\n"
17980@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17981 int d0, d1;
17982 __asm__ __volatile__(
17983 " .align 2,0x90\n"
17984- "0: movl 32(%4), %%eax\n"
17985+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17986 " cmpl $67, %0\n"
17987 " jbe 2f\n"
17988- "1: movl 64(%4), %%eax\n"
17989+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17990 " .align 2,0x90\n"
17991- "2: movl 0(%4), %%eax\n"
17992- "21: movl 4(%4), %%edx\n"
17993+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17994+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17995 " movl %%eax, 0(%3)\n"
17996 " movl %%edx, 4(%3)\n"
17997- "3: movl 8(%4), %%eax\n"
17998- "31: movl 12(%4),%%edx\n"
17999+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18000+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18001 " movl %%eax, 8(%3)\n"
18002 " movl %%edx, 12(%3)\n"
18003- "4: movl 16(%4), %%eax\n"
18004- "41: movl 20(%4), %%edx\n"
18005+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18006+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18007 " movl %%eax, 16(%3)\n"
18008 " movl %%edx, 20(%3)\n"
18009- "10: movl 24(%4), %%eax\n"
18010- "51: movl 28(%4), %%edx\n"
18011+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18012+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18013 " movl %%eax, 24(%3)\n"
18014 " movl %%edx, 28(%3)\n"
18015- "11: movl 32(%4), %%eax\n"
18016- "61: movl 36(%4), %%edx\n"
18017+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18018+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18019 " movl %%eax, 32(%3)\n"
18020 " movl %%edx, 36(%3)\n"
18021- "12: movl 40(%4), %%eax\n"
18022- "71: movl 44(%4), %%edx\n"
18023+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18024+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18025 " movl %%eax, 40(%3)\n"
18026 " movl %%edx, 44(%3)\n"
18027- "13: movl 48(%4), %%eax\n"
18028- "81: movl 52(%4), %%edx\n"
18029+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18030+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18031 " movl %%eax, 48(%3)\n"
18032 " movl %%edx, 52(%3)\n"
18033- "14: movl 56(%4), %%eax\n"
18034- "91: movl 60(%4), %%edx\n"
18035+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18036+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18037 " movl %%eax, 56(%3)\n"
18038 " movl %%edx, 60(%3)\n"
18039 " addl $-64, %0\n"
18040@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18041 " shrl $2, %0\n"
18042 " andl $3, %%eax\n"
18043 " cld\n"
18044- "6: rep; movsl\n"
18045+ "6: rep; "__copyuser_seg" movsl\n"
18046 " movl %%eax,%0\n"
18047- "7: rep; movsb\n"
18048+ "7: rep; "__copyuser_seg" movsb\n"
18049 "8:\n"
18050 ".section .fixup,\"ax\"\n"
18051 "9: lea 0(%%eax,%0,4),%0\n"
18052@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18053
18054 __asm__ __volatile__(
18055 " .align 2,0x90\n"
18056- "0: movl 32(%4), %%eax\n"
18057+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18058 " cmpl $67, %0\n"
18059 " jbe 2f\n"
18060- "1: movl 64(%4), %%eax\n"
18061+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18062 " .align 2,0x90\n"
18063- "2: movl 0(%4), %%eax\n"
18064- "21: movl 4(%4), %%edx\n"
18065+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18066+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18067 " movnti %%eax, 0(%3)\n"
18068 " movnti %%edx, 4(%3)\n"
18069- "3: movl 8(%4), %%eax\n"
18070- "31: movl 12(%4),%%edx\n"
18071+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18072+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18073 " movnti %%eax, 8(%3)\n"
18074 " movnti %%edx, 12(%3)\n"
18075- "4: movl 16(%4), %%eax\n"
18076- "41: movl 20(%4), %%edx\n"
18077+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18078+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18079 " movnti %%eax, 16(%3)\n"
18080 " movnti %%edx, 20(%3)\n"
18081- "10: movl 24(%4), %%eax\n"
18082- "51: movl 28(%4), %%edx\n"
18083+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18084+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18085 " movnti %%eax, 24(%3)\n"
18086 " movnti %%edx, 28(%3)\n"
18087- "11: movl 32(%4), %%eax\n"
18088- "61: movl 36(%4), %%edx\n"
18089+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18090+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18091 " movnti %%eax, 32(%3)\n"
18092 " movnti %%edx, 36(%3)\n"
18093- "12: movl 40(%4), %%eax\n"
18094- "71: movl 44(%4), %%edx\n"
18095+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18096+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18097 " movnti %%eax, 40(%3)\n"
18098 " movnti %%edx, 44(%3)\n"
18099- "13: movl 48(%4), %%eax\n"
18100- "81: movl 52(%4), %%edx\n"
18101+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18102+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18103 " movnti %%eax, 48(%3)\n"
18104 " movnti %%edx, 52(%3)\n"
18105- "14: movl 56(%4), %%eax\n"
18106- "91: movl 60(%4), %%edx\n"
18107+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18108+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18109 " movnti %%eax, 56(%3)\n"
18110 " movnti %%edx, 60(%3)\n"
18111 " addl $-64, %0\n"
18112@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18113 " shrl $2, %0\n"
18114 " andl $3, %%eax\n"
18115 " cld\n"
18116- "6: rep; movsl\n"
18117+ "6: rep; "__copyuser_seg" movsl\n"
18118 " movl %%eax,%0\n"
18119- "7: rep; movsb\n"
18120+ "7: rep; "__copyuser_seg" movsb\n"
18121 "8:\n"
18122 ".section .fixup,\"ax\"\n"
18123 "9: lea 0(%%eax,%0,4),%0\n"
18124@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18125
18126 __asm__ __volatile__(
18127 " .align 2,0x90\n"
18128- "0: movl 32(%4), %%eax\n"
18129+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18130 " cmpl $67, %0\n"
18131 " jbe 2f\n"
18132- "1: movl 64(%4), %%eax\n"
18133+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18134 " .align 2,0x90\n"
18135- "2: movl 0(%4), %%eax\n"
18136- "21: movl 4(%4), %%edx\n"
18137+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18138+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18139 " movnti %%eax, 0(%3)\n"
18140 " movnti %%edx, 4(%3)\n"
18141- "3: movl 8(%4), %%eax\n"
18142- "31: movl 12(%4),%%edx\n"
18143+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18144+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18145 " movnti %%eax, 8(%3)\n"
18146 " movnti %%edx, 12(%3)\n"
18147- "4: movl 16(%4), %%eax\n"
18148- "41: movl 20(%4), %%edx\n"
18149+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18150+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18151 " movnti %%eax, 16(%3)\n"
18152 " movnti %%edx, 20(%3)\n"
18153- "10: movl 24(%4), %%eax\n"
18154- "51: movl 28(%4), %%edx\n"
18155+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18156+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18157 " movnti %%eax, 24(%3)\n"
18158 " movnti %%edx, 28(%3)\n"
18159- "11: movl 32(%4), %%eax\n"
18160- "61: movl 36(%4), %%edx\n"
18161+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18162+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18163 " movnti %%eax, 32(%3)\n"
18164 " movnti %%edx, 36(%3)\n"
18165- "12: movl 40(%4), %%eax\n"
18166- "71: movl 44(%4), %%edx\n"
18167+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18168+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18169 " movnti %%eax, 40(%3)\n"
18170 " movnti %%edx, 44(%3)\n"
18171- "13: movl 48(%4), %%eax\n"
18172- "81: movl 52(%4), %%edx\n"
18173+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18174+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18175 " movnti %%eax, 48(%3)\n"
18176 " movnti %%edx, 52(%3)\n"
18177- "14: movl 56(%4), %%eax\n"
18178- "91: movl 60(%4), %%edx\n"
18179+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18180+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18181 " movnti %%eax, 56(%3)\n"
18182 " movnti %%edx, 60(%3)\n"
18183 " addl $-64, %0\n"
18184@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18185 " shrl $2, %0\n"
18186 " andl $3, %%eax\n"
18187 " cld\n"
18188- "6: rep; movsl\n"
18189+ "6: rep; "__copyuser_seg" movsl\n"
18190 " movl %%eax,%0\n"
18191- "7: rep; movsb\n"
18192+ "7: rep; "__copyuser_seg" movsb\n"
18193 "8:\n"
18194 ".section .fixup,\"ax\"\n"
18195 "9: lea 0(%%eax,%0,4),%0\n"
18196@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18197 */
18198 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18199 unsigned long size);
18200-unsigned long __copy_user_intel(void __user *to, const void *from,
18201+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18202+ unsigned long size);
18203+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18204 unsigned long size);
18205 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18206 const void __user *from, unsigned long size);
18207 #endif /* CONFIG_X86_INTEL_USERCOPY */
18208
18209 /* Generic arbitrary sized copy. */
18210-#define __copy_user(to, from, size) \
18211+#define __copy_user(to, from, size, prefix, set, restore) \
18212 do { \
18213 int __d0, __d1, __d2; \
18214 __asm__ __volatile__( \
18215+ set \
18216 " cmp $7,%0\n" \
18217 " jbe 1f\n" \
18218 " movl %1,%0\n" \
18219 " negl %0\n" \
18220 " andl $7,%0\n" \
18221 " subl %0,%3\n" \
18222- "4: rep; movsb\n" \
18223+ "4: rep; "prefix"movsb\n" \
18224 " movl %3,%0\n" \
18225 " shrl $2,%0\n" \
18226 " andl $3,%3\n" \
18227 " .align 2,0x90\n" \
18228- "0: rep; movsl\n" \
18229+ "0: rep; "prefix"movsl\n" \
18230 " movl %3,%0\n" \
18231- "1: rep; movsb\n" \
18232+ "1: rep; "prefix"movsb\n" \
18233 "2:\n" \
18234+ restore \
18235 ".section .fixup,\"ax\"\n" \
18236 "5: addl %3,%0\n" \
18237 " jmp 2b\n" \
18238@@ -682,14 +799,14 @@ do { \
18239 " negl %0\n" \
18240 " andl $7,%0\n" \
18241 " subl %0,%3\n" \
18242- "4: rep; movsb\n" \
18243+ "4: rep; "__copyuser_seg"movsb\n" \
18244 " movl %3,%0\n" \
18245 " shrl $2,%0\n" \
18246 " andl $3,%3\n" \
18247 " .align 2,0x90\n" \
18248- "0: rep; movsl\n" \
18249+ "0: rep; "__copyuser_seg"movsl\n" \
18250 " movl %3,%0\n" \
18251- "1: rep; movsb\n" \
18252+ "1: rep; "__copyuser_seg"movsb\n" \
18253 "2:\n" \
18254 ".section .fixup,\"ax\"\n" \
18255 "5: addl %3,%0\n" \
18256@@ -775,9 +892,9 @@ survive:
18257 }
18258 #endif
18259 if (movsl_is_ok(to, from, n))
18260- __copy_user(to, from, n);
18261+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18262 else
18263- n = __copy_user_intel(to, from, n);
18264+ n = __generic_copy_to_user_intel(to, from, n);
18265 return n;
18266 }
18267 EXPORT_SYMBOL(__copy_to_user_ll);
18268@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18269 unsigned long n)
18270 {
18271 if (movsl_is_ok(to, from, n))
18272- __copy_user(to, from, n);
18273+ __copy_user(to, from, n, __copyuser_seg, "", "");
18274 else
18275- n = __copy_user_intel((void __user *)to,
18276- (const void *)from, n);
18277+ n = __generic_copy_from_user_intel(to, from, n);
18278 return n;
18279 }
18280 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18281@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18282 if (n > 64 && cpu_has_xmm2)
18283 n = __copy_user_intel_nocache(to, from, n);
18284 else
18285- __copy_user(to, from, n);
18286+ __copy_user(to, from, n, __copyuser_seg, "", "");
18287 #else
18288- __copy_user(to, from, n);
18289+ __copy_user(to, from, n, __copyuser_seg, "", "");
18290 #endif
18291 return n;
18292 }
18293 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18294
18295-/**
18296- * copy_to_user: - Copy a block of data into user space.
18297- * @to: Destination address, in user space.
18298- * @from: Source address, in kernel space.
18299- * @n: Number of bytes to copy.
18300- *
18301- * Context: User context only. This function may sleep.
18302- *
18303- * Copy data from kernel space to user space.
18304- *
18305- * Returns number of bytes that could not be copied.
18306- * On success, this will be zero.
18307- */
18308-unsigned long
18309-copy_to_user(void __user *to, const void *from, unsigned long n)
18310+void copy_from_user_overflow(void)
18311 {
18312- if (access_ok(VERIFY_WRITE, to, n))
18313- n = __copy_to_user(to, from, n);
18314- return n;
18315+ WARN(1, "Buffer overflow detected!\n");
18316 }
18317-EXPORT_SYMBOL(copy_to_user);
18318+EXPORT_SYMBOL(copy_from_user_overflow);
18319
18320-/**
18321- * copy_from_user: - Copy a block of data from user space.
18322- * @to: Destination address, in kernel space.
18323- * @from: Source address, in user space.
18324- * @n: Number of bytes to copy.
18325- *
18326- * Context: User context only. This function may sleep.
18327- *
18328- * Copy data from user space to kernel space.
18329- *
18330- * Returns number of bytes that could not be copied.
18331- * On success, this will be zero.
18332- *
18333- * If some data could not be copied, this function will pad the copied
18334- * data to the requested size using zero bytes.
18335- */
18336-unsigned long
18337-_copy_from_user(void *to, const void __user *from, unsigned long n)
18338+void copy_to_user_overflow(void)
18339 {
18340- if (access_ok(VERIFY_READ, from, n))
18341- n = __copy_from_user(to, from, n);
18342- else
18343- memset(to, 0, n);
18344- return n;
18345+ WARN(1, "Buffer overflow detected!\n");
18346 }
18347-EXPORT_SYMBOL(_copy_from_user);
18348+EXPORT_SYMBOL(copy_to_user_overflow);
18349
18350-void copy_from_user_overflow(void)
18351+#ifdef CONFIG_PAX_MEMORY_UDEREF
18352+void __set_fs(mm_segment_t x)
18353 {
18354- WARN(1, "Buffer overflow detected!\n");
18355+ switch (x.seg) {
18356+ case 0:
18357+ loadsegment(gs, 0);
18358+ break;
18359+ case TASK_SIZE_MAX:
18360+ loadsegment(gs, __USER_DS);
18361+ break;
18362+ case -1UL:
18363+ loadsegment(gs, __KERNEL_DS);
18364+ break;
18365+ default:
18366+ BUG();
18367+ }
18368+ return;
18369 }
18370-EXPORT_SYMBOL(copy_from_user_overflow);
18371+EXPORT_SYMBOL(__set_fs);
18372+
18373+void set_fs(mm_segment_t x)
18374+{
18375+ current_thread_info()->addr_limit = x;
18376+ __set_fs(x);
18377+}
18378+EXPORT_SYMBOL(set_fs);
18379+#endif
18380diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18381--- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18382+++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18383@@ -42,6 +42,12 @@ long
18384 __strncpy_from_user(char *dst, const char __user *src, long count)
18385 {
18386 long res;
18387+
18388+#ifdef CONFIG_PAX_MEMORY_UDEREF
18389+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18390+ src += PAX_USER_SHADOW_BASE;
18391+#endif
18392+
18393 __do_strncpy_from_user(dst, src, count, res);
18394 return res;
18395 }
18396@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18397 {
18398 long __d0;
18399 might_fault();
18400+
18401+#ifdef CONFIG_PAX_MEMORY_UDEREF
18402+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18403+ addr += PAX_USER_SHADOW_BASE;
18404+#endif
18405+
18406 /* no memory constraint because it doesn't change any memory gcc knows
18407 about */
18408 asm volatile(
18409@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18410
18411 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18412 {
18413- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18414+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18415+
18416+#ifdef CONFIG_PAX_MEMORY_UDEREF
18417+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18418+ to += PAX_USER_SHADOW_BASE;
18419+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18420+ from += PAX_USER_SHADOW_BASE;
18421+#endif
18422+
18423 return copy_user_generic((__force void *)to, (__force void *)from, len);
18424- }
18425- return len;
18426+ }
18427+ return len;
18428 }
18429 EXPORT_SYMBOL(copy_in_user);
18430
18431diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18432--- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18433+++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18434@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18435 else
18436 BITS := 64
18437 UTS_MACHINE := x86_64
18438+ biarch := $(call cc-option,-m64)
18439 CHECKFLAGS += -D__x86_64__ -m64
18440
18441 KBUILD_AFLAGS += -m64
18442@@ -195,3 +196,12 @@ define archhelp
18443 echo ' FDARGS="..." arguments for the booted kernel'
18444 echo ' FDINITRD=file initrd for the booted kernel'
18445 endef
18446+
18447+define OLD_LD
18448+
18449+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18450+*** Please upgrade your binutils to 2.18 or newer
18451+endef
18452+
18453+archprepare:
18454+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18455diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18456--- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18457+++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18458@@ -1,14 +1,71 @@
18459 #include <linux/module.h>
18460 #include <linux/spinlock.h>
18461+#include <linux/sort.h>
18462 #include <asm/uaccess.h>
18463+#include <asm/pgtable.h>
18464
18465+/*
18466+ * The exception table needs to be sorted so that the binary
18467+ * search that we use to find entries in it works properly.
18468+ * This is used both for the kernel exception table and for
18469+ * the exception tables of modules that get loaded.
18470+ */
18471+static int cmp_ex(const void *a, const void *b)
18472+{
18473+ const struct exception_table_entry *x = a, *y = b;
18474+
18475+ /* avoid overflow */
18476+ if (x->insn > y->insn)
18477+ return 1;
18478+ if (x->insn < y->insn)
18479+ return -1;
18480+ return 0;
18481+}
18482+
18483+static void swap_ex(void *a, void *b, int size)
18484+{
18485+ struct exception_table_entry t, *x = a, *y = b;
18486+
18487+ t = *x;
18488+
18489+ pax_open_kernel();
18490+ *x = *y;
18491+ *y = t;
18492+ pax_close_kernel();
18493+}
18494+
18495+void sort_extable(struct exception_table_entry *start,
18496+ struct exception_table_entry *finish)
18497+{
18498+ sort(start, finish - start, sizeof(struct exception_table_entry),
18499+ cmp_ex, swap_ex);
18500+}
18501+
18502+#ifdef CONFIG_MODULES
18503+/*
18504+ * If the exception table is sorted, any referring to the module init
18505+ * will be at the beginning or the end.
18506+ */
18507+void trim_init_extable(struct module *m)
18508+{
18509+ /*trim the beginning*/
18510+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18511+ m->extable++;
18512+ m->num_exentries--;
18513+ }
18514+ /*trim the end*/
18515+ while (m->num_exentries &&
18516+ within_module_init(m->extable[m->num_exentries-1].insn, m))
18517+ m->num_exentries--;
18518+}
18519+#endif /* CONFIG_MODULES */
18520
18521 int fixup_exception(struct pt_regs *regs)
18522 {
18523 const struct exception_table_entry *fixup;
18524
18525 #ifdef CONFIG_PNPBIOS
18526- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18527+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18528 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18529 extern u32 pnp_bios_is_utter_crap;
18530 pnp_bios_is_utter_crap = 1;
18531diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18532--- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18533+++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-05 19:44:35.000000000 -0400
18534@@ -12,10 +12,18 @@
18535 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18536 #include <linux/perf_event.h> /* perf_sw_event */
18537 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18538+#include <linux/unistd.h>
18539+#include <linux/compiler.h>
18540
18541 #include <asm/traps.h> /* dotraplinkage, ... */
18542 #include <asm/pgalloc.h> /* pgd_*(), ... */
18543 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18544+#include <asm/vsyscall.h>
18545+#include <asm/tlbflush.h>
18546+
18547+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18548+#include <asm/stacktrace.h>
18549+#endif
18550
18551 /*
18552 * Page fault error code bits:
18553@@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18554 int ret = 0;
18555
18556 /* kprobe_running() needs smp_processor_id() */
18557- if (kprobes_built_in() && !user_mode_vm(regs)) {
18558+ if (kprobes_built_in() && !user_mode(regs)) {
18559 preempt_disable();
18560 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18561 ret = 1;
18562@@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18563 return !instr_lo || (instr_lo>>1) == 1;
18564 case 0x00:
18565 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18566- if (probe_kernel_address(instr, opcode))
18567+ if (user_mode(regs)) {
18568+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18569+ return 0;
18570+ } else if (probe_kernel_address(instr, opcode))
18571 return 0;
18572
18573 *prefetch = (instr_lo == 0xF) &&
18574@@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18575 while (instr < max_instr) {
18576 unsigned char opcode;
18577
18578- if (probe_kernel_address(instr, opcode))
18579+ if (user_mode(regs)) {
18580+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18581+ break;
18582+ } else if (probe_kernel_address(instr, opcode))
18583 break;
18584
18585 instr++;
18586@@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18587 force_sig_info(si_signo, &info, tsk);
18588 }
18589
18590+#ifdef CONFIG_PAX_EMUTRAMP
18591+static int pax_handle_fetch_fault(struct pt_regs *regs);
18592+#endif
18593+
18594+#ifdef CONFIG_PAX_PAGEEXEC
18595+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18596+{
18597+ pgd_t *pgd;
18598+ pud_t *pud;
18599+ pmd_t *pmd;
18600+
18601+ pgd = pgd_offset(mm, address);
18602+ if (!pgd_present(*pgd))
18603+ return NULL;
18604+ pud = pud_offset(pgd, address);
18605+ if (!pud_present(*pud))
18606+ return NULL;
18607+ pmd = pmd_offset(pud, address);
18608+ if (!pmd_present(*pmd))
18609+ return NULL;
18610+ return pmd;
18611+}
18612+#endif
18613+
18614 DEFINE_SPINLOCK(pgd_lock);
18615 LIST_HEAD(pgd_list);
18616
18617@@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18618 for (address = VMALLOC_START & PMD_MASK;
18619 address >= TASK_SIZE && address < FIXADDR_TOP;
18620 address += PMD_SIZE) {
18621+
18622+#ifdef CONFIG_PAX_PER_CPU_PGD
18623+ unsigned long cpu;
18624+#else
18625 struct page *page;
18626+#endif
18627
18628 spin_lock(&pgd_lock);
18629+
18630+#ifdef CONFIG_PAX_PER_CPU_PGD
18631+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18632+ pgd_t *pgd = get_cpu_pgd(cpu);
18633+ pmd_t *ret;
18634+#else
18635 list_for_each_entry(page, &pgd_list, lru) {
18636+ pgd_t *pgd = page_address(page);
18637 spinlock_t *pgt_lock;
18638 pmd_t *ret;
18639
18640@@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18641 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18642
18643 spin_lock(pgt_lock);
18644- ret = vmalloc_sync_one(page_address(page), address);
18645+#endif
18646+
18647+ ret = vmalloc_sync_one(pgd, address);
18648+
18649+#ifndef CONFIG_PAX_PER_CPU_PGD
18650 spin_unlock(pgt_lock);
18651+#endif
18652
18653 if (!ret)
18654 break;
18655@@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18656 * an interrupt in the middle of a task switch..
18657 */
18658 pgd_paddr = read_cr3();
18659+
18660+#ifdef CONFIG_PAX_PER_CPU_PGD
18661+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18662+#endif
18663+
18664 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18665 if (!pmd_k)
18666 return -1;
18667@@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18668 * happen within a race in page table update. In the later
18669 * case just flush:
18670 */
18671+
18672+#ifdef CONFIG_PAX_PER_CPU_PGD
18673+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18674+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18675+#else
18676 pgd = pgd_offset(current->active_mm, address);
18677+#endif
18678+
18679 pgd_ref = pgd_offset_k(address);
18680 if (pgd_none(*pgd_ref))
18681 return -1;
18682@@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18683 static int is_errata100(struct pt_regs *regs, unsigned long address)
18684 {
18685 #ifdef CONFIG_X86_64
18686- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18687+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18688 return 1;
18689 #endif
18690 return 0;
18691@@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18692 }
18693
18694 static const char nx_warning[] = KERN_CRIT
18695-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18696+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18697
18698 static void
18699 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18700@@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18701 if (!oops_may_print())
18702 return;
18703
18704- if (error_code & PF_INSTR) {
18705+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18706 unsigned int level;
18707
18708 pte_t *pte = lookup_address(address, &level);
18709
18710 if (pte && pte_present(*pte) && !pte_exec(*pte))
18711- printk(nx_warning, current_uid());
18712+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18713 }
18714
18715+#ifdef CONFIG_PAX_KERNEXEC
18716+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18717+ if (current->signal->curr_ip)
18718+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18719+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18720+ else
18721+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18722+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18723+ }
18724+#endif
18725+
18726 printk(KERN_ALERT "BUG: unable to handle kernel ");
18727 if (address < PAGE_SIZE)
18728 printk(KERN_CONT "NULL pointer dereference");
18729@@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r
18730 unsigned long address, int si_code)
18731 {
18732 struct task_struct *tsk = current;
18733+ struct mm_struct *mm = tsk->mm;
18734+
18735+#ifdef CONFIG_X86_64
18736+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18737+ if (regs->ip == (unsigned long)vgettimeofday) {
18738+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18739+ return;
18740+ } else if (regs->ip == (unsigned long)vtime) {
18741+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18742+ return;
18743+ } else if (regs->ip == (unsigned long)vgetcpu) {
18744+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18745+ return;
18746+ }
18747+ }
18748+#endif
18749+
18750+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18751+ if (mm && (error_code & PF_USER)) {
18752+ unsigned long ip = regs->ip;
18753+
18754+ if (v8086_mode(regs))
18755+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18756+
18757+ /*
18758+ * It's possible to have interrupts off here:
18759+ */
18760+ local_irq_enable();
18761+
18762+#ifdef CONFIG_PAX_PAGEEXEC
18763+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18764+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18765+
18766+#ifdef CONFIG_PAX_EMUTRAMP
18767+ switch (pax_handle_fetch_fault(regs)) {
18768+ case 2:
18769+ return;
18770+ }
18771+#endif
18772+
18773+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18774+ do_group_exit(SIGKILL);
18775+ }
18776+#endif
18777+
18778+#ifdef CONFIG_PAX_SEGMEXEC
18779+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18780+
18781+#ifdef CONFIG_PAX_EMUTRAMP
18782+ switch (pax_handle_fetch_fault(regs)) {
18783+ case 2:
18784+ return;
18785+ }
18786+#endif
18787+
18788+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18789+ do_group_exit(SIGKILL);
18790+ }
18791+#endif
18792+
18793+ }
18794+#endif
18795
18796 /* User mode accesses just cause a SIGSEGV */
18797 if (error_code & PF_USER) {
18798@@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned
18799 return 1;
18800 }
18801
18802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18803+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18804+{
18805+ pte_t *pte;
18806+ pmd_t *pmd;
18807+ spinlock_t *ptl;
18808+ unsigned char pte_mask;
18809+
18810+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18811+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18812+ return 0;
18813+
18814+ /* PaX: it's our fault, let's handle it if we can */
18815+
18816+ /* PaX: take a look at read faults before acquiring any locks */
18817+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18818+ /* instruction fetch attempt from a protected page in user mode */
18819+ up_read(&mm->mmap_sem);
18820+
18821+#ifdef CONFIG_PAX_EMUTRAMP
18822+ switch (pax_handle_fetch_fault(regs)) {
18823+ case 2:
18824+ return 1;
18825+ }
18826+#endif
18827+
18828+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18829+ do_group_exit(SIGKILL);
18830+ }
18831+
18832+ pmd = pax_get_pmd(mm, address);
18833+ if (unlikely(!pmd))
18834+ return 0;
18835+
18836+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18837+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18838+ pte_unmap_unlock(pte, ptl);
18839+ return 0;
18840+ }
18841+
18842+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18843+ /* write attempt to a protected page in user mode */
18844+ pte_unmap_unlock(pte, ptl);
18845+ return 0;
18846+ }
18847+
18848+#ifdef CONFIG_SMP
18849+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18850+#else
18851+ if (likely(address > get_limit(regs->cs)))
18852+#endif
18853+ {
18854+ set_pte(pte, pte_mkread(*pte));
18855+ __flush_tlb_one(address);
18856+ pte_unmap_unlock(pte, ptl);
18857+ up_read(&mm->mmap_sem);
18858+ return 1;
18859+ }
18860+
18861+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18862+
18863+ /*
18864+ * PaX: fill DTLB with user rights and retry
18865+ */
18866+ __asm__ __volatile__ (
18867+ "orb %2,(%1)\n"
18868+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18869+/*
18870+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18871+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18872+ * page fault when examined during a TLB load attempt. this is true not only
18873+ * for PTEs holding a non-present entry but also present entries that will
18874+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18875+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18876+ * for our target pages since their PTEs are simply not in the TLBs at all.
18877+
18878+ * the best thing in omitting it is that we gain around 15-20% speed in the
18879+ * fast path of the page fault handler and can get rid of tracing since we
18880+ * can no longer flush unintended entries.
18881+ */
18882+ "invlpg (%0)\n"
18883+#endif
18884+ __copyuser_seg"testb $0,(%0)\n"
18885+ "xorb %3,(%1)\n"
18886+ :
18887+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18888+ : "memory", "cc");
18889+ pte_unmap_unlock(pte, ptl);
18890+ up_read(&mm->mmap_sem);
18891+ return 1;
18892+}
18893+#endif
18894+
18895 /*
18896 * Handle a spurious fault caused by a stale TLB entry.
18897 *
18898@@ -927,6 +1160,9 @@ int show_unhandled_signals = 1;
18899 static inline int
18900 access_error(unsigned long error_code, struct vm_area_struct *vma)
18901 {
18902+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18903+ return 1;
18904+
18905 if (error_code & PF_WRITE) {
18906 /* write, present and write, not present: */
18907 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18908@@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi
18909 {
18910 struct vm_area_struct *vma;
18911 struct task_struct *tsk;
18912- unsigned long address;
18913 struct mm_struct *mm;
18914 int fault;
18915 int write = error_code & PF_WRITE;
18916 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18917 (write ? FAULT_FLAG_WRITE : 0);
18918
18919+ /* Get the faulting address: */
18920+ unsigned long address = read_cr2();
18921+
18922+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18923+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18924+ if (!search_exception_tables(regs->ip)) {
18925+ bad_area_nosemaphore(regs, error_code, address);
18926+ return;
18927+ }
18928+ if (address < PAX_USER_SHADOW_BASE) {
18929+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18930+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18931+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18932+ } else
18933+ address -= PAX_USER_SHADOW_BASE;
18934+ }
18935+#endif
18936+
18937 tsk = current;
18938 mm = tsk->mm;
18939
18940- /* Get the faulting address: */
18941- address = read_cr2();
18942-
18943 /*
18944 * Detect and handle instructions that would cause a page fault for
18945 * both a tracked kernel page and a userspace page.
18946@@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi
18947 * User-mode registers count as a user access even for any
18948 * potential system fault or CPU buglet:
18949 */
18950- if (user_mode_vm(regs)) {
18951+ if (user_mode(regs)) {
18952 local_irq_enable();
18953 error_code |= PF_USER;
18954 } else {
18955@@ -1087,6 +1337,11 @@ retry:
18956 might_sleep();
18957 }
18958
18959+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18960+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18961+ return;
18962+#endif
18963+
18964 vma = find_vma(mm, address);
18965 if (unlikely(!vma)) {
18966 bad_area(regs, error_code, address);
18967@@ -1098,18 +1353,24 @@ retry:
18968 bad_area(regs, error_code, address);
18969 return;
18970 }
18971- if (error_code & PF_USER) {
18972- /*
18973- * Accessing the stack below %sp is always a bug.
18974- * The large cushion allows instructions like enter
18975- * and pusha to work. ("enter $65535, $31" pushes
18976- * 32 pointers and then decrements %sp by 65535.)
18977- */
18978- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18979- bad_area(regs, error_code, address);
18980- return;
18981- }
18982+ /*
18983+ * Accessing the stack below %sp is always a bug.
18984+ * The large cushion allows instructions like enter
18985+ * and pusha to work. ("enter $65535, $31" pushes
18986+ * 32 pointers and then decrements %sp by 65535.)
18987+ */
18988+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18989+ bad_area(regs, error_code, address);
18990+ return;
18991 }
18992+
18993+#ifdef CONFIG_PAX_SEGMEXEC
18994+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18995+ bad_area(regs, error_code, address);
18996+ return;
18997+ }
18998+#endif
18999+
19000 if (unlikely(expand_stack(vma, address))) {
19001 bad_area(regs, error_code, address);
19002 return;
19003@@ -1164,3 +1425,199 @@ good_area:
19004
19005 up_read(&mm->mmap_sem);
19006 }
19007+
19008+#ifdef CONFIG_PAX_EMUTRAMP
19009+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19010+{
19011+ int err;
19012+
19013+ do { /* PaX: gcc trampoline emulation #1 */
19014+ unsigned char mov1, mov2;
19015+ unsigned short jmp;
19016+ unsigned int addr1, addr2;
19017+
19018+#ifdef CONFIG_X86_64
19019+ if ((regs->ip + 11) >> 32)
19020+ break;
19021+#endif
19022+
19023+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19024+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19025+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19026+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19027+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19028+
19029+ if (err)
19030+ break;
19031+
19032+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19033+ regs->cx = addr1;
19034+ regs->ax = addr2;
19035+ regs->ip = addr2;
19036+ return 2;
19037+ }
19038+ } while (0);
19039+
19040+ do { /* PaX: gcc trampoline emulation #2 */
19041+ unsigned char mov, jmp;
19042+ unsigned int addr1, addr2;
19043+
19044+#ifdef CONFIG_X86_64
19045+ if ((regs->ip + 9) >> 32)
19046+ break;
19047+#endif
19048+
19049+ err = get_user(mov, (unsigned char __user *)regs->ip);
19050+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19051+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19052+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19053+
19054+ if (err)
19055+ break;
19056+
19057+ if (mov == 0xB9 && jmp == 0xE9) {
19058+ regs->cx = addr1;
19059+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19060+ return 2;
19061+ }
19062+ } while (0);
19063+
19064+ return 1; /* PaX in action */
19065+}
19066+
19067+#ifdef CONFIG_X86_64
19068+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19069+{
19070+ int err;
19071+
19072+ do { /* PaX: gcc trampoline emulation #1 */
19073+ unsigned short mov1, mov2, jmp1;
19074+ unsigned char jmp2;
19075+ unsigned int addr1;
19076+ unsigned long addr2;
19077+
19078+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19079+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19080+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19081+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19082+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19083+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19084+
19085+ if (err)
19086+ break;
19087+
19088+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19089+ regs->r11 = addr1;
19090+ regs->r10 = addr2;
19091+ regs->ip = addr1;
19092+ return 2;
19093+ }
19094+ } while (0);
19095+
19096+ do { /* PaX: gcc trampoline emulation #2 */
19097+ unsigned short mov1, mov2, jmp1;
19098+ unsigned char jmp2;
19099+ unsigned long addr1, addr2;
19100+
19101+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19102+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19103+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19104+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19105+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19106+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19107+
19108+ if (err)
19109+ break;
19110+
19111+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19112+ regs->r11 = addr1;
19113+ regs->r10 = addr2;
19114+ regs->ip = addr1;
19115+ return 2;
19116+ }
19117+ } while (0);
19118+
19119+ return 1; /* PaX in action */
19120+}
19121+#endif
19122+
19123+/*
19124+ * PaX: decide what to do with offenders (regs->ip = fault address)
19125+ *
19126+ * returns 1 when task should be killed
19127+ * 2 when gcc trampoline was detected
19128+ */
19129+static int pax_handle_fetch_fault(struct pt_regs *regs)
19130+{
19131+ if (v8086_mode(regs))
19132+ return 1;
19133+
19134+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19135+ return 1;
19136+
19137+#ifdef CONFIG_X86_32
19138+ return pax_handle_fetch_fault_32(regs);
19139+#else
19140+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19141+ return pax_handle_fetch_fault_32(regs);
19142+ else
19143+ return pax_handle_fetch_fault_64(regs);
19144+#endif
19145+}
19146+#endif
19147+
19148+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19149+void pax_report_insns(void *pc, void *sp)
19150+{
19151+ long i;
19152+
19153+ printk(KERN_ERR "PAX: bytes at PC: ");
19154+ for (i = 0; i < 20; i++) {
19155+ unsigned char c;
19156+ if (get_user(c, (__force unsigned char __user *)pc+i))
19157+ printk(KERN_CONT "?? ");
19158+ else
19159+ printk(KERN_CONT "%02x ", c);
19160+ }
19161+ printk("\n");
19162+
19163+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19164+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19165+ unsigned long c;
19166+ if (get_user(c, (__force unsigned long __user *)sp+i))
19167+#ifdef CONFIG_X86_32
19168+ printk(KERN_CONT "???????? ");
19169+#else
19170+ printk(KERN_CONT "???????????????? ");
19171+#endif
19172+ else
19173+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19174+ }
19175+ printk("\n");
19176+}
19177+#endif
19178+
19179+/**
19180+ * probe_kernel_write(): safely attempt to write to a location
19181+ * @dst: address to write to
19182+ * @src: pointer to the data that shall be written
19183+ * @size: size of the data chunk
19184+ *
19185+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19186+ * happens, handle that and return -EFAULT.
19187+ */
19188+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19189+{
19190+ long ret;
19191+ mm_segment_t old_fs = get_fs();
19192+
19193+ set_fs(KERNEL_DS);
19194+ pagefault_disable();
19195+ pax_open_kernel();
19196+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19197+ pax_close_kernel();
19198+ pagefault_enable();
19199+ set_fs(old_fs);
19200+
19201+ return ret ? -EFAULT : 0;
19202+}
19203diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19204--- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19205+++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19206@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19207 addr = start;
19208 len = (unsigned long) nr_pages << PAGE_SHIFT;
19209 end = start + len;
19210- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19211+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19212 (void __user *)start, len)))
19213 return 0;
19214
19215diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19216--- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19217+++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19218@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19219 idx = type + KM_TYPE_NR*smp_processor_id();
19220 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19221 BUG_ON(!pte_none(*(kmap_pte-idx)));
19222+
19223+ pax_open_kernel();
19224 set_pte(kmap_pte-idx, mk_pte(page, prot));
19225+ pax_close_kernel();
19226
19227 return (void *)vaddr;
19228 }
19229diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19230--- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19231+++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19232@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19233 struct hstate *h = hstate_file(file);
19234 struct mm_struct *mm = current->mm;
19235 struct vm_area_struct *vma;
19236- unsigned long start_addr;
19237+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19238+
19239+#ifdef CONFIG_PAX_SEGMEXEC
19240+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19241+ pax_task_size = SEGMEXEC_TASK_SIZE;
19242+#endif
19243+
19244+ pax_task_size -= PAGE_SIZE;
19245
19246 if (len > mm->cached_hole_size) {
19247- start_addr = mm->free_area_cache;
19248+ start_addr = mm->free_area_cache;
19249 } else {
19250- start_addr = TASK_UNMAPPED_BASE;
19251- mm->cached_hole_size = 0;
19252+ start_addr = mm->mmap_base;
19253+ mm->cached_hole_size = 0;
19254 }
19255
19256 full_search:
19257@@ -280,26 +287,27 @@ full_search:
19258
19259 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19260 /* At this point: (!vma || addr < vma->vm_end). */
19261- if (TASK_SIZE - len < addr) {
19262+ if (pax_task_size - len < addr) {
19263 /*
19264 * Start a new search - just in case we missed
19265 * some holes.
19266 */
19267- if (start_addr != TASK_UNMAPPED_BASE) {
19268- start_addr = TASK_UNMAPPED_BASE;
19269+ if (start_addr != mm->mmap_base) {
19270+ start_addr = mm->mmap_base;
19271 mm->cached_hole_size = 0;
19272 goto full_search;
19273 }
19274 return -ENOMEM;
19275 }
19276- if (!vma || addr + len <= vma->vm_start) {
19277- mm->free_area_cache = addr + len;
19278- return addr;
19279- }
19280+ if (check_heap_stack_gap(vma, addr, len))
19281+ break;
19282 if (addr + mm->cached_hole_size < vma->vm_start)
19283 mm->cached_hole_size = vma->vm_start - addr;
19284 addr = ALIGN(vma->vm_end, huge_page_size(h));
19285 }
19286+
19287+ mm->free_area_cache = addr + len;
19288+ return addr;
19289 }
19290
19291 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19292@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19293 {
19294 struct hstate *h = hstate_file(file);
19295 struct mm_struct *mm = current->mm;
19296- struct vm_area_struct *vma, *prev_vma;
19297- unsigned long base = mm->mmap_base, addr = addr0;
19298+ struct vm_area_struct *vma;
19299+ unsigned long base = mm->mmap_base, addr;
19300 unsigned long largest_hole = mm->cached_hole_size;
19301- int first_time = 1;
19302
19303 /* don't allow allocations above current base */
19304 if (mm->free_area_cache > base)
19305@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19306 largest_hole = 0;
19307 mm->free_area_cache = base;
19308 }
19309-try_again:
19310+
19311 /* make sure it can fit in the remaining address space */
19312 if (mm->free_area_cache < len)
19313 goto fail;
19314
19315 /* either no address requested or can't fit in requested address hole */
19316- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19317+ addr = (mm->free_area_cache - len);
19318 do {
19319+ addr &= huge_page_mask(h);
19320+ vma = find_vma(mm, addr);
19321 /*
19322 * Lookup failure means no vma is above this address,
19323 * i.e. return with success:
19324- */
19325- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19326- return addr;
19327-
19328- /*
19329 * new region fits between prev_vma->vm_end and
19330 * vma->vm_start, use it:
19331 */
19332- if (addr + len <= vma->vm_start &&
19333- (!prev_vma || (addr >= prev_vma->vm_end))) {
19334+ if (check_heap_stack_gap(vma, addr, len)) {
19335 /* remember the address as a hint for next time */
19336- mm->cached_hole_size = largest_hole;
19337- return (mm->free_area_cache = addr);
19338- } else {
19339- /* pull free_area_cache down to the first hole */
19340- if (mm->free_area_cache == vma->vm_end) {
19341- mm->free_area_cache = vma->vm_start;
19342- mm->cached_hole_size = largest_hole;
19343- }
19344+ mm->cached_hole_size = largest_hole;
19345+ return (mm->free_area_cache = addr);
19346+ }
19347+ /* pull free_area_cache down to the first hole */
19348+ if (mm->free_area_cache == vma->vm_end) {
19349+ mm->free_area_cache = vma->vm_start;
19350+ mm->cached_hole_size = largest_hole;
19351 }
19352
19353 /* remember the largest hole we saw so far */
19354 if (addr + largest_hole < vma->vm_start)
19355- largest_hole = vma->vm_start - addr;
19356+ largest_hole = vma->vm_start - addr;
19357
19358 /* try just below the current vma->vm_start */
19359- addr = (vma->vm_start - len) & huge_page_mask(h);
19360- } while (len <= vma->vm_start);
19361+ addr = skip_heap_stack_gap(vma, len);
19362+ } while (!IS_ERR_VALUE(addr));
19363
19364 fail:
19365 /*
19366- * if hint left us with no space for the requested
19367- * mapping then try again:
19368- */
19369- if (first_time) {
19370- mm->free_area_cache = base;
19371- largest_hole = 0;
19372- first_time = 0;
19373- goto try_again;
19374- }
19375- /*
19376 * A failed mmap() very likely causes application failure,
19377 * so fall back to the bottom-up function here. This scenario
19378 * can happen with large stack limits and large mmap()
19379 * allocations.
19380 */
19381- mm->free_area_cache = TASK_UNMAPPED_BASE;
19382+
19383+#ifdef CONFIG_PAX_SEGMEXEC
19384+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19385+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19386+ else
19387+#endif
19388+
19389+ mm->mmap_base = TASK_UNMAPPED_BASE;
19390+
19391+#ifdef CONFIG_PAX_RANDMMAP
19392+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19393+ mm->mmap_base += mm->delta_mmap;
19394+#endif
19395+
19396+ mm->free_area_cache = mm->mmap_base;
19397 mm->cached_hole_size = ~0UL;
19398 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19399 len, pgoff, flags);
19400@@ -386,6 +392,7 @@ fail:
19401 /*
19402 * Restore the topdown base:
19403 */
19404+ mm->mmap_base = base;
19405 mm->free_area_cache = base;
19406 mm->cached_hole_size = ~0UL;
19407
19408@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19409 struct hstate *h = hstate_file(file);
19410 struct mm_struct *mm = current->mm;
19411 struct vm_area_struct *vma;
19412+ unsigned long pax_task_size = TASK_SIZE;
19413
19414 if (len & ~huge_page_mask(h))
19415 return -EINVAL;
19416- if (len > TASK_SIZE)
19417+
19418+#ifdef CONFIG_PAX_SEGMEXEC
19419+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19420+ pax_task_size = SEGMEXEC_TASK_SIZE;
19421+#endif
19422+
19423+ pax_task_size -= PAGE_SIZE;
19424+
19425+ if (len > pax_task_size)
19426 return -ENOMEM;
19427
19428 if (flags & MAP_FIXED) {
19429@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19430 if (addr) {
19431 addr = ALIGN(addr, huge_page_size(h));
19432 vma = find_vma(mm, addr);
19433- if (TASK_SIZE - len >= addr &&
19434- (!vma || addr + len <= vma->vm_start))
19435+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19436 return addr;
19437 }
19438 if (mm->get_unmapped_area == arch_get_unmapped_area)
19439diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19440--- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19441+++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19442@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19443 }
19444
19445 /*
19446- * Creates a middle page table and puts a pointer to it in the
19447- * given global directory entry. This only returns the gd entry
19448- * in non-PAE compilation mode, since the middle layer is folded.
19449- */
19450-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19451-{
19452- pud_t *pud;
19453- pmd_t *pmd_table;
19454-
19455-#ifdef CONFIG_X86_PAE
19456- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19457- if (after_bootmem)
19458- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19459- else
19460- pmd_table = (pmd_t *)alloc_low_page();
19461- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19462- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19463- pud = pud_offset(pgd, 0);
19464- BUG_ON(pmd_table != pmd_offset(pud, 0));
19465-
19466- return pmd_table;
19467- }
19468-#endif
19469- pud = pud_offset(pgd, 0);
19470- pmd_table = pmd_offset(pud, 0);
19471-
19472- return pmd_table;
19473-}
19474-
19475-/*
19476 * Create a page table and place a pointer to it in a middle page
19477 * directory entry:
19478 */
19479@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19480 page_table = (pte_t *)alloc_low_page();
19481
19482 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19483+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19484+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19485+#else
19486 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19487+#endif
19488 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19489 }
19490
19491 return pte_offset_kernel(pmd, 0);
19492 }
19493
19494+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19495+{
19496+ pud_t *pud;
19497+ pmd_t *pmd_table;
19498+
19499+ pud = pud_offset(pgd, 0);
19500+ pmd_table = pmd_offset(pud, 0);
19501+
19502+ return pmd_table;
19503+}
19504+
19505 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19506 {
19507 int pgd_idx = pgd_index(vaddr);
19508@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19509 int pgd_idx, pmd_idx;
19510 unsigned long vaddr;
19511 pgd_t *pgd;
19512+ pud_t *pud;
19513 pmd_t *pmd;
19514 pte_t *pte = NULL;
19515
19516@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19517 pgd = pgd_base + pgd_idx;
19518
19519 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19520- pmd = one_md_table_init(pgd);
19521- pmd = pmd + pmd_index(vaddr);
19522+ pud = pud_offset(pgd, vaddr);
19523+ pmd = pmd_offset(pud, vaddr);
19524+
19525+#ifdef CONFIG_X86_PAE
19526+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19527+#endif
19528+
19529 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19530 pmd++, pmd_idx++) {
19531 pte = page_table_kmap_check(one_page_table_init(pmd),
19532@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19533 }
19534 }
19535
19536-static inline int is_kernel_text(unsigned long addr)
19537+static inline int is_kernel_text(unsigned long start, unsigned long end)
19538 {
19539- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19540- return 1;
19541- return 0;
19542+ if ((start > ktla_ktva((unsigned long)_etext) ||
19543+ end <= ktla_ktva((unsigned long)_stext)) &&
19544+ (start > ktla_ktva((unsigned long)_einittext) ||
19545+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19546+
19547+#ifdef CONFIG_ACPI_SLEEP
19548+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19549+#endif
19550+
19551+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19552+ return 0;
19553+ return 1;
19554 }
19555
19556 /*
19557@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19558 unsigned long last_map_addr = end;
19559 unsigned long start_pfn, end_pfn;
19560 pgd_t *pgd_base = swapper_pg_dir;
19561- int pgd_idx, pmd_idx, pte_ofs;
19562+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19563 unsigned long pfn;
19564 pgd_t *pgd;
19565+ pud_t *pud;
19566 pmd_t *pmd;
19567 pte_t *pte;
19568 unsigned pages_2m, pages_4k;
19569@@ -281,8 +282,13 @@ repeat:
19570 pfn = start_pfn;
19571 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19572 pgd = pgd_base + pgd_idx;
19573- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19574- pmd = one_md_table_init(pgd);
19575+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19576+ pud = pud_offset(pgd, 0);
19577+ pmd = pmd_offset(pud, 0);
19578+
19579+#ifdef CONFIG_X86_PAE
19580+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19581+#endif
19582
19583 if (pfn >= end_pfn)
19584 continue;
19585@@ -294,14 +300,13 @@ repeat:
19586 #endif
19587 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19588 pmd++, pmd_idx++) {
19589- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19590+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19591
19592 /*
19593 * Map with big pages if possible, otherwise
19594 * create normal page tables:
19595 */
19596 if (use_pse) {
19597- unsigned int addr2;
19598 pgprot_t prot = PAGE_KERNEL_LARGE;
19599 /*
19600 * first pass will use the same initial
19601@@ -311,11 +316,7 @@ repeat:
19602 __pgprot(PTE_IDENT_ATTR |
19603 _PAGE_PSE);
19604
19605- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19606- PAGE_OFFSET + PAGE_SIZE-1;
19607-
19608- if (is_kernel_text(addr) ||
19609- is_kernel_text(addr2))
19610+ if (is_kernel_text(address, address + PMD_SIZE))
19611 prot = PAGE_KERNEL_LARGE_EXEC;
19612
19613 pages_2m++;
19614@@ -332,7 +333,7 @@ repeat:
19615 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19616 pte += pte_ofs;
19617 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19618- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19619+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19620 pgprot_t prot = PAGE_KERNEL;
19621 /*
19622 * first pass will use the same initial
19623@@ -340,7 +341,7 @@ repeat:
19624 */
19625 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19626
19627- if (is_kernel_text(addr))
19628+ if (is_kernel_text(address, address + PAGE_SIZE))
19629 prot = PAGE_KERNEL_EXEC;
19630
19631 pages_4k++;
19632@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19633
19634 pud = pud_offset(pgd, va);
19635 pmd = pmd_offset(pud, va);
19636- if (!pmd_present(*pmd))
19637+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19638 break;
19639
19640 pte = pte_offset_kernel(pmd, va);
19641@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19642
19643 static void __init pagetable_init(void)
19644 {
19645- pgd_t *pgd_base = swapper_pg_dir;
19646-
19647- permanent_kmaps_init(pgd_base);
19648+ permanent_kmaps_init(swapper_pg_dir);
19649 }
19650
19651-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19652+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19653 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19654
19655 /* user-defined highmem size */
19656@@ -754,6 +753,12 @@ void __init mem_init(void)
19657
19658 pci_iommu_alloc();
19659
19660+#ifdef CONFIG_PAX_PER_CPU_PGD
19661+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19662+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19663+ KERNEL_PGD_PTRS);
19664+#endif
19665+
19666 #ifdef CONFIG_FLATMEM
19667 BUG_ON(!mem_map);
19668 #endif
19669@@ -771,7 +776,7 @@ void __init mem_init(void)
19670 set_highmem_pages_init();
19671
19672 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19673- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19674+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19675 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19676
19677 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19678@@ -812,10 +817,10 @@ void __init mem_init(void)
19679 ((unsigned long)&__init_end -
19680 (unsigned long)&__init_begin) >> 10,
19681
19682- (unsigned long)&_etext, (unsigned long)&_edata,
19683- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19684+ (unsigned long)&_sdata, (unsigned long)&_edata,
19685+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19686
19687- (unsigned long)&_text, (unsigned long)&_etext,
19688+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19689 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19690
19691 /*
19692@@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19693 if (!kernel_set_to_readonly)
19694 return;
19695
19696+ start = ktla_ktva(start);
19697 pr_debug("Set kernel text: %lx - %lx for read write\n",
19698 start, start+size);
19699
19700@@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19701 if (!kernel_set_to_readonly)
19702 return;
19703
19704+ start = ktla_ktva(start);
19705 pr_debug("Set kernel text: %lx - %lx for read only\n",
19706 start, start+size);
19707
19708@@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19709 unsigned long start = PFN_ALIGN(_text);
19710 unsigned long size = PFN_ALIGN(_etext) - start;
19711
19712+ start = ktla_ktva(start);
19713 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19714 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19715 size >> 10);
19716diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19717--- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19718+++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19719@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19720 * around without checking the pgd every time.
19721 */
19722
19723-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19724+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19725 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19726
19727 int force_personality32;
19728@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19729
19730 for (address = start; address <= end; address += PGDIR_SIZE) {
19731 const pgd_t *pgd_ref = pgd_offset_k(address);
19732+
19733+#ifdef CONFIG_PAX_PER_CPU_PGD
19734+ unsigned long cpu;
19735+#else
19736 struct page *page;
19737+#endif
19738
19739 if (pgd_none(*pgd_ref))
19740 continue;
19741
19742 spin_lock(&pgd_lock);
19743+
19744+#ifdef CONFIG_PAX_PER_CPU_PGD
19745+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19746+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19747+#else
19748 list_for_each_entry(page, &pgd_list, lru) {
19749 pgd_t *pgd;
19750 spinlock_t *pgt_lock;
19751@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19752 /* the pgt_lock only for Xen */
19753 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19754 spin_lock(pgt_lock);
19755+#endif
19756
19757 if (pgd_none(*pgd))
19758 set_pgd(pgd, *pgd_ref);
19759@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19760 BUG_ON(pgd_page_vaddr(*pgd)
19761 != pgd_page_vaddr(*pgd_ref));
19762
19763+#ifndef CONFIG_PAX_PER_CPU_PGD
19764 spin_unlock(pgt_lock);
19765+#endif
19766+
19767 }
19768 spin_unlock(&pgd_lock);
19769 }
19770@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19771 pmd = fill_pmd(pud, vaddr);
19772 pte = fill_pte(pmd, vaddr);
19773
19774+ pax_open_kernel();
19775 set_pte(pte, new_pte);
19776+ pax_close_kernel();
19777
19778 /*
19779 * It's enough to flush this one mapping.
19780@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19781 pgd = pgd_offset_k((unsigned long)__va(phys));
19782 if (pgd_none(*pgd)) {
19783 pud = (pud_t *) spp_getpage();
19784- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19785- _PAGE_USER));
19786+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19787 }
19788 pud = pud_offset(pgd, (unsigned long)__va(phys));
19789 if (pud_none(*pud)) {
19790 pmd = (pmd_t *) spp_getpage();
19791- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19792- _PAGE_USER));
19793+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19794 }
19795 pmd = pmd_offset(pud, phys);
19796 BUG_ON(!pmd_none(*pmd));
19797@@ -698,6 +712,12 @@ void __init mem_init(void)
19798
19799 pci_iommu_alloc();
19800
19801+#ifdef CONFIG_PAX_PER_CPU_PGD
19802+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19803+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19804+ KERNEL_PGD_PTRS);
19805+#endif
19806+
19807 /* clear_bss() already clear the empty_zero_page */
19808
19809 reservedpages = 0;
19810@@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19811 static struct vm_area_struct gate_vma = {
19812 .vm_start = VSYSCALL_START,
19813 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19814- .vm_page_prot = PAGE_READONLY_EXEC,
19815- .vm_flags = VM_READ | VM_EXEC
19816+ .vm_page_prot = PAGE_READONLY,
19817+ .vm_flags = VM_READ
19818 };
19819
19820 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19821@@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19822
19823 const char *arch_vma_name(struct vm_area_struct *vma)
19824 {
19825- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19826+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19827 return "[vdso]";
19828 if (vma == &gate_vma)
19829 return "[vsyscall]";
19830diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19831--- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19832+++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19833@@ -33,7 +33,7 @@ int direct_gbpages
19834 static void __init find_early_table_space(unsigned long end, int use_pse,
19835 int use_gbpages)
19836 {
19837- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19838+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19839 phys_addr_t base;
19840
19841 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19842@@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19843 */
19844 int devmem_is_allowed(unsigned long pagenr)
19845 {
19846- if (pagenr <= 256)
19847+#ifdef CONFIG_GRKERNSEC_KMEM
19848+ /* allow BDA */
19849+ if (!pagenr)
19850+ return 1;
19851+ /* allow EBDA */
19852+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19853+ return 1;
19854+#else
19855+ if (!pagenr)
19856+ return 1;
19857+#ifdef CONFIG_VM86
19858+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19859+ return 1;
19860+#endif
19861+#endif
19862+
19863+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19864 return 1;
19865+#ifdef CONFIG_GRKERNSEC_KMEM
19866+ /* throw out everything else below 1MB */
19867+ if (pagenr <= 256)
19868+ return 0;
19869+#endif
19870 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19871 return 0;
19872 if (!page_is_ram(pagenr))
19873 return 1;
19874+
19875 return 0;
19876 }
19877
19878@@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19879
19880 void free_initmem(void)
19881 {
19882+
19883+#ifdef CONFIG_PAX_KERNEXEC
19884+#ifdef CONFIG_X86_32
19885+ /* PaX: limit KERNEL_CS to actual size */
19886+ unsigned long addr, limit;
19887+ struct desc_struct d;
19888+ int cpu;
19889+
19890+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19891+ limit = (limit - 1UL) >> PAGE_SHIFT;
19892+
19893+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19894+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19895+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19896+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19897+ }
19898+
19899+ /* PaX: make KERNEL_CS read-only */
19900+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19901+ if (!paravirt_enabled())
19902+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19903+/*
19904+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19905+ pgd = pgd_offset_k(addr);
19906+ pud = pud_offset(pgd, addr);
19907+ pmd = pmd_offset(pud, addr);
19908+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19909+ }
19910+*/
19911+#ifdef CONFIG_X86_PAE
19912+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19913+/*
19914+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19915+ pgd = pgd_offset_k(addr);
19916+ pud = pud_offset(pgd, addr);
19917+ pmd = pmd_offset(pud, addr);
19918+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19919+ }
19920+*/
19921+#endif
19922+
19923+#ifdef CONFIG_MODULES
19924+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19925+#endif
19926+
19927+#else
19928+ pgd_t *pgd;
19929+ pud_t *pud;
19930+ pmd_t *pmd;
19931+ unsigned long addr, end;
19932+
19933+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19934+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19935+ pgd = pgd_offset_k(addr);
19936+ pud = pud_offset(pgd, addr);
19937+ pmd = pmd_offset(pud, addr);
19938+ if (!pmd_present(*pmd))
19939+ continue;
19940+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19941+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19942+ else
19943+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19944+ }
19945+
19946+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19947+ end = addr + KERNEL_IMAGE_SIZE;
19948+ for (; addr < end; addr += PMD_SIZE) {
19949+ pgd = pgd_offset_k(addr);
19950+ pud = pud_offset(pgd, addr);
19951+ pmd = pmd_offset(pud, addr);
19952+ if (!pmd_present(*pmd))
19953+ continue;
19954+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19955+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19956+ }
19957+#endif
19958+
19959+ flush_tlb_all();
19960+#endif
19961+
19962 free_init_pages("unused kernel memory",
19963 (unsigned long)(&__init_begin),
19964 (unsigned long)(&__init_end));
19965diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
19966--- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
19967+++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
19968@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19969 type = kmap_atomic_idx_push();
19970 idx = type + KM_TYPE_NR * smp_processor_id();
19971 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19972+
19973+ pax_open_kernel();
19974 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19975+ pax_close_kernel();
19976+
19977 arch_flush_lazy_mmu_mode();
19978
19979 return (void *)vaddr;
19980diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
19981--- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
19982+++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
19983@@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
19984 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19985 int is_ram = page_is_ram(pfn);
19986
19987- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19988+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19989 return NULL;
19990 WARN_ON_ONCE(is_ram);
19991 }
19992@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19993 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19994
19995 static __initdata int after_paging_init;
19996-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19997+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19998
19999 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20000 {
20001@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20002 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20003
20004 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20005- memset(bm_pte, 0, sizeof(bm_pte));
20006- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20007+ pmd_populate_user(&init_mm, pmd, bm_pte);
20008
20009 /*
20010 * The boot-ioremap range spans multiple pmds, for which
20011diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20012--- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20013+++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20014@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20015 * memory (e.g. tracked pages)? For now, we need this to avoid
20016 * invoking kmemcheck for PnP BIOS calls.
20017 */
20018- if (regs->flags & X86_VM_MASK)
20019+ if (v8086_mode(regs))
20020 return false;
20021- if (regs->cs != __KERNEL_CS)
20022+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20023 return false;
20024
20025 pte = kmemcheck_pte_lookup(address);
20026diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20027--- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20028+++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20029@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20030 * Leave an at least ~128 MB hole with possible stack randomization.
20031 */
20032 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20033-#define MAX_GAP (TASK_SIZE/6*5)
20034+#define MAX_GAP (pax_task_size/6*5)
20035
20036 /*
20037 * True on X86_32 or when emulating IA32 on X86_64
20038@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20039 return rnd << PAGE_SHIFT;
20040 }
20041
20042-static unsigned long mmap_base(void)
20043+static unsigned long mmap_base(struct mm_struct *mm)
20044 {
20045 unsigned long gap = rlimit(RLIMIT_STACK);
20046+ unsigned long pax_task_size = TASK_SIZE;
20047+
20048+#ifdef CONFIG_PAX_SEGMEXEC
20049+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20050+ pax_task_size = SEGMEXEC_TASK_SIZE;
20051+#endif
20052
20053 if (gap < MIN_GAP)
20054 gap = MIN_GAP;
20055 else if (gap > MAX_GAP)
20056 gap = MAX_GAP;
20057
20058- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20059+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20060 }
20061
20062 /*
20063 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20064 * does, but not when emulating X86_32
20065 */
20066-static unsigned long mmap_legacy_base(void)
20067+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20068 {
20069- if (mmap_is_ia32())
20070+ if (mmap_is_ia32()) {
20071+
20072+#ifdef CONFIG_PAX_SEGMEXEC
20073+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20074+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20075+ else
20076+#endif
20077+
20078 return TASK_UNMAPPED_BASE;
20079- else
20080+ } else
20081 return TASK_UNMAPPED_BASE + mmap_rnd();
20082 }
20083
20084@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20085 void arch_pick_mmap_layout(struct mm_struct *mm)
20086 {
20087 if (mmap_is_legacy()) {
20088- mm->mmap_base = mmap_legacy_base();
20089+ mm->mmap_base = mmap_legacy_base(mm);
20090+
20091+#ifdef CONFIG_PAX_RANDMMAP
20092+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20093+ mm->mmap_base += mm->delta_mmap;
20094+#endif
20095+
20096 mm->get_unmapped_area = arch_get_unmapped_area;
20097 mm->unmap_area = arch_unmap_area;
20098 } else {
20099- mm->mmap_base = mmap_base();
20100+ mm->mmap_base = mmap_base(mm);
20101+
20102+#ifdef CONFIG_PAX_RANDMMAP
20103+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20104+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20105+#endif
20106+
20107 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20108 mm->unmap_area = arch_unmap_area_topdown;
20109 }
20110diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20111--- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20112+++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20113@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20114 break;
20115 default:
20116 {
20117- unsigned char *ip = (unsigned char *)instptr;
20118+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20119 my_trace->opcode = MMIO_UNKNOWN_OP;
20120 my_trace->width = 0;
20121 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20122@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20123 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20124 void __iomem *addr)
20125 {
20126- static atomic_t next_id;
20127+ static atomic_unchecked_t next_id;
20128 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20129 /* These are page-unaligned. */
20130 struct mmiotrace_map map = {
20131@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20132 .private = trace
20133 },
20134 .phys = offset,
20135- .id = atomic_inc_return(&next_id)
20136+ .id = atomic_inc_return_unchecked(&next_id)
20137 };
20138 map.map_id = trace->id;
20139
20140diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20141--- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20142+++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20143@@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20144 }
20145 #endif
20146
20147-extern unsigned long find_max_low_pfn(void);
20148 extern unsigned long highend_pfn, highstart_pfn;
20149
20150 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20151diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20152--- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20153+++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20154@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20155 */
20156 #ifdef CONFIG_PCI_BIOS
20157 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20158- pgprot_val(forbidden) |= _PAGE_NX;
20159+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20160 #endif
20161
20162 /*
20163@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20164 * Does not cover __inittext since that is gone later on. On
20165 * 64bit we do not enforce !NX on the low mapping
20166 */
20167- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20168- pgprot_val(forbidden) |= _PAGE_NX;
20169+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20170+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20171
20172+#ifdef CONFIG_DEBUG_RODATA
20173 /*
20174 * The .rodata section needs to be read-only. Using the pfn
20175 * catches all aliases.
20176@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20177 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20178 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20179 pgprot_val(forbidden) |= _PAGE_RW;
20180+#endif
20181
20182 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20183 /*
20184@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20185 }
20186 #endif
20187
20188+#ifdef CONFIG_PAX_KERNEXEC
20189+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20190+ pgprot_val(forbidden) |= _PAGE_RW;
20191+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20192+ }
20193+#endif
20194+
20195 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20196
20197 return prot;
20198@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20199 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20200 {
20201 /* change init_mm */
20202+ pax_open_kernel();
20203 set_pte_atomic(kpte, pte);
20204+
20205 #ifdef CONFIG_X86_32
20206 if (!SHARED_KERNEL_PMD) {
20207+
20208+#ifdef CONFIG_PAX_PER_CPU_PGD
20209+ unsigned long cpu;
20210+#else
20211 struct page *page;
20212+#endif
20213
20214+#ifdef CONFIG_PAX_PER_CPU_PGD
20215+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20216+ pgd_t *pgd = get_cpu_pgd(cpu);
20217+#else
20218 list_for_each_entry(page, &pgd_list, lru) {
20219- pgd_t *pgd;
20220+ pgd_t *pgd = (pgd_t *)page_address(page);
20221+#endif
20222+
20223 pud_t *pud;
20224 pmd_t *pmd;
20225
20226- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20227+ pgd += pgd_index(address);
20228 pud = pud_offset(pgd, address);
20229 pmd = pmd_offset(pud, address);
20230 set_pte_atomic((pte_t *)pmd, pte);
20231 }
20232 }
20233 #endif
20234+ pax_close_kernel();
20235 }
20236
20237 static int
20238diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20239--- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20240+++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20241@@ -36,7 +36,7 @@ enum {
20242
20243 static int pte_testbit(pte_t pte)
20244 {
20245- return pte_flags(pte) & _PAGE_UNUSED1;
20246+ return pte_flags(pte) & _PAGE_CPA_TEST;
20247 }
20248
20249 struct split_state {
20250diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20251--- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20252+++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20253@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20254
20255 if (!entry) {
20256 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20257- current->comm, current->pid, start, end);
20258+ current->comm, task_pid_nr(current), start, end);
20259 return -EINVAL;
20260 }
20261
20262@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20263 while (cursor < to) {
20264 if (!devmem_is_allowed(pfn)) {
20265 printk(KERN_INFO
20266- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20267- current->comm, from, to);
20268+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20269+ current->comm, from, to, cursor);
20270 return 0;
20271 }
20272 cursor += PAGE_SIZE;
20273@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20274 printk(KERN_INFO
20275 "%s:%d ioremap_change_attr failed %s "
20276 "for %Lx-%Lx\n",
20277- current->comm, current->pid,
20278+ current->comm, task_pid_nr(current),
20279 cattr_name(flags),
20280 base, (unsigned long long)(base + size));
20281 return -EINVAL;
20282@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20283 if (want_flags != flags) {
20284 printk(KERN_WARNING
20285 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20286- current->comm, current->pid,
20287+ current->comm, task_pid_nr(current),
20288 cattr_name(want_flags),
20289 (unsigned long long)paddr,
20290 (unsigned long long)(paddr + size),
20291@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20292 free_memtype(paddr, paddr + size);
20293 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20294 " for %Lx-%Lx, got %s\n",
20295- current->comm, current->pid,
20296+ current->comm, task_pid_nr(current),
20297 cattr_name(want_flags),
20298 (unsigned long long)paddr,
20299 (unsigned long long)(paddr + size),
20300diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20301--- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20302+++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20303@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20304 int i;
20305 enum reason_type rv = OTHERS;
20306
20307- p = (unsigned char *)ins_addr;
20308+ p = (unsigned char *)ktla_ktva(ins_addr);
20309 p += skip_prefix(p, &prf);
20310 p += get_opcode(p, &opcode);
20311
20312@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20313 struct prefix_bits prf;
20314 int i;
20315
20316- p = (unsigned char *)ins_addr;
20317+ p = (unsigned char *)ktla_ktva(ins_addr);
20318 p += skip_prefix(p, &prf);
20319 p += get_opcode(p, &opcode);
20320
20321@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20322 struct prefix_bits prf;
20323 int i;
20324
20325- p = (unsigned char *)ins_addr;
20326+ p = (unsigned char *)ktla_ktva(ins_addr);
20327 p += skip_prefix(p, &prf);
20328 p += get_opcode(p, &opcode);
20329
20330@@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20331 int i;
20332 unsigned long rv;
20333
20334- p = (unsigned char *)ins_addr;
20335+ p = (unsigned char *)ktla_ktva(ins_addr);
20336 p += skip_prefix(p, &prf);
20337 p += get_opcode(p, &opcode);
20338 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20339@@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20340 int i;
20341 unsigned long rv;
20342
20343- p = (unsigned char *)ins_addr;
20344+ p = (unsigned char *)ktla_ktva(ins_addr);
20345 p += skip_prefix(p, &prf);
20346 p += get_opcode(p, &opcode);
20347 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20348diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20349--- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20350+++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20351@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20352 return;
20353 }
20354 pte = pte_offset_kernel(pmd, vaddr);
20355+
20356+ pax_open_kernel();
20357 if (pte_val(pteval))
20358 set_pte_at(&init_mm, vaddr, pte, pteval);
20359 else
20360 pte_clear(&init_mm, vaddr, pte);
20361+ pax_close_kernel();
20362
20363 /*
20364 * It's enough to flush this one mapping.
20365diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20366--- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20367+++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20368@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20369 list_del(&page->lru);
20370 }
20371
20372-#define UNSHARED_PTRS_PER_PGD \
20373- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20374+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20375+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20376
20377+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20378+{
20379+ while (count--)
20380+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20381+}
20382+#endif
20383+
20384+#ifdef CONFIG_PAX_PER_CPU_PGD
20385+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20386+{
20387+ while (count--)
20388+
20389+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20390+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20391+#else
20392+ *dst++ = *src++;
20393+#endif
20394
20395+}
20396+#endif
20397+
20398+#ifdef CONFIG_X86_64
20399+#define pxd_t pud_t
20400+#define pyd_t pgd_t
20401+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20402+#define pxd_free(mm, pud) pud_free((mm), (pud))
20403+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20404+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20405+#define PYD_SIZE PGDIR_SIZE
20406+#else
20407+#define pxd_t pmd_t
20408+#define pyd_t pud_t
20409+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20410+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20411+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20412+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20413+#define PYD_SIZE PUD_SIZE
20414+#endif
20415+
20416+#ifdef CONFIG_PAX_PER_CPU_PGD
20417+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20418+static inline void pgd_dtor(pgd_t *pgd) {}
20419+#else
20420 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20421 {
20422 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20423@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20424 pgd_list_del(pgd);
20425 spin_unlock(&pgd_lock);
20426 }
20427+#endif
20428
20429 /*
20430 * List of all pgd's needed for non-PAE so it can invalidate entries
20431@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20432 * -- wli
20433 */
20434
20435-#ifdef CONFIG_X86_PAE
20436+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20437 /*
20438 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20439 * updating the top-level pagetable entries to guarantee the
20440@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20441 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20442 * and initialize the kernel pmds here.
20443 */
20444-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20445+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20446
20447 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20448 {
20449@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20450 */
20451 flush_tlb_mm(mm);
20452 }
20453+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20454+#define PREALLOCATED_PXDS USER_PGD_PTRS
20455 #else /* !CONFIG_X86_PAE */
20456
20457 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20458-#define PREALLOCATED_PMDS 0
20459+#define PREALLOCATED_PXDS 0
20460
20461 #endif /* CONFIG_X86_PAE */
20462
20463-static void free_pmds(pmd_t *pmds[])
20464+static void free_pxds(pxd_t *pxds[])
20465 {
20466 int i;
20467
20468- for(i = 0; i < PREALLOCATED_PMDS; i++)
20469- if (pmds[i])
20470- free_page((unsigned long)pmds[i]);
20471+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20472+ if (pxds[i])
20473+ free_page((unsigned long)pxds[i]);
20474 }
20475
20476-static int preallocate_pmds(pmd_t *pmds[])
20477+static int preallocate_pxds(pxd_t *pxds[])
20478 {
20479 int i;
20480 bool failed = false;
20481
20482- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20483- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20484- if (pmd == NULL)
20485+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20486+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20487+ if (pxd == NULL)
20488 failed = true;
20489- pmds[i] = pmd;
20490+ pxds[i] = pxd;
20491 }
20492
20493 if (failed) {
20494- free_pmds(pmds);
20495+ free_pxds(pxds);
20496 return -ENOMEM;
20497 }
20498
20499@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20500 * preallocate which never got a corresponding vma will need to be
20501 * freed manually.
20502 */
20503-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20504+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20505 {
20506 int i;
20507
20508- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20509+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20510 pgd_t pgd = pgdp[i];
20511
20512 if (pgd_val(pgd) != 0) {
20513- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20514+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20515
20516- pgdp[i] = native_make_pgd(0);
20517+ set_pgd(pgdp + i, native_make_pgd(0));
20518
20519- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20520- pmd_free(mm, pmd);
20521+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20522+ pxd_free(mm, pxd);
20523 }
20524 }
20525 }
20526
20527-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20528+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20529 {
20530- pud_t *pud;
20531+ pyd_t *pyd;
20532 unsigned long addr;
20533 int i;
20534
20535- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20536+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20537 return;
20538
20539- pud = pud_offset(pgd, 0);
20540+#ifdef CONFIG_X86_64
20541+ pyd = pyd_offset(mm, 0L);
20542+#else
20543+ pyd = pyd_offset(pgd, 0L);
20544+#endif
20545
20546- for (addr = i = 0; i < PREALLOCATED_PMDS;
20547- i++, pud++, addr += PUD_SIZE) {
20548- pmd_t *pmd = pmds[i];
20549+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20550+ i++, pyd++, addr += PYD_SIZE) {
20551+ pxd_t *pxd = pxds[i];
20552
20553 if (i >= KERNEL_PGD_BOUNDARY)
20554- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20555- sizeof(pmd_t) * PTRS_PER_PMD);
20556+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20557+ sizeof(pxd_t) * PTRS_PER_PMD);
20558
20559- pud_populate(mm, pud, pmd);
20560+ pyd_populate(mm, pyd, pxd);
20561 }
20562 }
20563
20564 pgd_t *pgd_alloc(struct mm_struct *mm)
20565 {
20566 pgd_t *pgd;
20567- pmd_t *pmds[PREALLOCATED_PMDS];
20568+ pxd_t *pxds[PREALLOCATED_PXDS];
20569
20570 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20571
20572@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20573
20574 mm->pgd = pgd;
20575
20576- if (preallocate_pmds(pmds) != 0)
20577+ if (preallocate_pxds(pxds) != 0)
20578 goto out_free_pgd;
20579
20580 if (paravirt_pgd_alloc(mm) != 0)
20581- goto out_free_pmds;
20582+ goto out_free_pxds;
20583
20584 /*
20585 * Make sure that pre-populating the pmds is atomic with
20586@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20587 spin_lock(&pgd_lock);
20588
20589 pgd_ctor(mm, pgd);
20590- pgd_prepopulate_pmd(mm, pgd, pmds);
20591+ pgd_prepopulate_pxd(mm, pgd, pxds);
20592
20593 spin_unlock(&pgd_lock);
20594
20595 return pgd;
20596
20597-out_free_pmds:
20598- free_pmds(pmds);
20599+out_free_pxds:
20600+ free_pxds(pxds);
20601 out_free_pgd:
20602 free_page((unsigned long)pgd);
20603 out:
20604@@ -295,7 +344,7 @@ out:
20605
20606 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20607 {
20608- pgd_mop_up_pmds(mm, pgd);
20609+ pgd_mop_up_pxds(mm, pgd);
20610 pgd_dtor(pgd);
20611 paravirt_pgd_free(mm, pgd);
20612 free_page((unsigned long)pgd);
20613diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20614--- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20615+++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20616@@ -5,8 +5,10 @@
20617 #include <asm/pgtable.h>
20618 #include <asm/proto.h>
20619
20620+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20621 static int disable_nx __cpuinitdata;
20622
20623+#ifndef CONFIG_PAX_PAGEEXEC
20624 /*
20625 * noexec = on|off
20626 *
20627@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20628 return 0;
20629 }
20630 early_param("noexec", noexec_setup);
20631+#endif
20632+
20633+#endif
20634
20635 void __cpuinit x86_configure_nx(void)
20636 {
20637+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20638 if (cpu_has_nx && !disable_nx)
20639 __supported_pte_mask |= _PAGE_NX;
20640 else
20641+#endif
20642 __supported_pte_mask &= ~_PAGE_NX;
20643 }
20644
20645diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20646--- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20647+++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20648@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20649 BUG();
20650 cpumask_clear_cpu(cpu,
20651 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20652+
20653+#ifndef CONFIG_PAX_PER_CPU_PGD
20654 load_cr3(swapper_pg_dir);
20655+#endif
20656+
20657 }
20658 EXPORT_SYMBOL_GPL(leave_mm);
20659
20660diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20661--- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20662+++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20663@@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20664 struct stack_frame_ia32 *fp;
20665
20666 /* Also check accessibility of one struct frame_head beyond */
20667- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20668+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20669 return NULL;
20670 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20671 return NULL;
20672@@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20673 {
20674 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20675
20676- if (!user_mode_vm(regs)) {
20677+ if (!user_mode(regs)) {
20678 unsigned long stack = kernel_stack_pointer(regs);
20679 if (depth)
20680 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20681diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20682--- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20683+++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20684@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20685 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20686 pci_mmcfg_late_init();
20687 pcibios_enable_irq = mrst_pci_irq_enable;
20688- pci_root_ops = pci_mrst_ops;
20689+ pax_open_kernel();
20690+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20691+ pax_close_kernel();
20692 /* Continue with standard init */
20693 return 1;
20694 }
20695diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20696--- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20697+++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20698@@ -79,50 +79,93 @@ union bios32 {
20699 static struct {
20700 unsigned long address;
20701 unsigned short segment;
20702-} bios32_indirect = { 0, __KERNEL_CS };
20703+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20704
20705 /*
20706 * Returns the entry point for the given service, NULL on error
20707 */
20708
20709-static unsigned long bios32_service(unsigned long service)
20710+static unsigned long __devinit bios32_service(unsigned long service)
20711 {
20712 unsigned char return_code; /* %al */
20713 unsigned long address; /* %ebx */
20714 unsigned long length; /* %ecx */
20715 unsigned long entry; /* %edx */
20716 unsigned long flags;
20717+ struct desc_struct d, *gdt;
20718
20719 local_irq_save(flags);
20720- __asm__("lcall *(%%edi); cld"
20721+
20722+ gdt = get_cpu_gdt_table(smp_processor_id());
20723+
20724+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20725+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20726+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20727+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20728+
20729+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20730 : "=a" (return_code),
20731 "=b" (address),
20732 "=c" (length),
20733 "=d" (entry)
20734 : "0" (service),
20735 "1" (0),
20736- "D" (&bios32_indirect));
20737+ "D" (&bios32_indirect),
20738+ "r"(__PCIBIOS_DS)
20739+ : "memory");
20740+
20741+ pax_open_kernel();
20742+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20743+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20744+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20745+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20746+ pax_close_kernel();
20747+
20748 local_irq_restore(flags);
20749
20750 switch (return_code) {
20751- case 0:
20752- return address + entry;
20753- case 0x80: /* Not present */
20754- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20755- return 0;
20756- default: /* Shouldn't happen */
20757- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20758- service, return_code);
20759+ case 0: {
20760+ int cpu;
20761+ unsigned char flags;
20762+
20763+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20764+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20765+ printk(KERN_WARNING "bios32_service: not valid\n");
20766 return 0;
20767+ }
20768+ address = address + PAGE_OFFSET;
20769+ length += 16UL; /* some BIOSs underreport this... */
20770+ flags = 4;
20771+ if (length >= 64*1024*1024) {
20772+ length >>= PAGE_SHIFT;
20773+ flags |= 8;
20774+ }
20775+
20776+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20777+ gdt = get_cpu_gdt_table(cpu);
20778+ pack_descriptor(&d, address, length, 0x9b, flags);
20779+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20780+ pack_descriptor(&d, address, length, 0x93, flags);
20781+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20782+ }
20783+ return entry;
20784+ }
20785+ case 0x80: /* Not present */
20786+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20787+ return 0;
20788+ default: /* Shouldn't happen */
20789+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20790+ service, return_code);
20791+ return 0;
20792 }
20793 }
20794
20795 static struct {
20796 unsigned long address;
20797 unsigned short segment;
20798-} pci_indirect = { 0, __KERNEL_CS };
20799+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20800
20801-static int pci_bios_present;
20802+static int pci_bios_present __read_only;
20803
20804 static int __devinit check_pcibios(void)
20805 {
20806@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20807 unsigned long flags, pcibios_entry;
20808
20809 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20810- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20811+ pci_indirect.address = pcibios_entry;
20812
20813 local_irq_save(flags);
20814- __asm__(
20815- "lcall *(%%edi); cld\n\t"
20816+ __asm__("movw %w6, %%ds\n\t"
20817+ "lcall *%%ss:(%%edi); cld\n\t"
20818+ "push %%ss\n\t"
20819+ "pop %%ds\n\t"
20820 "jc 1f\n\t"
20821 "xor %%ah, %%ah\n"
20822 "1:"
20823@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20824 "=b" (ebx),
20825 "=c" (ecx)
20826 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20827- "D" (&pci_indirect)
20828+ "D" (&pci_indirect),
20829+ "r" (__PCIBIOS_DS)
20830 : "memory");
20831 local_irq_restore(flags);
20832
20833@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20834
20835 switch (len) {
20836 case 1:
20837- __asm__("lcall *(%%esi); cld\n\t"
20838+ __asm__("movw %w6, %%ds\n\t"
20839+ "lcall *%%ss:(%%esi); cld\n\t"
20840+ "push %%ss\n\t"
20841+ "pop %%ds\n\t"
20842 "jc 1f\n\t"
20843 "xor %%ah, %%ah\n"
20844 "1:"
20845@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20846 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20847 "b" (bx),
20848 "D" ((long)reg),
20849- "S" (&pci_indirect));
20850+ "S" (&pci_indirect),
20851+ "r" (__PCIBIOS_DS));
20852 /*
20853 * Zero-extend the result beyond 8 bits, do not trust the
20854 * BIOS having done it:
20855@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20856 *value &= 0xff;
20857 break;
20858 case 2:
20859- __asm__("lcall *(%%esi); cld\n\t"
20860+ __asm__("movw %w6, %%ds\n\t"
20861+ "lcall *%%ss:(%%esi); cld\n\t"
20862+ "push %%ss\n\t"
20863+ "pop %%ds\n\t"
20864 "jc 1f\n\t"
20865 "xor %%ah, %%ah\n"
20866 "1:"
20867@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20868 : "1" (PCIBIOS_READ_CONFIG_WORD),
20869 "b" (bx),
20870 "D" ((long)reg),
20871- "S" (&pci_indirect));
20872+ "S" (&pci_indirect),
20873+ "r" (__PCIBIOS_DS));
20874 /*
20875 * Zero-extend the result beyond 16 bits, do not trust the
20876 * BIOS having done it:
20877@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20878 *value &= 0xffff;
20879 break;
20880 case 4:
20881- __asm__("lcall *(%%esi); cld\n\t"
20882+ __asm__("movw %w6, %%ds\n\t"
20883+ "lcall *%%ss:(%%esi); cld\n\t"
20884+ "push %%ss\n\t"
20885+ "pop %%ds\n\t"
20886 "jc 1f\n\t"
20887 "xor %%ah, %%ah\n"
20888 "1:"
20889@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20890 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20891 "b" (bx),
20892 "D" ((long)reg),
20893- "S" (&pci_indirect));
20894+ "S" (&pci_indirect),
20895+ "r" (__PCIBIOS_DS));
20896 break;
20897 }
20898
20899@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20900
20901 switch (len) {
20902 case 1:
20903- __asm__("lcall *(%%esi); cld\n\t"
20904+ __asm__("movw %w6, %%ds\n\t"
20905+ "lcall *%%ss:(%%esi); cld\n\t"
20906+ "push %%ss\n\t"
20907+ "pop %%ds\n\t"
20908 "jc 1f\n\t"
20909 "xor %%ah, %%ah\n"
20910 "1:"
20911@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20912 "c" (value),
20913 "b" (bx),
20914 "D" ((long)reg),
20915- "S" (&pci_indirect));
20916+ "S" (&pci_indirect),
20917+ "r" (__PCIBIOS_DS));
20918 break;
20919 case 2:
20920- __asm__("lcall *(%%esi); cld\n\t"
20921+ __asm__("movw %w6, %%ds\n\t"
20922+ "lcall *%%ss:(%%esi); cld\n\t"
20923+ "push %%ss\n\t"
20924+ "pop %%ds\n\t"
20925 "jc 1f\n\t"
20926 "xor %%ah, %%ah\n"
20927 "1:"
20928@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20929 "c" (value),
20930 "b" (bx),
20931 "D" ((long)reg),
20932- "S" (&pci_indirect));
20933+ "S" (&pci_indirect),
20934+ "r" (__PCIBIOS_DS));
20935 break;
20936 case 4:
20937- __asm__("lcall *(%%esi); cld\n\t"
20938+ __asm__("movw %w6, %%ds\n\t"
20939+ "lcall *%%ss:(%%esi); cld\n\t"
20940+ "push %%ss\n\t"
20941+ "pop %%ds\n\t"
20942 "jc 1f\n\t"
20943 "xor %%ah, %%ah\n"
20944 "1:"
20945@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20946 "c" (value),
20947 "b" (bx),
20948 "D" ((long)reg),
20949- "S" (&pci_indirect));
20950+ "S" (&pci_indirect),
20951+ "r" (__PCIBIOS_DS));
20952 break;
20953 }
20954
20955@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20956
20957 DBG("PCI: Fetching IRQ routing table... ");
20958 __asm__("push %%es\n\t"
20959+ "movw %w8, %%ds\n\t"
20960 "push %%ds\n\t"
20961 "pop %%es\n\t"
20962- "lcall *(%%esi); cld\n\t"
20963+ "lcall *%%ss:(%%esi); cld\n\t"
20964 "pop %%es\n\t"
20965+ "push %%ss\n\t"
20966+ "pop %%ds\n"
20967 "jc 1f\n\t"
20968 "xor %%ah, %%ah\n"
20969 "1:"
20970@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20971 "1" (0),
20972 "D" ((long) &opt),
20973 "S" (&pci_indirect),
20974- "m" (opt)
20975+ "m" (opt),
20976+ "r" (__PCIBIOS_DS)
20977 : "memory");
20978 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20979 if (ret & 0xff00)
20980@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20981 {
20982 int ret;
20983
20984- __asm__("lcall *(%%esi); cld\n\t"
20985+ __asm__("movw %w5, %%ds\n\t"
20986+ "lcall *%%ss:(%%esi); cld\n\t"
20987+ "push %%ss\n\t"
20988+ "pop %%ds\n"
20989 "jc 1f\n\t"
20990 "xor %%ah, %%ah\n"
20991 "1:"
20992@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20993 : "0" (PCIBIOS_SET_PCI_HW_INT),
20994 "b" ((dev->bus->number << 8) | dev->devfn),
20995 "c" ((irq << 8) | (pin + 10)),
20996- "S" (&pci_indirect));
20997+ "S" (&pci_indirect),
20998+ "r" (__PCIBIOS_DS));
20999 return !(ret & 0xff00);
21000 }
21001 EXPORT_SYMBOL(pcibios_set_irq_routing);
21002diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21003--- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21004+++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21005@@ -38,70 +38,37 @@
21006 */
21007
21008 static unsigned long efi_rt_eflags;
21009-static pgd_t efi_bak_pg_dir_pointer[2];
21010+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21011
21012-void efi_call_phys_prelog(void)
21013+void __init efi_call_phys_prelog(void)
21014 {
21015- unsigned long cr4;
21016- unsigned long temp;
21017 struct desc_ptr gdt_descr;
21018
21019 local_irq_save(efi_rt_eflags);
21020
21021- /*
21022- * If I don't have PAE, I should just duplicate two entries in page
21023- * directory. If I have PAE, I just need to duplicate one entry in
21024- * page directory.
21025- */
21026- cr4 = read_cr4_safe();
21027-
21028- if (cr4 & X86_CR4_PAE) {
21029- efi_bak_pg_dir_pointer[0].pgd =
21030- swapper_pg_dir[pgd_index(0)].pgd;
21031- swapper_pg_dir[0].pgd =
21032- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21033- } else {
21034- efi_bak_pg_dir_pointer[0].pgd =
21035- swapper_pg_dir[pgd_index(0)].pgd;
21036- efi_bak_pg_dir_pointer[1].pgd =
21037- swapper_pg_dir[pgd_index(0x400000)].pgd;
21038- swapper_pg_dir[pgd_index(0)].pgd =
21039- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21040- temp = PAGE_OFFSET + 0x400000;
21041- swapper_pg_dir[pgd_index(0x400000)].pgd =
21042- swapper_pg_dir[pgd_index(temp)].pgd;
21043- }
21044+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21045+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21046+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21047
21048 /*
21049 * After the lock is released, the original page table is restored.
21050 */
21051 __flush_tlb_all();
21052
21053- gdt_descr.address = __pa(get_cpu_gdt_table(0));
21054+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21055 gdt_descr.size = GDT_SIZE - 1;
21056 load_gdt(&gdt_descr);
21057 }
21058
21059-void efi_call_phys_epilog(void)
21060+void __init efi_call_phys_epilog(void)
21061 {
21062- unsigned long cr4;
21063 struct desc_ptr gdt_descr;
21064
21065- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21066+ gdt_descr.address = get_cpu_gdt_table(0);
21067 gdt_descr.size = GDT_SIZE - 1;
21068 load_gdt(&gdt_descr);
21069
21070- cr4 = read_cr4_safe();
21071-
21072- if (cr4 & X86_CR4_PAE) {
21073- swapper_pg_dir[pgd_index(0)].pgd =
21074- efi_bak_pg_dir_pointer[0].pgd;
21075- } else {
21076- swapper_pg_dir[pgd_index(0)].pgd =
21077- efi_bak_pg_dir_pointer[0].pgd;
21078- swapper_pg_dir[pgd_index(0x400000)].pgd =
21079- efi_bak_pg_dir_pointer[1].pgd;
21080- }
21081+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21082
21083 /*
21084 * After the lock is released, the original page table is restored.
21085diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21086--- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21087+++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21088@@ -6,6 +6,7 @@
21089 */
21090
21091 #include <linux/linkage.h>
21092+#include <linux/init.h>
21093 #include <asm/page_types.h>
21094
21095 /*
21096@@ -20,7 +21,7 @@
21097 * service functions will comply with gcc calling convention, too.
21098 */
21099
21100-.text
21101+__INIT
21102 ENTRY(efi_call_phys)
21103 /*
21104 * 0. The function can only be called in Linux kernel. So CS has been
21105@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21106 * The mapping of lower virtual memory has been created in prelog and
21107 * epilog.
21108 */
21109- movl $1f, %edx
21110- subl $__PAGE_OFFSET, %edx
21111- jmp *%edx
21112+ jmp 1f-__PAGE_OFFSET
21113 1:
21114
21115 /*
21116@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21117 * parameter 2, ..., param n. To make things easy, we save the return
21118 * address of efi_call_phys in a global variable.
21119 */
21120- popl %edx
21121- movl %edx, saved_return_addr
21122- /* get the function pointer into ECX*/
21123- popl %ecx
21124- movl %ecx, efi_rt_function_ptr
21125- movl $2f, %edx
21126- subl $__PAGE_OFFSET, %edx
21127- pushl %edx
21128+ popl (saved_return_addr)
21129+ popl (efi_rt_function_ptr)
21130
21131 /*
21132 * 3. Clear PG bit in %CR0.
21133@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21134 /*
21135 * 5. Call the physical function.
21136 */
21137- jmp *%ecx
21138+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21139
21140-2:
21141 /*
21142 * 6. After EFI runtime service returns, control will return to
21143 * following instruction. We'd better readjust stack pointer first.
21144@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21145 movl %cr0, %edx
21146 orl $0x80000000, %edx
21147 movl %edx, %cr0
21148- jmp 1f
21149-1:
21150+
21151 /*
21152 * 8. Now restore the virtual mode from flat mode by
21153 * adding EIP with PAGE_OFFSET.
21154 */
21155- movl $1f, %edx
21156- jmp *%edx
21157+ jmp 1f+__PAGE_OFFSET
21158 1:
21159
21160 /*
21161 * 9. Balance the stack. And because EAX contain the return value,
21162 * we'd better not clobber it.
21163 */
21164- leal efi_rt_function_ptr, %edx
21165- movl (%edx), %ecx
21166- pushl %ecx
21167+ pushl (efi_rt_function_ptr)
21168
21169 /*
21170- * 10. Push the saved return address onto the stack and return.
21171+ * 10. Return to the saved return address.
21172 */
21173- leal saved_return_addr, %edx
21174- movl (%edx), %ecx
21175- pushl %ecx
21176- ret
21177+ jmpl *(saved_return_addr)
21178 ENDPROC(efi_call_phys)
21179 .previous
21180
21181-.data
21182+__INITDATA
21183 saved_return_addr:
21184 .long 0
21185 efi_rt_function_ptr:
21186diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21187--- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21188+++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21189@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21190 }
21191
21192 /* Reboot and power off are handled by the SCU on a MID device */
21193-static void mrst_power_off(void)
21194+static __noreturn void mrst_power_off(void)
21195 {
21196 intel_scu_ipc_simple_command(0xf1, 1);
21197+ BUG();
21198 }
21199
21200-static void mrst_reboot(void)
21201+static __noreturn void mrst_reboot(void)
21202 {
21203 intel_scu_ipc_simple_command(0xf1, 0);
21204+ BUG();
21205 }
21206
21207 /*
21208diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21209--- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21210+++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21211@@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21212 cpumask_t mask;
21213 struct reset_args reset_args;
21214
21215+ pax_track_stack();
21216+
21217 reset_args.sender = sender;
21218
21219 cpus_clear(mask);
21220diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21221--- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21222+++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21223@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21224 static void fix_processor_context(void)
21225 {
21226 int cpu = smp_processor_id();
21227- struct tss_struct *t = &per_cpu(init_tss, cpu);
21228+ struct tss_struct *t = init_tss + cpu;
21229
21230 set_tss_desc(cpu, t); /*
21231 * This just modifies memory; should not be
21232@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21233 */
21234
21235 #ifdef CONFIG_X86_64
21236+ pax_open_kernel();
21237 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21238+ pax_close_kernel();
21239
21240 syscall_init(); /* This sets MSR_*STAR and related */
21241 #endif
21242diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21243--- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21244+++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21245@@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21246 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21247 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21248
21249-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21250+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21251 GCOV_PROFILE := n
21252
21253 #
21254diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21255--- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21256+++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21257@@ -22,24 +22,48 @@
21258 #include <asm/hpet.h>
21259 #include <asm/unistd.h>
21260 #include <asm/io.h>
21261+#include <asm/fixmap.h>
21262 #include "vextern.h"
21263
21264 #define gtod vdso_vsyscall_gtod_data
21265
21266+notrace noinline long __vdso_fallback_time(long *t)
21267+{
21268+ long secs;
21269+ asm volatile("syscall"
21270+ : "=a" (secs)
21271+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21272+ return secs;
21273+}
21274+
21275 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21276 {
21277 long ret;
21278 asm("syscall" : "=a" (ret) :
21279- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21280+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21281 return ret;
21282 }
21283
21284+notrace static inline cycle_t __vdso_vread_hpet(void)
21285+{
21286+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21287+}
21288+
21289+notrace static inline cycle_t __vdso_vread_tsc(void)
21290+{
21291+ cycle_t ret = (cycle_t)vget_cycles();
21292+
21293+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21294+}
21295+
21296 notrace static inline long vgetns(void)
21297 {
21298 long v;
21299- cycles_t (*vread)(void);
21300- vread = gtod->clock.vread;
21301- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21302+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21303+ v = __vdso_vread_tsc();
21304+ else
21305+ v = __vdso_vread_hpet();
21306+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21307 return (v * gtod->clock.mult) >> gtod->clock.shift;
21308 }
21309
21310@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21311
21312 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21313 {
21314- if (likely(gtod->sysctl_enabled))
21315+ if (likely(gtod->sysctl_enabled &&
21316+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21317+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21318 switch (clock) {
21319 case CLOCK_REALTIME:
21320 if (likely(gtod->clock.vread))
21321@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21322 int clock_gettime(clockid_t, struct timespec *)
21323 __attribute__((weak, alias("__vdso_clock_gettime")));
21324
21325-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21326+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21327 {
21328 long ret;
21329- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21330+ asm("syscall" : "=a" (ret) :
21331+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21332+ return ret;
21333+}
21334+
21335+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21336+{
21337+ if (likely(gtod->sysctl_enabled &&
21338+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21339+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21340+ {
21341 if (likely(tv != NULL)) {
21342 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21343 offsetof(struct timespec, tv_nsec) ||
21344@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21345 }
21346 return 0;
21347 }
21348- asm("syscall" : "=a" (ret) :
21349- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21350- return ret;
21351+ return __vdso_fallback_gettimeofday(tv, tz);
21352 }
21353 int gettimeofday(struct timeval *, struct timezone *)
21354 __attribute__((weak, alias("__vdso_gettimeofday")));
21355diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21356--- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21357+++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21358@@ -25,6 +25,7 @@
21359 #include <asm/tlbflush.h>
21360 #include <asm/vdso.h>
21361 #include <asm/proto.h>
21362+#include <asm/mman.h>
21363
21364 enum {
21365 VDSO_DISABLED = 0,
21366@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21367 void enable_sep_cpu(void)
21368 {
21369 int cpu = get_cpu();
21370- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21371+ struct tss_struct *tss = init_tss + cpu;
21372
21373 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21374 put_cpu();
21375@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21376 gate_vma.vm_start = FIXADDR_USER_START;
21377 gate_vma.vm_end = FIXADDR_USER_END;
21378 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21379- gate_vma.vm_page_prot = __P101;
21380+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21381 /*
21382 * Make sure the vDSO gets into every core dump.
21383 * Dumping its contents makes post-mortem fully interpretable later
21384@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21385 if (compat)
21386 addr = VDSO_HIGH_BASE;
21387 else {
21388- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21389+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21390 if (IS_ERR_VALUE(addr)) {
21391 ret = addr;
21392 goto up_fail;
21393 }
21394 }
21395
21396- current->mm->context.vdso = (void *)addr;
21397+ current->mm->context.vdso = addr;
21398
21399 if (compat_uses_vma || !compat) {
21400 /*
21401@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21402 }
21403
21404 current_thread_info()->sysenter_return =
21405- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21406+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21407
21408 up_fail:
21409 if (ret)
21410- current->mm->context.vdso = NULL;
21411+ current->mm->context.vdso = 0;
21412
21413 up_write(&mm->mmap_sem);
21414
21415@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21416
21417 const char *arch_vma_name(struct vm_area_struct *vma)
21418 {
21419- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21420+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21421 return "[vdso]";
21422+
21423+#ifdef CONFIG_PAX_SEGMEXEC
21424+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21425+ return "[vdso]";
21426+#endif
21427+
21428 return NULL;
21429 }
21430
21431@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21432 * Check to see if the corresponding task was created in compat vdso
21433 * mode.
21434 */
21435- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21436+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21437 return &gate_vma;
21438 return NULL;
21439 }
21440diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21441--- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21442+++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21443@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21444 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21445 #include "vextern.h"
21446 #undef VEXTERN
21447+
21448+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21449+VEXTERN(fallback_gettimeofday)
21450+VEXTERN(fallback_time)
21451+VEXTERN(getcpu)
21452+#undef VEXTERN
21453diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21454--- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21455+++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21456@@ -11,6 +11,5 @@
21457 put into vextern.h and be referenced as a pointer with vdso prefix.
21458 The main kernel later fills in the values. */
21459
21460-VEXTERN(jiffies)
21461 VEXTERN(vgetcpu_mode)
21462 VEXTERN(vsyscall_gtod_data)
21463diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21464--- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21465+++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21466@@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21467 if (!vbase)
21468 goto oom;
21469
21470- if (memcmp(vbase, "\177ELF", 4)) {
21471+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
21472 printk("VDSO: I'm broken; not ELF\n");
21473 vdso_enabled = 0;
21474 }
21475@@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21476 goto up_fail;
21477 }
21478
21479- current->mm->context.vdso = (void *)addr;
21480+ current->mm->context.vdso = addr;
21481
21482 ret = install_special_mapping(mm, addr, vdso_size,
21483 VM_READ|VM_EXEC|
21484@@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21485 VM_ALWAYSDUMP,
21486 vdso_pages);
21487 if (ret) {
21488- current->mm->context.vdso = NULL;
21489+ current->mm->context.vdso = 0;
21490 goto up_fail;
21491 }
21492
21493@@ -134,10 +134,3 @@ up_fail:
21494 up_write(&mm->mmap_sem);
21495 return ret;
21496 }
21497-
21498-static __init int vdso_setup(char *s)
21499-{
21500- vdso_enabled = simple_strtoul(s, NULL, 0);
21501- return 0;
21502-}
21503-__setup("vdso=", vdso_setup);
21504diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21505--- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21506+++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21507@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21508
21509 struct shared_info xen_dummy_shared_info;
21510
21511-void *xen_initial_gdt;
21512-
21513 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21514 __read_mostly int xen_have_vector_callback;
21515 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21516@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21517 #endif
21518 };
21519
21520-static void xen_reboot(int reason)
21521+static __noreturn void xen_reboot(int reason)
21522 {
21523 struct sched_shutdown r = { .reason = reason };
21524
21525@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21526 BUG();
21527 }
21528
21529-static void xen_restart(char *msg)
21530+static __noreturn void xen_restart(char *msg)
21531 {
21532 xen_reboot(SHUTDOWN_reboot);
21533 }
21534
21535-static void xen_emergency_restart(void)
21536+static __noreturn void xen_emergency_restart(void)
21537 {
21538 xen_reboot(SHUTDOWN_reboot);
21539 }
21540
21541-static void xen_machine_halt(void)
21542+static __noreturn void xen_machine_halt(void)
21543 {
21544 xen_reboot(SHUTDOWN_poweroff);
21545 }
21546@@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21547 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21548
21549 /* Work out if we support NX */
21550- x86_configure_nx();
21551+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21552+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21553+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21554+ unsigned l, h;
21555+
21556+ __supported_pte_mask |= _PAGE_NX;
21557+ rdmsr(MSR_EFER, l, h);
21558+ l |= EFER_NX;
21559+ wrmsr(MSR_EFER, l, h);
21560+ }
21561+#endif
21562
21563 xen_setup_features();
21564
21565@@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21566
21567 machine_ops = xen_machine_ops;
21568
21569- /*
21570- * The only reliable way to retain the initial address of the
21571- * percpu gdt_page is to remember it here, so we can go and
21572- * mark it RW later, when the initial percpu area is freed.
21573- */
21574- xen_initial_gdt = &per_cpu(gdt_page, 0);
21575-
21576 xen_smp_init();
21577
21578 #ifdef CONFIG_ACPI_NUMA
21579diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21580--- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21581+++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21582@@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21583 convert_pfn_mfn(init_level4_pgt);
21584 convert_pfn_mfn(level3_ident_pgt);
21585 convert_pfn_mfn(level3_kernel_pgt);
21586+ convert_pfn_mfn(level3_vmalloc_pgt);
21587+ convert_pfn_mfn(level3_vmemmap_pgt);
21588
21589 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21590 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21591@@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21592 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21593 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21594 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21595+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21596+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21597 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21598+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21599 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21600 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21601
21602diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21603--- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21604+++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21605@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21606 {
21607 BUG_ON(smp_processor_id() != 0);
21608 native_smp_prepare_boot_cpu();
21609-
21610- /* We've switched to the "real" per-cpu gdt, so make sure the
21611- old memory can be recycled */
21612- make_lowmem_page_readwrite(xen_initial_gdt);
21613-
21614 xen_filter_cpu_maps();
21615 xen_setup_vcpu_info_placement();
21616 }
21617@@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21618 gdt = get_cpu_gdt_table(cpu);
21619
21620 ctxt->flags = VGCF_IN_KERNEL;
21621- ctxt->user_regs.ds = __USER_DS;
21622- ctxt->user_regs.es = __USER_DS;
21623+ ctxt->user_regs.ds = __KERNEL_DS;
21624+ ctxt->user_regs.es = __KERNEL_DS;
21625 ctxt->user_regs.ss = __KERNEL_DS;
21626 #ifdef CONFIG_X86_32
21627 ctxt->user_regs.fs = __KERNEL_PERCPU;
21628- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21629+ savesegment(gs, ctxt->user_regs.gs);
21630 #else
21631 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21632 #endif
21633@@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21634 int rc;
21635
21636 per_cpu(current_task, cpu) = idle;
21637+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21638 #ifdef CONFIG_X86_32
21639 irq_ctx_init(cpu);
21640 #else
21641 clear_tsk_thread_flag(idle, TIF_FORK);
21642- per_cpu(kernel_stack, cpu) =
21643- (unsigned long)task_stack_page(idle) -
21644- KERNEL_STACK_OFFSET + THREAD_SIZE;
21645+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21646 #endif
21647 xen_setup_runstate_info(cpu);
21648 xen_setup_timer(cpu);
21649diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21650--- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21651+++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21652@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21653 ESP_OFFSET=4 # bytes pushed onto stack
21654
21655 /*
21656- * Store vcpu_info pointer for easy access. Do it this way to
21657- * avoid having to reload %fs
21658+ * Store vcpu_info pointer for easy access.
21659 */
21660 #ifdef CONFIG_SMP
21661- GET_THREAD_INFO(%eax)
21662- movl TI_cpu(%eax), %eax
21663- movl __per_cpu_offset(,%eax,4), %eax
21664- mov xen_vcpu(%eax), %eax
21665+ push %fs
21666+ mov $(__KERNEL_PERCPU), %eax
21667+ mov %eax, %fs
21668+ mov PER_CPU_VAR(xen_vcpu), %eax
21669+ pop %fs
21670 #else
21671 movl xen_vcpu, %eax
21672 #endif
21673diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21674--- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21675+++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21676@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21677 #ifdef CONFIG_X86_32
21678 mov %esi,xen_start_info
21679 mov $init_thread_union+THREAD_SIZE,%esp
21680+#ifdef CONFIG_SMP
21681+ movl $cpu_gdt_table,%edi
21682+ movl $__per_cpu_load,%eax
21683+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21684+ rorl $16,%eax
21685+ movb %al,__KERNEL_PERCPU + 4(%edi)
21686+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21687+ movl $__per_cpu_end - 1,%eax
21688+ subl $__per_cpu_start,%eax
21689+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21690+#endif
21691 #else
21692 mov %rsi,xen_start_info
21693 mov $init_thread_union+THREAD_SIZE,%rsp
21694diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21695--- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21696+++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21697@@ -10,8 +10,6 @@
21698 extern const char xen_hypervisor_callback[];
21699 extern const char xen_failsafe_callback[];
21700
21701-extern void *xen_initial_gdt;
21702-
21703 struct trap_info;
21704 void xen_copy_trap_info(struct trap_info *traps);
21705
21706diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21707--- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21708+++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21709@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21710 }
21711 EXPORT_SYMBOL(blk_iopoll_complete);
21712
21713-static void blk_iopoll_softirq(struct softirq_action *h)
21714+static void blk_iopoll_softirq(void)
21715 {
21716 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21717 int rearm = 0, budget = blk_iopoll_budget;
21718diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21719--- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21720+++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21721@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21722 if (!len || !kbuf)
21723 return -EINVAL;
21724
21725- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21726+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21727 if (do_copy)
21728 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21729 else
21730diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21731--- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21732+++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21733@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21734 * Softirq action handler - move entries to local list and loop over them
21735 * while passing them to the queue registered handler.
21736 */
21737-static void blk_done_softirq(struct softirq_action *h)
21738+static void blk_done_softirq(void)
21739 {
21740 struct list_head *cpu_list, local_list;
21741
21742diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21743--- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21744+++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21745@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21746 struct sg_io_v4 *hdr, struct bsg_device *bd,
21747 fmode_t has_write_perm)
21748 {
21749+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21750+ unsigned char *cmdptr;
21751+
21752 if (hdr->request_len > BLK_MAX_CDB) {
21753 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21754 if (!rq->cmd)
21755 return -ENOMEM;
21756- }
21757+ cmdptr = rq->cmd;
21758+ } else
21759+ cmdptr = tmpcmd;
21760
21761- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21762+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21763 hdr->request_len))
21764 return -EFAULT;
21765
21766+ if (cmdptr != rq->cmd)
21767+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21768+
21769 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21770 if (blk_verify_command(rq->cmd, has_write_perm))
21771 return -EPERM;
21772diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21773--- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21774+++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21775@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21776 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21777 struct sg_io_hdr *hdr, fmode_t mode)
21778 {
21779- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21780+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21781+ unsigned char *cmdptr;
21782+
21783+ if (rq->cmd != rq->__cmd)
21784+ cmdptr = rq->cmd;
21785+ else
21786+ cmdptr = tmpcmd;
21787+
21788+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21789 return -EFAULT;
21790+
21791+ if (cmdptr != rq->cmd)
21792+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21793+
21794 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21795 return -EPERM;
21796
21797@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21798 int err;
21799 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21800 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21801+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21802+ unsigned char *cmdptr;
21803
21804 if (!sic)
21805 return -EINVAL;
21806@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21807 */
21808 err = -EFAULT;
21809 rq->cmd_len = cmdlen;
21810- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21811+
21812+ if (rq->cmd != rq->__cmd)
21813+ cmdptr = rq->cmd;
21814+ else
21815+ cmdptr = tmpcmd;
21816+
21817+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21818 goto error;
21819
21820+ if (rq->cmd != cmdptr)
21821+ memcpy(rq->cmd, cmdptr, cmdlen);
21822+
21823 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21824 goto error;
21825
21826diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21827--- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21828+++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21829@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21830
21831 struct cryptd_blkcipher_request_ctx {
21832 crypto_completion_t complete;
21833-};
21834+} __no_const;
21835
21836 struct cryptd_hash_ctx {
21837 struct crypto_shash *child;
21838@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21839
21840 struct cryptd_aead_request_ctx {
21841 crypto_completion_t complete;
21842-};
21843+} __no_const;
21844
21845 static void cryptd_queue_worker(struct work_struct *work);
21846
21847diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21848--- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21849+++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21850@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21851 for (i = 0; i < 7; ++i)
21852 gf128mul_x_lle(&p[i + 1], &p[i]);
21853
21854- memset(r, 0, sizeof(r));
21855+ memset(r, 0, sizeof(*r));
21856 for (i = 0;;) {
21857 u8 ch = ((u8 *)b)[15 - i];
21858
21859@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21860 for (i = 0; i < 7; ++i)
21861 gf128mul_x_bbe(&p[i + 1], &p[i]);
21862
21863- memset(r, 0, sizeof(r));
21864+ memset(r, 0, sizeof(*r));
21865 for (i = 0;;) {
21866 u8 ch = ((u8 *)b)[i];
21867
21868diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21869--- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21870+++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21871@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21872 u32 r0,r1,r2,r3,r4;
21873 int i;
21874
21875+ pax_track_stack();
21876+
21877 /* Copy key, add padding */
21878
21879 for (i = 0; i < keylen; ++i)
21880diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21881--- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21882+++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21883@@ -1,13 +1,16 @@
21884 *.a
21885 *.aux
21886 *.bin
21887+*.cis
21888 *.cpio
21889 *.csp
21890+*.dbg
21891 *.dsp
21892 *.dvi
21893 *.elf
21894 *.eps
21895 *.fw
21896+*.gcno
21897 *.gen.S
21898 *.gif
21899 *.grep
21900@@ -38,8 +41,10 @@
21901 *.tab.h
21902 *.tex
21903 *.ver
21904+*.vim
21905 *.xml
21906 *_MODULES
21907+*_reg_safe.h
21908 *_vga16.c
21909 *~
21910 *.9
21911@@ -49,11 +54,16 @@
21912 53c700_d.h
21913 CVS
21914 ChangeSet
21915+GPATH
21916+GRTAGS
21917+GSYMS
21918+GTAGS
21919 Image
21920 Kerntypes
21921 Module.markers
21922 Module.symvers
21923 PENDING
21924+PERF*
21925 SCCS
21926 System.map*
21927 TAGS
21928@@ -80,8 +90,11 @@ btfixupprep
21929 build
21930 bvmlinux
21931 bzImage*
21932+capability_names.h
21933 capflags.c
21934 classlist.h*
21935+clut_vga16.c
21936+common-cmds.h
21937 comp*.log
21938 compile.h*
21939 conf
21940@@ -106,16 +119,19 @@ fore200e_mkfirm
21941 fore200e_pca_fw.c*
21942 gconf
21943 gen-devlist
21944+gen-kdb_cmds.c
21945 gen_crc32table
21946 gen_init_cpio
21947 generated
21948 genheaders
21949 genksyms
21950 *_gray256.c
21951+hash
21952 ihex2fw
21953 ikconfig.h*
21954 inat-tables.c
21955 initramfs_data.cpio
21956+initramfs_data.cpio.bz2
21957 initramfs_data.cpio.gz
21958 initramfs_list
21959 int16.c
21960@@ -125,7 +141,6 @@ int32.c
21961 int4.c
21962 int8.c
21963 kallsyms
21964-kconfig
21965 keywords.c
21966 ksym.c*
21967 ksym.h*
21968@@ -149,7 +164,9 @@ mkboot
21969 mkbugboot
21970 mkcpustr
21971 mkdep
21972+mkpiggy
21973 mkprep
21974+mkregtable
21975 mktables
21976 mktree
21977 modpost
21978@@ -165,6 +182,7 @@ parse.h
21979 patches*
21980 pca200e.bin
21981 pca200e_ecd.bin2
21982+perf-archive
21983 piggy.gz
21984 piggyback
21985 piggy.S
21986@@ -180,7 +198,9 @@ r600_reg_safe.h
21987 raid6altivec*.c
21988 raid6int*.c
21989 raid6tables.c
21990+regdb.c
21991 relocs
21992+rlim_names.h
21993 rn50_reg_safe.h
21994 rs600_reg_safe.h
21995 rv515_reg_safe.h
21996@@ -189,6 +209,7 @@ setup
21997 setup.bin
21998 setup.elf
21999 sImage
22000+slabinfo
22001 sm_tbl*
22002 split-include
22003 syscalltab.h
22004@@ -213,13 +234,17 @@ version.h*
22005 vmlinux
22006 vmlinux-*
22007 vmlinux.aout
22008+vmlinux.bin.all
22009+vmlinux.bin.bz2
22010 vmlinux.lds
22011+vmlinux.relocs
22012 voffset.h
22013 vsyscall.lds
22014 vsyscall_32.lds
22015 wanxlfw.inc
22016 uImage
22017 unifdef
22018+utsrelease.h
22019 wakeup.bin
22020 wakeup.elf
22021 wakeup.lds
22022diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22023--- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22024+++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22025@@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22026 the specified number of seconds. This is to be used if
22027 your oopses keep scrolling off the screen.
22028
22029+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22030+ virtualization environments that don't cope well with the
22031+ expand down segment used by UDEREF on X86-32 or the frequent
22032+ page table updates on X86-64.
22033+
22034+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22035+
22036 pcbit= [HW,ISDN]
22037
22038 pcd. [PARIDE]
22039diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22040--- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22041+++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22042@@ -38,12 +38,12 @@
22043 */
22044 u64 cper_next_record_id(void)
22045 {
22046- static atomic64_t seq;
22047+ static atomic64_unchecked_t seq;
22048
22049- if (!atomic64_read(&seq))
22050- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22051+ if (!atomic64_read_unchecked(&seq))
22052+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22053
22054- return atomic64_inc_return(&seq);
22055+ return atomic64_inc_return_unchecked(&seq);
22056 }
22057 EXPORT_SYMBOL_GPL(cper_next_record_id);
22058
22059diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22060--- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22061+++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22062@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22063 return res;
22064
22065 temp /= 1000;
22066- if (temp < 0)
22067- return -EINVAL;
22068
22069 mutex_lock(&resource->lock);
22070 resource->trip[attr->index - 7] = temp;
22071diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22072--- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22073+++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22074@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22075 size_t count, loff_t * ppos)
22076 {
22077 struct list_head *node, *next;
22078- char strbuf[5];
22079- char str[5] = "";
22080- unsigned int len = count;
22081-
22082- if (len > 4)
22083- len = 4;
22084- if (len < 0)
22085- return -EFAULT;
22086+ char strbuf[5] = {0};
22087
22088- if (copy_from_user(strbuf, buffer, len))
22089+ if (count > 4)
22090+ count = 4;
22091+ if (copy_from_user(strbuf, buffer, count))
22092 return -EFAULT;
22093- strbuf[len] = '\0';
22094- sscanf(strbuf, "%s", str);
22095+ strbuf[count] = '\0';
22096
22097 mutex_lock(&acpi_device_lock);
22098 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22099@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22100 if (!dev->wakeup.flags.valid)
22101 continue;
22102
22103- if (!strncmp(dev->pnp.bus_id, str, 4)) {
22104+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22105 if (device_can_wakeup(&dev->dev)) {
22106 bool enable = !device_may_wakeup(&dev->dev);
22107 device_set_wakeup_enable(&dev->dev, enable);
22108diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22109--- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22110+++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22111@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22112 return 0;
22113 #endif
22114
22115- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22116+ BUG_ON(pr->id >= nr_cpu_ids);
22117
22118 /*
22119 * Buggy BIOS check
22120diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22121--- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22122+++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22123@@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22124 struct ata_port *ap;
22125 unsigned int tag;
22126
22127- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22128+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22129 ap = qc->ap;
22130
22131 qc->flags = 0;
22132@@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22133 struct ata_port *ap;
22134 struct ata_link *link;
22135
22136- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22137+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22138 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22139 ap = qc->ap;
22140 link = qc->dev->link;
22141@@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22142 return;
22143
22144 spin_lock(&lock);
22145+ pax_open_kernel();
22146
22147 for (cur = ops->inherits; cur; cur = cur->inherits) {
22148 void **inherit = (void **)cur;
22149@@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22150 if (IS_ERR(*pp))
22151 *pp = NULL;
22152
22153- ops->inherits = NULL;
22154+ *(struct ata_port_operations **)&ops->inherits = NULL;
22155
22156+ pax_close_kernel();
22157 spin_unlock(&lock);
22158 }
22159
22160diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22161--- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22162+++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22163@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22164 {
22165 struct ata_link *link;
22166
22167+ pax_track_stack();
22168+
22169 ata_for_each_link(link, ap, HOST_FIRST)
22170 ata_eh_link_report(link);
22171 }
22172diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22173--- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22174+++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22175@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22176 /* Handle platform specific quirks */
22177 if (pdata->quirk) {
22178 if (pdata->quirk & CF_BROKEN_PIO) {
22179- ap->ops->set_piomode = NULL;
22180+ pax_open_kernel();
22181+ *(void **)&ap->ops->set_piomode = NULL;
22182+ pax_close_kernel();
22183 ap->pio_mask = 0;
22184 }
22185 if (pdata->quirk & CF_BROKEN_MWDMA)
22186diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22187--- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22188+++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22189@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22190 vcc->pop(vcc, skb);
22191 else
22192 dev_kfree_skb_any(skb);
22193- atomic_inc(&vcc->stats->tx);
22194+ atomic_inc_unchecked(&vcc->stats->tx);
22195
22196 return 0;
22197 }
22198diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22199--- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22200+++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22201@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22202 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22203
22204 // VC layer stats
22205- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22206+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22207
22208 // free the descriptor
22209 kfree (tx_descr);
22210@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22211 dump_skb ("<<<", vc, skb);
22212
22213 // VC layer stats
22214- atomic_inc(&atm_vcc->stats->rx);
22215+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22216 __net_timestamp(skb);
22217 // end of our responsibility
22218 atm_vcc->push (atm_vcc, skb);
22219@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22220 } else {
22221 PRINTK (KERN_INFO, "dropped over-size frame");
22222 // should we count this?
22223- atomic_inc(&atm_vcc->stats->rx_drop);
22224+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22225 }
22226
22227 } else {
22228@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22229 }
22230
22231 if (check_area (skb->data, skb->len)) {
22232- atomic_inc(&atm_vcc->stats->tx_err);
22233+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22234 return -ENOMEM; // ?
22235 }
22236
22237diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22238--- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22239+++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22240@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22241 if (vcc->pop) vcc->pop(vcc,skb);
22242 else dev_kfree_skb(skb);
22243 if (dev_data) return 0;
22244- atomic_inc(&vcc->stats->tx_err);
22245+ atomic_inc_unchecked(&vcc->stats->tx_err);
22246 return -ENOLINK;
22247 }
22248 size = skb->len+sizeof(struct atmtcp_hdr);
22249@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22250 if (!new_skb) {
22251 if (vcc->pop) vcc->pop(vcc,skb);
22252 else dev_kfree_skb(skb);
22253- atomic_inc(&vcc->stats->tx_err);
22254+ atomic_inc_unchecked(&vcc->stats->tx_err);
22255 return -ENOBUFS;
22256 }
22257 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22258@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22259 if (vcc->pop) vcc->pop(vcc,skb);
22260 else dev_kfree_skb(skb);
22261 out_vcc->push(out_vcc,new_skb);
22262- atomic_inc(&vcc->stats->tx);
22263- atomic_inc(&out_vcc->stats->rx);
22264+ atomic_inc_unchecked(&vcc->stats->tx);
22265+ atomic_inc_unchecked(&out_vcc->stats->rx);
22266 return 0;
22267 }
22268
22269@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22270 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22271 read_unlock(&vcc_sklist_lock);
22272 if (!out_vcc) {
22273- atomic_inc(&vcc->stats->tx_err);
22274+ atomic_inc_unchecked(&vcc->stats->tx_err);
22275 goto done;
22276 }
22277 skb_pull(skb,sizeof(struct atmtcp_hdr));
22278@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22279 __net_timestamp(new_skb);
22280 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22281 out_vcc->push(out_vcc,new_skb);
22282- atomic_inc(&vcc->stats->tx);
22283- atomic_inc(&out_vcc->stats->rx);
22284+ atomic_inc_unchecked(&vcc->stats->tx);
22285+ atomic_inc_unchecked(&out_vcc->stats->rx);
22286 done:
22287 if (vcc->pop) vcc->pop(vcc,skb);
22288 else dev_kfree_skb(skb);
22289diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22290--- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22291+++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22292@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22293 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22294 vcc->dev->number);
22295 length = 0;
22296- atomic_inc(&vcc->stats->rx_err);
22297+ atomic_inc_unchecked(&vcc->stats->rx_err);
22298 }
22299 else {
22300 length = ATM_CELL_SIZE-1; /* no HEC */
22301@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22302 size);
22303 }
22304 eff = length = 0;
22305- atomic_inc(&vcc->stats->rx_err);
22306+ atomic_inc_unchecked(&vcc->stats->rx_err);
22307 }
22308 else {
22309 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22310@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22311 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22312 vcc->dev->number,vcc->vci,length,size << 2,descr);
22313 length = eff = 0;
22314- atomic_inc(&vcc->stats->rx_err);
22315+ atomic_inc_unchecked(&vcc->stats->rx_err);
22316 }
22317 }
22318 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22319@@ -771,7 +771,7 @@ rx_dequeued++;
22320 vcc->push(vcc,skb);
22321 pushed++;
22322 }
22323- atomic_inc(&vcc->stats->rx);
22324+ atomic_inc_unchecked(&vcc->stats->rx);
22325 }
22326 wake_up(&eni_dev->rx_wait);
22327 }
22328@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22329 PCI_DMA_TODEVICE);
22330 if (vcc->pop) vcc->pop(vcc,skb);
22331 else dev_kfree_skb_irq(skb);
22332- atomic_inc(&vcc->stats->tx);
22333+ atomic_inc_unchecked(&vcc->stats->tx);
22334 wake_up(&eni_dev->tx_wait);
22335 dma_complete++;
22336 }
22337diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22338--- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22339+++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22340@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22341 }
22342 }
22343
22344- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22345+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22346
22347 fs_dprintk (FS_DEBUG_TXMEM, "i");
22348 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22349@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22350 #endif
22351 skb_put (skb, qe->p1 & 0xffff);
22352 ATM_SKB(skb)->vcc = atm_vcc;
22353- atomic_inc(&atm_vcc->stats->rx);
22354+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22355 __net_timestamp(skb);
22356 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22357 atm_vcc->push (atm_vcc, skb);
22358@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22359 kfree (pe);
22360 }
22361 if (atm_vcc)
22362- atomic_inc(&atm_vcc->stats->rx_drop);
22363+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22364 break;
22365 case 0x1f: /* Reassembly abort: no buffers. */
22366 /* Silently increment error counter. */
22367 if (atm_vcc)
22368- atomic_inc(&atm_vcc->stats->rx_drop);
22369+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22370 break;
22371 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22372 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22373diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22374--- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22375+++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22376@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22377 #endif
22378 /* check error condition */
22379 if (*entry->status & STATUS_ERROR)
22380- atomic_inc(&vcc->stats->tx_err);
22381+ atomic_inc_unchecked(&vcc->stats->tx_err);
22382 else
22383- atomic_inc(&vcc->stats->tx);
22384+ atomic_inc_unchecked(&vcc->stats->tx);
22385 }
22386 }
22387
22388@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22389 if (skb == NULL) {
22390 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22391
22392- atomic_inc(&vcc->stats->rx_drop);
22393+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22394 return -ENOMEM;
22395 }
22396
22397@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22398
22399 dev_kfree_skb_any(skb);
22400
22401- atomic_inc(&vcc->stats->rx_drop);
22402+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22403 return -ENOMEM;
22404 }
22405
22406 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22407
22408 vcc->push(vcc, skb);
22409- atomic_inc(&vcc->stats->rx);
22410+ atomic_inc_unchecked(&vcc->stats->rx);
22411
22412 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22413
22414@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22415 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22416 fore200e->atm_dev->number,
22417 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22418- atomic_inc(&vcc->stats->rx_err);
22419+ atomic_inc_unchecked(&vcc->stats->rx_err);
22420 }
22421 }
22422
22423@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22424 goto retry_here;
22425 }
22426
22427- atomic_inc(&vcc->stats->tx_err);
22428+ atomic_inc_unchecked(&vcc->stats->tx_err);
22429
22430 fore200e->tx_sat++;
22431 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22432diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22433--- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22434+++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22435@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22436
22437 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22438 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22439- atomic_inc(&vcc->stats->rx_drop);
22440+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22441 goto return_host_buffers;
22442 }
22443
22444@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22445 RBRQ_LEN_ERR(he_dev->rbrq_head)
22446 ? "LEN_ERR" : "",
22447 vcc->vpi, vcc->vci);
22448- atomic_inc(&vcc->stats->rx_err);
22449+ atomic_inc_unchecked(&vcc->stats->rx_err);
22450 goto return_host_buffers;
22451 }
22452
22453@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22454 vcc->push(vcc, skb);
22455 spin_lock(&he_dev->global_lock);
22456
22457- atomic_inc(&vcc->stats->rx);
22458+ atomic_inc_unchecked(&vcc->stats->rx);
22459
22460 return_host_buffers:
22461 ++pdus_assembled;
22462@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22463 tpd->vcc->pop(tpd->vcc, tpd->skb);
22464 else
22465 dev_kfree_skb_any(tpd->skb);
22466- atomic_inc(&tpd->vcc->stats->tx_err);
22467+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22468 }
22469 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22470 return;
22471@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22472 vcc->pop(vcc, skb);
22473 else
22474 dev_kfree_skb_any(skb);
22475- atomic_inc(&vcc->stats->tx_err);
22476+ atomic_inc_unchecked(&vcc->stats->tx_err);
22477 return -EINVAL;
22478 }
22479
22480@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22481 vcc->pop(vcc, skb);
22482 else
22483 dev_kfree_skb_any(skb);
22484- atomic_inc(&vcc->stats->tx_err);
22485+ atomic_inc_unchecked(&vcc->stats->tx_err);
22486 return -EINVAL;
22487 }
22488 #endif
22489@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22490 vcc->pop(vcc, skb);
22491 else
22492 dev_kfree_skb_any(skb);
22493- atomic_inc(&vcc->stats->tx_err);
22494+ atomic_inc_unchecked(&vcc->stats->tx_err);
22495 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22496 return -ENOMEM;
22497 }
22498@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22499 vcc->pop(vcc, skb);
22500 else
22501 dev_kfree_skb_any(skb);
22502- atomic_inc(&vcc->stats->tx_err);
22503+ atomic_inc_unchecked(&vcc->stats->tx_err);
22504 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22505 return -ENOMEM;
22506 }
22507@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22508 __enqueue_tpd(he_dev, tpd, cid);
22509 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22510
22511- atomic_inc(&vcc->stats->tx);
22512+ atomic_inc_unchecked(&vcc->stats->tx);
22513
22514 return 0;
22515 }
22516diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22517--- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22518+++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22519@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22520 {
22521 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22522 // VC layer stats
22523- atomic_inc(&vcc->stats->rx);
22524+ atomic_inc_unchecked(&vcc->stats->rx);
22525 __net_timestamp(skb);
22526 // end of our responsibility
22527 vcc->push (vcc, skb);
22528@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22529 dev->tx_iovec = NULL;
22530
22531 // VC layer stats
22532- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22533+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22534
22535 // free the skb
22536 hrz_kfree_skb (skb);
22537diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22538--- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22539+++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22540@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22541 else
22542 dev_kfree_skb(skb);
22543
22544- atomic_inc(&vcc->stats->tx);
22545+ atomic_inc_unchecked(&vcc->stats->tx);
22546 }
22547
22548 atomic_dec(&scq->used);
22549@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22550 if ((sb = dev_alloc_skb(64)) == NULL) {
22551 printk("%s: Can't allocate buffers for aal0.\n",
22552 card->name);
22553- atomic_add(i, &vcc->stats->rx_drop);
22554+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22555 break;
22556 }
22557 if (!atm_charge(vcc, sb->truesize)) {
22558 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22559 card->name);
22560- atomic_add(i - 1, &vcc->stats->rx_drop);
22561+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22562 dev_kfree_skb(sb);
22563 break;
22564 }
22565@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22566 ATM_SKB(sb)->vcc = vcc;
22567 __net_timestamp(sb);
22568 vcc->push(vcc, sb);
22569- atomic_inc(&vcc->stats->rx);
22570+ atomic_inc_unchecked(&vcc->stats->rx);
22571
22572 cell += ATM_CELL_PAYLOAD;
22573 }
22574@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22575 "(CDC: %08x)\n",
22576 card->name, len, rpp->len, readl(SAR_REG_CDC));
22577 recycle_rx_pool_skb(card, rpp);
22578- atomic_inc(&vcc->stats->rx_err);
22579+ atomic_inc_unchecked(&vcc->stats->rx_err);
22580 return;
22581 }
22582 if (stat & SAR_RSQE_CRC) {
22583 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22584 recycle_rx_pool_skb(card, rpp);
22585- atomic_inc(&vcc->stats->rx_err);
22586+ atomic_inc_unchecked(&vcc->stats->rx_err);
22587 return;
22588 }
22589 if (skb_queue_len(&rpp->queue) > 1) {
22590@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22591 RXPRINTK("%s: Can't alloc RX skb.\n",
22592 card->name);
22593 recycle_rx_pool_skb(card, rpp);
22594- atomic_inc(&vcc->stats->rx_err);
22595+ atomic_inc_unchecked(&vcc->stats->rx_err);
22596 return;
22597 }
22598 if (!atm_charge(vcc, skb->truesize)) {
22599@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22600 __net_timestamp(skb);
22601
22602 vcc->push(vcc, skb);
22603- atomic_inc(&vcc->stats->rx);
22604+ atomic_inc_unchecked(&vcc->stats->rx);
22605
22606 return;
22607 }
22608@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22609 __net_timestamp(skb);
22610
22611 vcc->push(vcc, skb);
22612- atomic_inc(&vcc->stats->rx);
22613+ atomic_inc_unchecked(&vcc->stats->rx);
22614
22615 if (skb->truesize > SAR_FB_SIZE_3)
22616 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22617@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22618 if (vcc->qos.aal != ATM_AAL0) {
22619 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22620 card->name, vpi, vci);
22621- atomic_inc(&vcc->stats->rx_drop);
22622+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22623 goto drop;
22624 }
22625
22626 if ((sb = dev_alloc_skb(64)) == NULL) {
22627 printk("%s: Can't allocate buffers for AAL0.\n",
22628 card->name);
22629- atomic_inc(&vcc->stats->rx_err);
22630+ atomic_inc_unchecked(&vcc->stats->rx_err);
22631 goto drop;
22632 }
22633
22634@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22635 ATM_SKB(sb)->vcc = vcc;
22636 __net_timestamp(sb);
22637 vcc->push(vcc, sb);
22638- atomic_inc(&vcc->stats->rx);
22639+ atomic_inc_unchecked(&vcc->stats->rx);
22640
22641 drop:
22642 skb_pull(queue, 64);
22643@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22644
22645 if (vc == NULL) {
22646 printk("%s: NULL connection in send().\n", card->name);
22647- atomic_inc(&vcc->stats->tx_err);
22648+ atomic_inc_unchecked(&vcc->stats->tx_err);
22649 dev_kfree_skb(skb);
22650 return -EINVAL;
22651 }
22652 if (!test_bit(VCF_TX, &vc->flags)) {
22653 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22654- atomic_inc(&vcc->stats->tx_err);
22655+ atomic_inc_unchecked(&vcc->stats->tx_err);
22656 dev_kfree_skb(skb);
22657 return -EINVAL;
22658 }
22659@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22660 break;
22661 default:
22662 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22663- atomic_inc(&vcc->stats->tx_err);
22664+ atomic_inc_unchecked(&vcc->stats->tx_err);
22665 dev_kfree_skb(skb);
22666 return -EINVAL;
22667 }
22668
22669 if (skb_shinfo(skb)->nr_frags != 0) {
22670 printk("%s: No scatter-gather yet.\n", card->name);
22671- atomic_inc(&vcc->stats->tx_err);
22672+ atomic_inc_unchecked(&vcc->stats->tx_err);
22673 dev_kfree_skb(skb);
22674 return -EINVAL;
22675 }
22676@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22677
22678 err = queue_skb(card, vc, skb, oam);
22679 if (err) {
22680- atomic_inc(&vcc->stats->tx_err);
22681+ atomic_inc_unchecked(&vcc->stats->tx_err);
22682 dev_kfree_skb(skb);
22683 return err;
22684 }
22685@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22686 skb = dev_alloc_skb(64);
22687 if (!skb) {
22688 printk("%s: Out of memory in send_oam().\n", card->name);
22689- atomic_inc(&vcc->stats->tx_err);
22690+ atomic_inc_unchecked(&vcc->stats->tx_err);
22691 return -ENOMEM;
22692 }
22693 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22694diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22695--- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22696+++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22697@@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22698 status = (u_short) (buf_desc_ptr->desc_mode);
22699 if (status & (RX_CER | RX_PTE | RX_OFL))
22700 {
22701- atomic_inc(&vcc->stats->rx_err);
22702+ atomic_inc_unchecked(&vcc->stats->rx_err);
22703 IF_ERR(printk("IA: bad packet, dropping it");)
22704 if (status & RX_CER) {
22705 IF_ERR(printk(" cause: packet CRC error\n");)
22706@@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22707 len = dma_addr - buf_addr;
22708 if (len > iadev->rx_buf_sz) {
22709 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22710- atomic_inc(&vcc->stats->rx_err);
22711+ atomic_inc_unchecked(&vcc->stats->rx_err);
22712 goto out_free_desc;
22713 }
22714
22715@@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22716 ia_vcc = INPH_IA_VCC(vcc);
22717 if (ia_vcc == NULL)
22718 {
22719- atomic_inc(&vcc->stats->rx_err);
22720+ atomic_inc_unchecked(&vcc->stats->rx_err);
22721 dev_kfree_skb_any(skb);
22722 atm_return(vcc, atm_guess_pdu2truesize(len));
22723 goto INCR_DLE;
22724@@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22725 if ((length > iadev->rx_buf_sz) || (length >
22726 (skb->len - sizeof(struct cpcs_trailer))))
22727 {
22728- atomic_inc(&vcc->stats->rx_err);
22729+ atomic_inc_unchecked(&vcc->stats->rx_err);
22730 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22731 length, skb->len);)
22732 dev_kfree_skb_any(skb);
22733@@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22734
22735 IF_RX(printk("rx_dle_intr: skb push");)
22736 vcc->push(vcc,skb);
22737- atomic_inc(&vcc->stats->rx);
22738+ atomic_inc_unchecked(&vcc->stats->rx);
22739 iadev->rx_pkt_cnt++;
22740 }
22741 INCR_DLE:
22742@@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22743 {
22744 struct k_sonet_stats *stats;
22745 stats = &PRIV(_ia_dev[board])->sonet_stats;
22746- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22747- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22748- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22749- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22750- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22751- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22752- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22753- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22754- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22755+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22756+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22757+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22758+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22759+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22760+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22761+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22762+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22763+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22764 }
22765 ia_cmds.status = 0;
22766 break;
22767@@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22768 if ((desc == 0) || (desc > iadev->num_tx_desc))
22769 {
22770 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22771- atomic_inc(&vcc->stats->tx);
22772+ atomic_inc_unchecked(&vcc->stats->tx);
22773 if (vcc->pop)
22774 vcc->pop(vcc, skb);
22775 else
22776@@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22777 ATM_DESC(skb) = vcc->vci;
22778 skb_queue_tail(&iadev->tx_dma_q, skb);
22779
22780- atomic_inc(&vcc->stats->tx);
22781+ atomic_inc_unchecked(&vcc->stats->tx);
22782 iadev->tx_pkt_cnt++;
22783 /* Increment transaction counter */
22784 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22785
22786 #if 0
22787 /* add flow control logic */
22788- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22789+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22790 if (iavcc->vc_desc_cnt > 10) {
22791 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22792 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22793diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22794--- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22795+++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22796@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22797 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22798 lanai_endtx(lanai, lvcc);
22799 lanai_free_skb(lvcc->tx.atmvcc, skb);
22800- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22801+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22802 }
22803
22804 /* Try to fill the buffer - don't call unless there is backlog */
22805@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22806 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22807 __net_timestamp(skb);
22808 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22809- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22810+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22811 out:
22812 lvcc->rx.buf.ptr = end;
22813 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22814@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22815 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22816 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22817 lanai->stats.service_rxnotaal5++;
22818- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22819+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22820 return 0;
22821 }
22822 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22823@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22824 int bytes;
22825 read_unlock(&vcc_sklist_lock);
22826 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22827- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22828+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22829 lvcc->stats.x.aal5.service_trash++;
22830 bytes = (SERVICE_GET_END(s) * 16) -
22831 (((unsigned long) lvcc->rx.buf.ptr) -
22832@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22833 }
22834 if (s & SERVICE_STREAM) {
22835 read_unlock(&vcc_sklist_lock);
22836- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22837+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22838 lvcc->stats.x.aal5.service_stream++;
22839 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22840 "PDU on VCI %d!\n", lanai->number, vci);
22841@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22842 return 0;
22843 }
22844 DPRINTK("got rx crc error on vci %d\n", vci);
22845- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22846+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22847 lvcc->stats.x.aal5.service_rxcrc++;
22848 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22849 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22850diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22851--- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22852+++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22853@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22854 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22855 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22856 card->index);
22857- atomic_inc(&vcc->stats->tx_err);
22858+ atomic_inc_unchecked(&vcc->stats->tx_err);
22859 dev_kfree_skb_any(skb);
22860 return -EINVAL;
22861 }
22862@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22863 if (!vc->tx) {
22864 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22865 card->index);
22866- atomic_inc(&vcc->stats->tx_err);
22867+ atomic_inc_unchecked(&vcc->stats->tx_err);
22868 dev_kfree_skb_any(skb);
22869 return -EINVAL;
22870 }
22871@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22872 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22873 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22874 card->index);
22875- atomic_inc(&vcc->stats->tx_err);
22876+ atomic_inc_unchecked(&vcc->stats->tx_err);
22877 dev_kfree_skb_any(skb);
22878 return -EINVAL;
22879 }
22880
22881 if (skb_shinfo(skb)->nr_frags != 0) {
22882 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22883- atomic_inc(&vcc->stats->tx_err);
22884+ atomic_inc_unchecked(&vcc->stats->tx_err);
22885 dev_kfree_skb_any(skb);
22886 return -EINVAL;
22887 }
22888@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22889 }
22890
22891 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22892- atomic_inc(&vcc->stats->tx_err);
22893+ atomic_inc_unchecked(&vcc->stats->tx_err);
22894 dev_kfree_skb_any(skb);
22895 return -EIO;
22896 }
22897- atomic_inc(&vcc->stats->tx);
22898+ atomic_inc_unchecked(&vcc->stats->tx);
22899
22900 return 0;
22901 }
22902@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22903 printk
22904 ("nicstar%d: Can't allocate buffers for aal0.\n",
22905 card->index);
22906- atomic_add(i, &vcc->stats->rx_drop);
22907+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22908 break;
22909 }
22910 if (!atm_charge(vcc, sb->truesize)) {
22911 RXPRINTK
22912 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22913 card->index);
22914- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22915+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22916 dev_kfree_skb_any(sb);
22917 break;
22918 }
22919@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22920 ATM_SKB(sb)->vcc = vcc;
22921 __net_timestamp(sb);
22922 vcc->push(vcc, sb);
22923- atomic_inc(&vcc->stats->rx);
22924+ atomic_inc_unchecked(&vcc->stats->rx);
22925 cell += ATM_CELL_PAYLOAD;
22926 }
22927
22928@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22929 if (iovb == NULL) {
22930 printk("nicstar%d: Out of iovec buffers.\n",
22931 card->index);
22932- atomic_inc(&vcc->stats->rx_drop);
22933+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22934 recycle_rx_buf(card, skb);
22935 return;
22936 }
22937@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22938 small or large buffer itself. */
22939 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22940 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22941- atomic_inc(&vcc->stats->rx_err);
22942+ atomic_inc_unchecked(&vcc->stats->rx_err);
22943 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22944 NS_MAX_IOVECS);
22945 NS_PRV_IOVCNT(iovb) = 0;
22946@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22947 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22948 card->index);
22949 which_list(card, skb);
22950- atomic_inc(&vcc->stats->rx_err);
22951+ atomic_inc_unchecked(&vcc->stats->rx_err);
22952 recycle_rx_buf(card, skb);
22953 vc->rx_iov = NULL;
22954 recycle_iov_buf(card, iovb);
22955@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22956 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22957 card->index);
22958 which_list(card, skb);
22959- atomic_inc(&vcc->stats->rx_err);
22960+ atomic_inc_unchecked(&vcc->stats->rx_err);
22961 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22962 NS_PRV_IOVCNT(iovb));
22963 vc->rx_iov = NULL;
22964@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22965 printk(" - PDU size mismatch.\n");
22966 else
22967 printk(".\n");
22968- atomic_inc(&vcc->stats->rx_err);
22969+ atomic_inc_unchecked(&vcc->stats->rx_err);
22970 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22971 NS_PRV_IOVCNT(iovb));
22972 vc->rx_iov = NULL;
22973@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22974 /* skb points to a small buffer */
22975 if (!atm_charge(vcc, skb->truesize)) {
22976 push_rxbufs(card, skb);
22977- atomic_inc(&vcc->stats->rx_drop);
22978+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22979 } else {
22980 skb_put(skb, len);
22981 dequeue_sm_buf(card, skb);
22982@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22983 ATM_SKB(skb)->vcc = vcc;
22984 __net_timestamp(skb);
22985 vcc->push(vcc, skb);
22986- atomic_inc(&vcc->stats->rx);
22987+ atomic_inc_unchecked(&vcc->stats->rx);
22988 }
22989 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22990 struct sk_buff *sb;
22991@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22992 if (len <= NS_SMBUFSIZE) {
22993 if (!atm_charge(vcc, sb->truesize)) {
22994 push_rxbufs(card, sb);
22995- atomic_inc(&vcc->stats->rx_drop);
22996+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22997 } else {
22998 skb_put(sb, len);
22999 dequeue_sm_buf(card, sb);
23000@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23001 ATM_SKB(sb)->vcc = vcc;
23002 __net_timestamp(sb);
23003 vcc->push(vcc, sb);
23004- atomic_inc(&vcc->stats->rx);
23005+ atomic_inc_unchecked(&vcc->stats->rx);
23006 }
23007
23008 push_rxbufs(card, skb);
23009@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23010
23011 if (!atm_charge(vcc, skb->truesize)) {
23012 push_rxbufs(card, skb);
23013- atomic_inc(&vcc->stats->rx_drop);
23014+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23015 } else {
23016 dequeue_lg_buf(card, skb);
23017 #ifdef NS_USE_DESTRUCTORS
23018@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23019 ATM_SKB(skb)->vcc = vcc;
23020 __net_timestamp(skb);
23021 vcc->push(vcc, skb);
23022- atomic_inc(&vcc->stats->rx);
23023+ atomic_inc_unchecked(&vcc->stats->rx);
23024 }
23025
23026 push_rxbufs(card, sb);
23027@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23028 printk
23029 ("nicstar%d: Out of huge buffers.\n",
23030 card->index);
23031- atomic_inc(&vcc->stats->rx_drop);
23032+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23033 recycle_iovec_rx_bufs(card,
23034 (struct iovec *)
23035 iovb->data,
23036@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23037 card->hbpool.count++;
23038 } else
23039 dev_kfree_skb_any(hb);
23040- atomic_inc(&vcc->stats->rx_drop);
23041+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23042 } else {
23043 /* Copy the small buffer to the huge buffer */
23044 sb = (struct sk_buff *)iov->iov_base;
23045@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23046 #endif /* NS_USE_DESTRUCTORS */
23047 __net_timestamp(hb);
23048 vcc->push(vcc, hb);
23049- atomic_inc(&vcc->stats->rx);
23050+ atomic_inc_unchecked(&vcc->stats->rx);
23051 }
23052 }
23053
23054diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23055--- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23056+++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23057@@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23058 }
23059 atm_charge(vcc, skb->truesize);
23060 vcc->push(vcc, skb);
23061- atomic_inc(&vcc->stats->rx);
23062+ atomic_inc_unchecked(&vcc->stats->rx);
23063 break;
23064
23065 case PKT_STATUS:
23066@@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23067 char msg[500];
23068 char item[10];
23069
23070+ pax_track_stack();
23071+
23072 len = buf->len;
23073 for (i = 0; i < len; i++){
23074 if(i % 8 == 0)
23075@@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23076 vcc = SKB_CB(oldskb)->vcc;
23077
23078 if (vcc) {
23079- atomic_inc(&vcc->stats->tx);
23080+ atomic_inc_unchecked(&vcc->stats->tx);
23081 solos_pop(vcc, oldskb);
23082 } else
23083 dev_kfree_skb_irq(oldskb);
23084diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23085--- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23086+++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23087@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23088
23089
23090 #define ADD_LIMITED(s,v) \
23091- atomic_add((v),&stats->s); \
23092- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23093+ atomic_add_unchecked((v),&stats->s); \
23094+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23095
23096
23097 static void suni_hz(unsigned long from_timer)
23098diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23099--- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23100+++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23101@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23102 struct sonet_stats tmp;
23103 int error = 0;
23104
23105- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23106+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23107 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23108 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23109 if (zero && !error) {
23110@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23111
23112
23113 #define ADD_LIMITED(s,v) \
23114- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23115- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23116- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23117+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23118+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23119+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23120
23121
23122 static void stat_event(struct atm_dev *dev)
23123@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23124 if (reason & uPD98402_INT_PFM) stat_event(dev);
23125 if (reason & uPD98402_INT_PCO) {
23126 (void) GET(PCOCR); /* clear interrupt cause */
23127- atomic_add(GET(HECCT),
23128+ atomic_add_unchecked(GET(HECCT),
23129 &PRIV(dev)->sonet_stats.uncorr_hcs);
23130 }
23131 if ((reason & uPD98402_INT_RFO) &&
23132@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23133 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23134 uPD98402_INT_LOS),PIMR); /* enable them */
23135 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23136- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23137- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23138- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23139+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23140+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23141+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23142 return 0;
23143 }
23144
23145diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23146--- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23147+++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23148@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23149 }
23150 if (!size) {
23151 dev_kfree_skb_irq(skb);
23152- if (vcc) atomic_inc(&vcc->stats->rx_err);
23153+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23154 continue;
23155 }
23156 if (!atm_charge(vcc,skb->truesize)) {
23157@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23158 skb->len = size;
23159 ATM_SKB(skb)->vcc = vcc;
23160 vcc->push(vcc,skb);
23161- atomic_inc(&vcc->stats->rx);
23162+ atomic_inc_unchecked(&vcc->stats->rx);
23163 }
23164 zout(pos & 0xffff,MTA(mbx));
23165 #if 0 /* probably a stupid idea */
23166@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23167 skb_queue_head(&zatm_vcc->backlog,skb);
23168 break;
23169 }
23170- atomic_inc(&vcc->stats->tx);
23171+ atomic_inc_unchecked(&vcc->stats->tx);
23172 wake_up(&zatm_vcc->tx_wait);
23173 }
23174
23175diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23176--- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23177+++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23178@@ -29,14 +29,14 @@ bool events_check_enabled;
23179 * They need to be modified together atomically, so it's better to use one
23180 * atomic variable to hold them both.
23181 */
23182-static atomic_t combined_event_count = ATOMIC_INIT(0);
23183+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23184
23185 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23186 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23187
23188 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23189 {
23190- unsigned int comb = atomic_read(&combined_event_count);
23191+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23192
23193 *cnt = (comb >> IN_PROGRESS_BITS);
23194 *inpr = comb & MAX_IN_PROGRESS;
23195@@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23196 ws->last_time = ktime_get();
23197
23198 /* Increment the counter of events in progress. */
23199- atomic_inc(&combined_event_count);
23200+ atomic_inc_unchecked(&combined_event_count);
23201 }
23202
23203 /**
23204@@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23205 * Increment the counter of registered wakeup events and decrement the
23206 * couter of wakeup events in progress simultaneously.
23207 */
23208- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23209+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23210 }
23211
23212 /**
23213diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23214--- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23215+++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23216@@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23217 int err;
23218 u32 cp;
23219
23220+ memset(&arg64, 0, sizeof(arg64));
23221+
23222 err = 0;
23223 err |=
23224 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23225@@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23226 while (!list_empty(&h->reqQ)) {
23227 c = list_entry(h->reqQ.next, CommandList_struct, list);
23228 /* can't do anything if fifo is full */
23229- if ((h->access.fifo_full(h))) {
23230+ if ((h->access->fifo_full(h))) {
23231 dev_warn(&h->pdev->dev, "fifo full\n");
23232 break;
23233 }
23234@@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23235 h->Qdepth--;
23236
23237 /* Tell the controller execute command */
23238- h->access.submit_command(h, c);
23239+ h->access->submit_command(h, c);
23240
23241 /* Put job onto the completed Q */
23242 addQ(&h->cmpQ, c);
23243@@ -3369,17 +3371,17 @@ startio:
23244
23245 static inline unsigned long get_next_completion(ctlr_info_t *h)
23246 {
23247- return h->access.command_completed(h);
23248+ return h->access->command_completed(h);
23249 }
23250
23251 static inline int interrupt_pending(ctlr_info_t *h)
23252 {
23253- return h->access.intr_pending(h);
23254+ return h->access->intr_pending(h);
23255 }
23256
23257 static inline long interrupt_not_for_us(ctlr_info_t *h)
23258 {
23259- return ((h->access.intr_pending(h) == 0) ||
23260+ return ((h->access->intr_pending(h) == 0) ||
23261 (h->interrupts_enabled == 0));
23262 }
23263
23264@@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23265 u32 a;
23266
23267 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23268- return h->access.command_completed(h);
23269+ return h->access->command_completed(h);
23270
23271 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23272 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23273@@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23274 trans_support & CFGTBL_Trans_use_short_tags);
23275
23276 /* Change the access methods to the performant access methods */
23277- h->access = SA5_performant_access;
23278+ h->access = &SA5_performant_access;
23279 h->transMethod = CFGTBL_Trans_Performant;
23280
23281 return;
23282@@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23283 if (prod_index < 0)
23284 return -ENODEV;
23285 h->product_name = products[prod_index].product_name;
23286- h->access = *(products[prod_index].access);
23287+ h->access = products[prod_index].access;
23288
23289 if (cciss_board_disabled(h)) {
23290 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23291@@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23292 }
23293
23294 /* make sure the board interrupts are off */
23295- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23296+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23297 if (h->msi_vector || h->msix_vector) {
23298 if (request_irq(h->intr[PERF_MODE_INT],
23299 do_cciss_msix_intr,
23300@@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23301 cciss_scsi_setup(h);
23302
23303 /* Turn the interrupts on so we can service requests */
23304- h->access.set_intr_mask(h, CCISS_INTR_ON);
23305+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23306
23307 /* Get the firmware version */
23308 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23309@@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23310 kfree(flush_buf);
23311 if (return_code != IO_OK)
23312 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23313- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23314+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23315 free_irq(h->intr[PERF_MODE_INT], h);
23316 }
23317
23318diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23319--- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23320+++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23321@@ -100,7 +100,7 @@ struct ctlr_info
23322 /* information about each logical volume */
23323 drive_info_struct *drv[CISS_MAX_LUN];
23324
23325- struct access_method access;
23326+ struct access_method *access;
23327
23328 /* queue and queue Info */
23329 struct list_head reqQ;
23330diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23331--- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23332+++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23333@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23334 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23335 goto Enomem4;
23336 }
23337- hba[i]->access.set_intr_mask(hba[i], 0);
23338+ hba[i]->access->set_intr_mask(hba[i], 0);
23339 if (request_irq(hba[i]->intr, do_ida_intr,
23340 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23341 {
23342@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23343 add_timer(&hba[i]->timer);
23344
23345 /* Enable IRQ now that spinlock and rate limit timer are set up */
23346- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23347+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23348
23349 for(j=0; j<NWD; j++) {
23350 struct gendisk *disk = ida_gendisk[i][j];
23351@@ -694,7 +694,7 @@ DBGINFO(
23352 for(i=0; i<NR_PRODUCTS; i++) {
23353 if (board_id == products[i].board_id) {
23354 c->product_name = products[i].product_name;
23355- c->access = *(products[i].access);
23356+ c->access = products[i].access;
23357 break;
23358 }
23359 }
23360@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23361 hba[ctlr]->intr = intr;
23362 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23363 hba[ctlr]->product_name = products[j].product_name;
23364- hba[ctlr]->access = *(products[j].access);
23365+ hba[ctlr]->access = products[j].access;
23366 hba[ctlr]->ctlr = ctlr;
23367 hba[ctlr]->board_id = board_id;
23368 hba[ctlr]->pci_dev = NULL; /* not PCI */
23369@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23370 struct scatterlist tmp_sg[SG_MAX];
23371 int i, dir, seg;
23372
23373+ pax_track_stack();
23374+
23375 queue_next:
23376 creq = blk_peek_request(q);
23377 if (!creq)
23378@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23379
23380 while((c = h->reqQ) != NULL) {
23381 /* Can't do anything if we're busy */
23382- if (h->access.fifo_full(h) == 0)
23383+ if (h->access->fifo_full(h) == 0)
23384 return;
23385
23386 /* Get the first entry from the request Q */
23387@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23388 h->Qdepth--;
23389
23390 /* Tell the controller to do our bidding */
23391- h->access.submit_command(h, c);
23392+ h->access->submit_command(h, c);
23393
23394 /* Get onto the completion Q */
23395 addQ(&h->cmpQ, c);
23396@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23397 unsigned long flags;
23398 __u32 a,a1;
23399
23400- istat = h->access.intr_pending(h);
23401+ istat = h->access->intr_pending(h);
23402 /* Is this interrupt for us? */
23403 if (istat == 0)
23404 return IRQ_NONE;
23405@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23406 */
23407 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23408 if (istat & FIFO_NOT_EMPTY) {
23409- while((a = h->access.command_completed(h))) {
23410+ while((a = h->access->command_completed(h))) {
23411 a1 = a; a &= ~3;
23412 if ((c = h->cmpQ) == NULL)
23413 {
23414@@ -1449,11 +1451,11 @@ static int sendcmd(
23415 /*
23416 * Disable interrupt
23417 */
23418- info_p->access.set_intr_mask(info_p, 0);
23419+ info_p->access->set_intr_mask(info_p, 0);
23420 /* Make sure there is room in the command FIFO */
23421 /* Actually it should be completely empty at this time. */
23422 for (i = 200000; i > 0; i--) {
23423- temp = info_p->access.fifo_full(info_p);
23424+ temp = info_p->access->fifo_full(info_p);
23425 if (temp != 0) {
23426 break;
23427 }
23428@@ -1466,7 +1468,7 @@ DBG(
23429 /*
23430 * Send the cmd
23431 */
23432- info_p->access.submit_command(info_p, c);
23433+ info_p->access->submit_command(info_p, c);
23434 complete = pollcomplete(ctlr);
23435
23436 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23437@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23438 * we check the new geometry. Then turn interrupts back on when
23439 * we're done.
23440 */
23441- host->access.set_intr_mask(host, 0);
23442+ host->access->set_intr_mask(host, 0);
23443 getgeometry(ctlr);
23444- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23445+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23446
23447 for(i=0; i<NWD; i++) {
23448 struct gendisk *disk = ida_gendisk[ctlr][i];
23449@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23450 /* Wait (up to 2 seconds) for a command to complete */
23451
23452 for (i = 200000; i > 0; i--) {
23453- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23454+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23455 if (done == 0) {
23456 udelay(10); /* a short fixed delay */
23457 } else
23458diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23459--- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23460+++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23461@@ -99,7 +99,7 @@ struct ctlr_info {
23462 drv_info_t drv[NWD];
23463 struct proc_dir_entry *proc;
23464
23465- struct access_method access;
23466+ struct access_method *access;
23467
23468 cmdlist_t *reqQ;
23469 cmdlist_t *cmpQ;
23470diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23471--- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23472+++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23473@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23474 unsigned long flags;
23475 int Channel, TargetID;
23476
23477+ pax_track_stack();
23478+
23479 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23480 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23481 sizeof(DAC960_SCSI_Inquiry_T) +
23482diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23483--- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23484+++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23485@@ -736,7 +736,7 @@ struct drbd_request;
23486 struct drbd_epoch {
23487 struct list_head list;
23488 unsigned int barrier_nr;
23489- atomic_t epoch_size; /* increased on every request added. */
23490+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23491 atomic_t active; /* increased on every req. added, and dec on every finished. */
23492 unsigned long flags;
23493 };
23494@@ -1108,7 +1108,7 @@ struct drbd_conf {
23495 void *int_dig_in;
23496 void *int_dig_vv;
23497 wait_queue_head_t seq_wait;
23498- atomic_t packet_seq;
23499+ atomic_unchecked_t packet_seq;
23500 unsigned int peer_seq;
23501 spinlock_t peer_seq_lock;
23502 unsigned int minor;
23503diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23504--- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23505+++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23506@@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23507 p.sector = sector;
23508 p.block_id = block_id;
23509 p.blksize = blksize;
23510- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23511+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23512
23513 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23514 return false;
23515@@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23516 p.sector = cpu_to_be64(req->sector);
23517 p.block_id = (unsigned long)req;
23518 p.seq_num = cpu_to_be32(req->seq_num =
23519- atomic_add_return(1, &mdev->packet_seq));
23520+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23521
23522 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23523
23524@@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23525 atomic_set(&mdev->unacked_cnt, 0);
23526 atomic_set(&mdev->local_cnt, 0);
23527 atomic_set(&mdev->net_cnt, 0);
23528- atomic_set(&mdev->packet_seq, 0);
23529+ atomic_set_unchecked(&mdev->packet_seq, 0);
23530 atomic_set(&mdev->pp_in_use, 0);
23531 atomic_set(&mdev->pp_in_use_by_net, 0);
23532 atomic_set(&mdev->rs_sect_in, 0);
23533@@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23534 mdev->receiver.t_state);
23535
23536 /* no need to lock it, I'm the only thread alive */
23537- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23538- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23539+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23540+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23541 mdev->al_writ_cnt =
23542 mdev->bm_writ_cnt =
23543 mdev->read_cnt =
23544diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23545--- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23546+++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23547@@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23548 module_put(THIS_MODULE);
23549 }
23550
23551-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23552+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23553
23554 static unsigned short *
23555 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23556@@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23557 cn_reply->id.idx = CN_IDX_DRBD;
23558 cn_reply->id.val = CN_VAL_DRBD;
23559
23560- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23561+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23562 cn_reply->ack = 0; /* not used here. */
23563 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23564 (int)((char *)tl - (char *)reply->tag_list);
23565@@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23566 cn_reply->id.idx = CN_IDX_DRBD;
23567 cn_reply->id.val = CN_VAL_DRBD;
23568
23569- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23570+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23571 cn_reply->ack = 0; /* not used here. */
23572 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23573 (int)((char *)tl - (char *)reply->tag_list);
23574@@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23575 cn_reply->id.idx = CN_IDX_DRBD;
23576 cn_reply->id.val = CN_VAL_DRBD;
23577
23578- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23579+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23580 cn_reply->ack = 0; // not used here.
23581 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23582 (int)((char*)tl - (char*)reply->tag_list);
23583@@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23584 cn_reply->id.idx = CN_IDX_DRBD;
23585 cn_reply->id.val = CN_VAL_DRBD;
23586
23587- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23588+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23589 cn_reply->ack = 0; /* not used here. */
23590 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23591 (int)((char *)tl - (char *)reply->tag_list);
23592diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23593--- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23594+++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23595@@ -894,7 +894,7 @@ retry:
23596 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23597 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23598
23599- atomic_set(&mdev->packet_seq, 0);
23600+ atomic_set_unchecked(&mdev->packet_seq, 0);
23601 mdev->peer_seq = 0;
23602
23603 drbd_thread_start(&mdev->asender);
23604@@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23605 do {
23606 next_epoch = NULL;
23607
23608- epoch_size = atomic_read(&epoch->epoch_size);
23609+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23610
23611 switch (ev & ~EV_CLEANUP) {
23612 case EV_PUT:
23613@@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23614 rv = FE_DESTROYED;
23615 } else {
23616 epoch->flags = 0;
23617- atomic_set(&epoch->epoch_size, 0);
23618+ atomic_set_unchecked(&epoch->epoch_size, 0);
23619 /* atomic_set(&epoch->active, 0); is already zero */
23620 if (rv == FE_STILL_LIVE)
23621 rv = FE_RECYCLED;
23622@@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23623 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23624 drbd_flush(mdev);
23625
23626- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23627+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23628 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23629 if (epoch)
23630 break;
23631 }
23632
23633 epoch = mdev->current_epoch;
23634- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23635+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23636
23637 D_ASSERT(atomic_read(&epoch->active) == 0);
23638 D_ASSERT(epoch->flags == 0);
23639@@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23640 }
23641
23642 epoch->flags = 0;
23643- atomic_set(&epoch->epoch_size, 0);
23644+ atomic_set_unchecked(&epoch->epoch_size, 0);
23645 atomic_set(&epoch->active, 0);
23646
23647 spin_lock(&mdev->epoch_lock);
23648- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23649+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23650 list_add(&epoch->list, &mdev->current_epoch->list);
23651 mdev->current_epoch = epoch;
23652 mdev->epochs++;
23653@@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23654 spin_unlock(&mdev->peer_seq_lock);
23655
23656 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23657- atomic_inc(&mdev->current_epoch->epoch_size);
23658+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23659 return drbd_drain_block(mdev, data_size);
23660 }
23661
23662@@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23663
23664 spin_lock(&mdev->epoch_lock);
23665 e->epoch = mdev->current_epoch;
23666- atomic_inc(&e->epoch->epoch_size);
23667+ atomic_inc_unchecked(&e->epoch->epoch_size);
23668 atomic_inc(&e->epoch->active);
23669 spin_unlock(&mdev->epoch_lock);
23670
23671@@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23672 D_ASSERT(list_empty(&mdev->done_ee));
23673
23674 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23675- atomic_set(&mdev->current_epoch->epoch_size, 0);
23676+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23677 D_ASSERT(list_empty(&mdev->current_epoch->list));
23678 }
23679
23680diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23681--- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23682+++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23683@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23684 struct kvec iov;
23685 sigset_t blocked, oldset;
23686
23687+ pax_track_stack();
23688+
23689 if (unlikely(!sock)) {
23690 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23691 lo->disk->disk_name, (send ? "send" : "recv"));
23692@@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23693 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23694 unsigned int cmd, unsigned long arg)
23695 {
23696+ pax_track_stack();
23697+
23698 switch (cmd) {
23699 case NBD_DISCONNECT: {
23700 struct request sreq;
23701diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23702--- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23703+++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23704@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23705 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23706 return -EFAULT;
23707
23708- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23709+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23710 return -EFAULT;
23711
23712 client = agp_find_client_by_pid(reserve.pid);
23713diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23714--- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23715+++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23716@@ -9,6 +9,7 @@
23717 #include <linux/types.h>
23718 #include <linux/errno.h>
23719 #include <linux/tty.h>
23720+#include <linux/mutex.h>
23721 #include <linux/timer.h>
23722 #include <linux/kernel.h>
23723 #include <linux/wait.h>
23724@@ -34,6 +35,7 @@ static int vfd_is_open;
23725 static unsigned char vfd[40];
23726 static int vfd_cursor;
23727 static unsigned char ledpb, led;
23728+static DEFINE_MUTEX(vfd_mutex);
23729
23730 static void update_vfd(void)
23731 {
23732@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23733 if (!vfd_is_open)
23734 return -EBUSY;
23735
23736+ mutex_lock(&vfd_mutex);
23737 for (;;) {
23738 char c;
23739 if (!indx)
23740 break;
23741- if (get_user(c, buf))
23742+ if (get_user(c, buf)) {
23743+ mutex_unlock(&vfd_mutex);
23744 return -EFAULT;
23745+ }
23746 if (esc) {
23747 set_led(c);
23748 esc = 0;
23749@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23750 buf++;
23751 }
23752 update_vfd();
23753+ mutex_unlock(&vfd_mutex);
23754
23755 return len;
23756 }
23757diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23758--- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23759+++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23760@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23761 switch (cmd) {
23762
23763 case RTC_PLL_GET:
23764+ memset(&pll, 0, sizeof(pll));
23765 if (get_rtc_pll(&pll))
23766 return -EINVAL;
23767 else
23768diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23769--- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23770+++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23771@@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23772 }
23773
23774 static int
23775-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23776+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23777 struct hpet_info *info)
23778 {
23779 struct hpet_timer __iomem *timer;
23780diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23781--- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23782+++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23783@@ -414,7 +414,7 @@ struct ipmi_smi {
23784 struct proc_dir_entry *proc_dir;
23785 char proc_dir_name[10];
23786
23787- atomic_t stats[IPMI_NUM_STATS];
23788+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23789
23790 /*
23791 * run_to_completion duplicate of smb_info, smi_info
23792@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23793
23794
23795 #define ipmi_inc_stat(intf, stat) \
23796- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23797+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23798 #define ipmi_get_stat(intf, stat) \
23799- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23800+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23801
23802 static int is_lan_addr(struct ipmi_addr *addr)
23803 {
23804@@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23805 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23806 init_waitqueue_head(&intf->waitq);
23807 for (i = 0; i < IPMI_NUM_STATS; i++)
23808- atomic_set(&intf->stats[i], 0);
23809+ atomic_set_unchecked(&intf->stats[i], 0);
23810
23811 intf->proc_dir = NULL;
23812
23813@@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23814 struct ipmi_smi_msg smi_msg;
23815 struct ipmi_recv_msg recv_msg;
23816
23817+ pax_track_stack();
23818+
23819 si = (struct ipmi_system_interface_addr *) &addr;
23820 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23821 si->channel = IPMI_BMC_CHANNEL;
23822diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23823--- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23824+++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23825@@ -276,7 +276,7 @@ struct smi_info {
23826 unsigned char slave_addr;
23827
23828 /* Counters and things for the proc filesystem. */
23829- atomic_t stats[SI_NUM_STATS];
23830+ atomic_unchecked_t stats[SI_NUM_STATS];
23831
23832 struct task_struct *thread;
23833
23834@@ -285,9 +285,9 @@ struct smi_info {
23835 };
23836
23837 #define smi_inc_stat(smi, stat) \
23838- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23839+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23840 #define smi_get_stat(smi, stat) \
23841- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23842+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23843
23844 #define SI_MAX_PARMS 4
23845
23846@@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23847 atomic_set(&new_smi->req_events, 0);
23848 new_smi->run_to_completion = 0;
23849 for (i = 0; i < SI_NUM_STATS; i++)
23850- atomic_set(&new_smi->stats[i], 0);
23851+ atomic_set_unchecked(&new_smi->stats[i], 0);
23852
23853 new_smi->interrupt_disabled = 1;
23854 atomic_set(&new_smi->stop_operation, 0);
23855diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23856--- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23857+++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23858@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23859
23860 config DEVKMEM
23861 bool "/dev/kmem virtual device support"
23862- default y
23863+ default n
23864+ depends on !GRKERNSEC_KMEM
23865 help
23866 Say Y here if you want to support the /dev/kmem device. The
23867 /dev/kmem device is rarely used, but can be used for certain
23868@@ -596,6 +597,7 @@ config DEVPORT
23869 bool
23870 depends on !M68K
23871 depends on ISA || PCI
23872+ depends on !GRKERNSEC_KMEM
23873 default y
23874
23875 source "drivers/s390/char/Kconfig"
23876diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23877--- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23878+++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23879@@ -18,6 +18,7 @@
23880 #include <linux/raw.h>
23881 #include <linux/tty.h>
23882 #include <linux/capability.h>
23883+#include <linux/security.h>
23884 #include <linux/ptrace.h>
23885 #include <linux/device.h>
23886 #include <linux/highmem.h>
23887@@ -34,6 +35,10 @@
23888 # include <linux/efi.h>
23889 #endif
23890
23891+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23892+extern struct file_operations grsec_fops;
23893+#endif
23894+
23895 static inline unsigned long size_inside_page(unsigned long start,
23896 unsigned long size)
23897 {
23898@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23899
23900 while (cursor < to) {
23901 if (!devmem_is_allowed(pfn)) {
23902+#ifdef CONFIG_GRKERNSEC_KMEM
23903+ gr_handle_mem_readwrite(from, to);
23904+#else
23905 printk(KERN_INFO
23906 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23907 current->comm, from, to);
23908+#endif
23909 return 0;
23910 }
23911 cursor += PAGE_SIZE;
23912@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23913 }
23914 return 1;
23915 }
23916+#elif defined(CONFIG_GRKERNSEC_KMEM)
23917+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23918+{
23919+ return 0;
23920+}
23921 #else
23922 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23923 {
23924@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23925
23926 while (count > 0) {
23927 unsigned long remaining;
23928+ char *temp;
23929
23930 sz = size_inside_page(p, count);
23931
23932@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23933 if (!ptr)
23934 return -EFAULT;
23935
23936- remaining = copy_to_user(buf, ptr, sz);
23937+#ifdef CONFIG_PAX_USERCOPY
23938+ temp = kmalloc(sz, GFP_KERNEL);
23939+ if (!temp) {
23940+ unxlate_dev_mem_ptr(p, ptr);
23941+ return -ENOMEM;
23942+ }
23943+ memcpy(temp, ptr, sz);
23944+#else
23945+ temp = ptr;
23946+#endif
23947+
23948+ remaining = copy_to_user(buf, temp, sz);
23949+
23950+#ifdef CONFIG_PAX_USERCOPY
23951+ kfree(temp);
23952+#endif
23953+
23954 unxlate_dev_mem_ptr(p, ptr);
23955 if (remaining)
23956 return -EFAULT;
23957@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23958 size_t count, loff_t *ppos)
23959 {
23960 unsigned long p = *ppos;
23961- ssize_t low_count, read, sz;
23962+ ssize_t low_count, read, sz, err = 0;
23963 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23964- int err = 0;
23965
23966 read = 0;
23967 if (p < (unsigned long) high_memory) {
23968@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23969 }
23970 #endif
23971 while (low_count > 0) {
23972+ char *temp;
23973+
23974 sz = size_inside_page(p, low_count);
23975
23976 /*
23977@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23978 */
23979 kbuf = xlate_dev_kmem_ptr((char *)p);
23980
23981- if (copy_to_user(buf, kbuf, sz))
23982+#ifdef CONFIG_PAX_USERCOPY
23983+ temp = kmalloc(sz, GFP_KERNEL);
23984+ if (!temp)
23985+ return -ENOMEM;
23986+ memcpy(temp, kbuf, sz);
23987+#else
23988+ temp = kbuf;
23989+#endif
23990+
23991+ err = copy_to_user(buf, temp, sz);
23992+
23993+#ifdef CONFIG_PAX_USERCOPY
23994+ kfree(temp);
23995+#endif
23996+
23997+ if (err)
23998 return -EFAULT;
23999 buf += sz;
24000 p += sz;
24001@@ -854,6 +901,9 @@ static const struct memdev {
24002 #ifdef CONFIG_CRASH_DUMP
24003 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24004 #endif
24005+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24006+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24007+#endif
24008 };
24009
24010 static int memory_open(struct inode *inode, struct file *filp)
24011diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24012--- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24013+++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24014@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24015
24016 spin_unlock_irq(&rtc_lock);
24017
24018- if (copy_to_user(buf, contents, tmp - contents))
24019+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24020 return -EFAULT;
24021
24022 *ppos = i;
24023diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24024--- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24025+++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24026@@ -261,8 +261,13 @@
24027 /*
24028 * Configuration information
24029 */
24030+#ifdef CONFIG_GRKERNSEC_RANDNET
24031+#define INPUT_POOL_WORDS 512
24032+#define OUTPUT_POOL_WORDS 128
24033+#else
24034 #define INPUT_POOL_WORDS 128
24035 #define OUTPUT_POOL_WORDS 32
24036+#endif
24037 #define SEC_XFER_SIZE 512
24038 #define EXTRACT_SIZE 10
24039
24040@@ -300,10 +305,17 @@ static struct poolinfo {
24041 int poolwords;
24042 int tap1, tap2, tap3, tap4, tap5;
24043 } poolinfo_table[] = {
24044+#ifdef CONFIG_GRKERNSEC_RANDNET
24045+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24046+ { 512, 411, 308, 208, 104, 1 },
24047+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24048+ { 128, 103, 76, 51, 25, 1 },
24049+#else
24050 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24051 { 128, 103, 76, 51, 25, 1 },
24052 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24053 { 32, 26, 20, 14, 7, 1 },
24054+#endif
24055 #if 0
24056 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24057 { 2048, 1638, 1231, 819, 411, 1 },
24058@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24059
24060 extract_buf(r, tmp);
24061 i = min_t(int, nbytes, EXTRACT_SIZE);
24062- if (copy_to_user(buf, tmp, i)) {
24063+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24064 ret = -EFAULT;
24065 break;
24066 }
24067@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24068 #include <linux/sysctl.h>
24069
24070 static int min_read_thresh = 8, min_write_thresh;
24071-static int max_read_thresh = INPUT_POOL_WORDS * 32;
24072+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24073 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24074 static char sysctl_bootid[16];
24075
24076diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24077--- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24078+++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24079@@ -55,6 +55,7 @@
24080 #include <asm/uaccess.h>
24081 #include <asm/io.h>
24082 #include <asm/system.h>
24083+#include <asm/local.h>
24084
24085 #include <linux/sonypi.h>
24086
24087@@ -491,7 +492,7 @@ static struct sonypi_device {
24088 spinlock_t fifo_lock;
24089 wait_queue_head_t fifo_proc_list;
24090 struct fasync_struct *fifo_async;
24091- int open_count;
24092+ local_t open_count;
24093 int model;
24094 struct input_dev *input_jog_dev;
24095 struct input_dev *input_key_dev;
24096@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24097 static int sonypi_misc_release(struct inode *inode, struct file *file)
24098 {
24099 mutex_lock(&sonypi_device.lock);
24100- sonypi_device.open_count--;
24101+ local_dec(&sonypi_device.open_count);
24102 mutex_unlock(&sonypi_device.lock);
24103 return 0;
24104 }
24105@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24106 {
24107 mutex_lock(&sonypi_device.lock);
24108 /* Flush input queue on first open */
24109- if (!sonypi_device.open_count)
24110+ if (!local_read(&sonypi_device.open_count))
24111 kfifo_reset(&sonypi_device.fifo);
24112- sonypi_device.open_count++;
24113+ local_inc(&sonypi_device.open_count);
24114 mutex_unlock(&sonypi_device.lock);
24115
24116 return 0;
24117diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24118--- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24119+++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24120@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24121 event = addr;
24122
24123 if ((event->event_type == 0 && event->event_size == 0) ||
24124- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24125+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24126 return NULL;
24127
24128 return addr;
24129@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24130 return NULL;
24131
24132 if ((event->event_type == 0 && event->event_size == 0) ||
24133- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24134+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24135 return NULL;
24136
24137 (*pos)++;
24138@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24139 int i;
24140
24141 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24142- seq_putc(m, data[i]);
24143+ if (!seq_putc(m, data[i]))
24144+ return -EFAULT;
24145
24146 return 0;
24147 }
24148@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24149 log->bios_event_log_end = log->bios_event_log + len;
24150
24151 virt = acpi_os_map_memory(start, len);
24152+ if (!virt) {
24153+ kfree(log->bios_event_log);
24154+ log->bios_event_log = NULL;
24155+ return -EFAULT;
24156+ }
24157
24158 memcpy(log->bios_event_log, virt, len);
24159
24160diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24161--- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24162+++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24163@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24164 chip->vendor.req_complete_val)
24165 goto out_recv;
24166
24167- if ((status == chip->vendor.req_canceled)) {
24168+ if (status == chip->vendor.req_canceled) {
24169 dev_err(chip->dev, "Operation Canceled\n");
24170 rc = -ECANCELED;
24171 goto out;
24172@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24173
24174 struct tpm_chip *chip = dev_get_drvdata(dev);
24175
24176+ pax_track_stack();
24177+
24178 tpm_cmd.header.in = tpm_readpubek_header;
24179 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24180 "attempting to read the PUBEK");
24181diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24182--- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24183+++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24184@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24185 0xCA, 0x34, 0x2B, 0x2E};
24186 struct scatterlist sg;
24187
24188+ pax_track_stack();
24189+
24190 memset(src, 0, sizeof(src));
24191 memset(ctx.key, 0, sizeof(ctx.key));
24192
24193diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24194--- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24195+++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24196@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24197 struct crypto_aes_ctx gen_aes;
24198 int cpu;
24199
24200+ pax_track_stack();
24201+
24202 if (key_len % 8) {
24203 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24204 return -EINVAL;
24205diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24206--- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24207+++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24208@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24209 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24210 static int edac_pci_poll_msec = 1000; /* one second workq period */
24211
24212-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24213-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24214+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24215+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24216
24217 static struct kobject *edac_pci_top_main_kobj;
24218 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24219@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24220 edac_printk(KERN_CRIT, EDAC_PCI,
24221 "Signaled System Error on %s\n",
24222 pci_name(dev));
24223- atomic_inc(&pci_nonparity_count);
24224+ atomic_inc_unchecked(&pci_nonparity_count);
24225 }
24226
24227 if (status & (PCI_STATUS_PARITY)) {
24228@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24229 "Master Data Parity Error on %s\n",
24230 pci_name(dev));
24231
24232- atomic_inc(&pci_parity_count);
24233+ atomic_inc_unchecked(&pci_parity_count);
24234 }
24235
24236 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24237@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24238 "Detected Parity Error on %s\n",
24239 pci_name(dev));
24240
24241- atomic_inc(&pci_parity_count);
24242+ atomic_inc_unchecked(&pci_parity_count);
24243 }
24244 }
24245
24246@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24247 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24248 "Signaled System Error on %s\n",
24249 pci_name(dev));
24250- atomic_inc(&pci_nonparity_count);
24251+ atomic_inc_unchecked(&pci_nonparity_count);
24252 }
24253
24254 if (status & (PCI_STATUS_PARITY)) {
24255@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24256 "Master Data Parity Error on "
24257 "%s\n", pci_name(dev));
24258
24259- atomic_inc(&pci_parity_count);
24260+ atomic_inc_unchecked(&pci_parity_count);
24261 }
24262
24263 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24264@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24265 "Detected Parity Error on %s\n",
24266 pci_name(dev));
24267
24268- atomic_inc(&pci_parity_count);
24269+ atomic_inc_unchecked(&pci_parity_count);
24270 }
24271 }
24272 }
24273@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24274 if (!check_pci_errors)
24275 return;
24276
24277- before_count = atomic_read(&pci_parity_count);
24278+ before_count = atomic_read_unchecked(&pci_parity_count);
24279
24280 /* scan all PCI devices looking for a Parity Error on devices and
24281 * bridges.
24282@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24283 /* Only if operator has selected panic on PCI Error */
24284 if (edac_pci_get_panic_on_pe()) {
24285 /* If the count is different 'after' from 'before' */
24286- if (before_count != atomic_read(&pci_parity_count))
24287+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24288 panic("EDAC: PCI Parity Error");
24289 }
24290 }
24291diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24292--- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24293+++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24294@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24295 char *type, *optype, *err, *msg;
24296 unsigned long error = m->status & 0x1ff0000l;
24297 u32 optypenum = (m->status >> 4) & 0x07;
24298- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24299+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24300 u32 dimm = (m->misc >> 16) & 0x3;
24301 u32 channel = (m->misc >> 18) & 0x3;
24302 u32 syndrome = m->misc >> 32;
24303diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24304--- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24305+++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24306@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24307 bool (*dc_mce)(u16, u8);
24308 bool (*ic_mce)(u16, u8);
24309 bool (*nb_mce)(u16, u8);
24310-};
24311+} __no_const;
24312
24313 void amd_report_gart_errors(bool);
24314 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24315diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24316--- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24317+++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24318@@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24319
24320 void fw_core_remove_card(struct fw_card *card)
24321 {
24322- struct fw_card_driver dummy_driver = dummy_driver_template;
24323+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24324
24325 card->driver->update_phy_reg(card, 4,
24326 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24327diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24328--- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24329+++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24330@@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24331 int ret;
24332
24333 if ((request->channels == 0 && request->bandwidth == 0) ||
24334- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24335- request->bandwidth < 0)
24336+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24337 return -EINVAL;
24338
24339 r = kmalloc(sizeof(*r), GFP_KERNEL);
24340diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24341--- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24342+++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24343@@ -99,6 +99,7 @@ struct fw_card_driver {
24344
24345 int (*stop_iso)(struct fw_iso_context *ctx);
24346 };
24347+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24348
24349 void fw_card_initialize(struct fw_card *card,
24350 const struct fw_card_driver *driver, struct device *device);
24351diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24352--- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24353+++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24354@@ -36,6 +36,7 @@
24355 #include <linux/string.h>
24356 #include <linux/timer.h>
24357 #include <linux/types.h>
24358+#include <linux/sched.h>
24359
24360 #include <asm/byteorder.h>
24361
24362@@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24363 struct transaction_callback_data d;
24364 struct fw_transaction t;
24365
24366+ pax_track_stack();
24367+
24368 init_timer_on_stack(&t.split_timeout_timer);
24369 init_completion(&d.done);
24370 d.payload = payload;
24371diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24372--- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24373+++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24374@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24375 }
24376 }
24377 else {
24378- /*
24379- * no iounmap() for that ioremap(); it would be a no-op, but
24380- * it's so early in setup that sucker gets confused into doing
24381- * what it shouldn't if we actually call it.
24382- */
24383 p = dmi_ioremap(0xF0000, 0x10000);
24384 if (p == NULL)
24385 goto error;
24386diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24387--- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24388+++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24389@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24390 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24391 maskl, pendl, maskh, pendh);
24392
24393- atomic_inc(&irq_err_count);
24394+ atomic_inc_unchecked(&irq_err_count);
24395
24396 return -EINVAL;
24397 }
24398diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24399--- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24400+++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24401@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24402 struct drm_crtc *tmp;
24403 int crtc_mask = 1;
24404
24405- WARN(!crtc, "checking null crtc?\n");
24406+ BUG_ON(!crtc);
24407
24408 dev = crtc->dev;
24409
24410@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24411 struct drm_encoder *encoder;
24412 bool ret = true;
24413
24414+ pax_track_stack();
24415+
24416 crtc->enabled = drm_helper_crtc_in_use(crtc);
24417 if (!crtc->enabled)
24418 return true;
24419diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24420--- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24421+++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24422@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24423
24424 dev = file_priv->minor->dev;
24425 atomic_inc(&dev->ioctl_count);
24426- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24427+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24428 ++file_priv->ioctl_count;
24429
24430 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24431diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24432--- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24433+++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24434@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24435 }
24436
24437 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24438- atomic_set(&dev->counts[i], 0);
24439+ atomic_set_unchecked(&dev->counts[i], 0);
24440
24441 dev->sigdata.lock = NULL;
24442
24443@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24444
24445 retcode = drm_open_helper(inode, filp, dev);
24446 if (!retcode) {
24447- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24448- if (!dev->open_count++)
24449+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24450+ if (local_inc_return(&dev->open_count) == 1)
24451 retcode = drm_setup(dev);
24452 }
24453 if (!retcode) {
24454@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24455
24456 mutex_lock(&drm_global_mutex);
24457
24458- DRM_DEBUG("open_count = %d\n", dev->open_count);
24459+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24460
24461 if (dev->driver->preclose)
24462 dev->driver->preclose(dev, file_priv);
24463@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24464 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24465 task_pid_nr(current),
24466 (long)old_encode_dev(file_priv->minor->device),
24467- dev->open_count);
24468+ local_read(&dev->open_count));
24469
24470 /* if the master has gone away we can't do anything with the lock */
24471 if (file_priv->minor->master)
24472@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24473 * End inline drm_release
24474 */
24475
24476- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24477- if (!--dev->open_count) {
24478+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24479+ if (local_dec_and_test(&dev->open_count)) {
24480 if (atomic_read(&dev->ioctl_count)) {
24481 DRM_ERROR("Device busy: %d\n",
24482 atomic_read(&dev->ioctl_count));
24483diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24484--- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24485+++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24486@@ -36,7 +36,7 @@
24487 struct drm_global_item {
24488 struct mutex mutex;
24489 void *object;
24490- int refcount;
24491+ atomic_t refcount;
24492 };
24493
24494 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24495@@ -49,7 +49,7 @@ void drm_global_init(void)
24496 struct drm_global_item *item = &glob[i];
24497 mutex_init(&item->mutex);
24498 item->object = NULL;
24499- item->refcount = 0;
24500+ atomic_set(&item->refcount, 0);
24501 }
24502 }
24503
24504@@ -59,7 +59,7 @@ void drm_global_release(void)
24505 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24506 struct drm_global_item *item = &glob[i];
24507 BUG_ON(item->object != NULL);
24508- BUG_ON(item->refcount != 0);
24509+ BUG_ON(atomic_read(&item->refcount) != 0);
24510 }
24511 }
24512
24513@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24514 void *object;
24515
24516 mutex_lock(&item->mutex);
24517- if (item->refcount == 0) {
24518+ if (atomic_read(&item->refcount) == 0) {
24519 item->object = kzalloc(ref->size, GFP_KERNEL);
24520 if (unlikely(item->object == NULL)) {
24521 ret = -ENOMEM;
24522@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24523 goto out_err;
24524
24525 }
24526- ++item->refcount;
24527+ atomic_inc(&item->refcount);
24528 ref->object = item->object;
24529 object = item->object;
24530 mutex_unlock(&item->mutex);
24531@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24532 struct drm_global_item *item = &glob[ref->global_type];
24533
24534 mutex_lock(&item->mutex);
24535- BUG_ON(item->refcount == 0);
24536+ BUG_ON(atomic_read(&item->refcount) == 0);
24537 BUG_ON(ref->object != item->object);
24538- if (--item->refcount == 0) {
24539+ if (atomic_dec_and_test(&item->refcount)) {
24540 ref->release(ref);
24541 item->object = NULL;
24542 }
24543diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24544--- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24545+++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24546@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24547 struct drm_local_map *map;
24548 struct drm_map_list *r_list;
24549
24550- /* Hardcoded from _DRM_FRAME_BUFFER,
24551- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24552- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24553- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24554+ static const char * const types[] = {
24555+ [_DRM_FRAME_BUFFER] = "FB",
24556+ [_DRM_REGISTERS] = "REG",
24557+ [_DRM_SHM] = "SHM",
24558+ [_DRM_AGP] = "AGP",
24559+ [_DRM_SCATTER_GATHER] = "SG",
24560+ [_DRM_CONSISTENT] = "PCI",
24561+ [_DRM_GEM] = "GEM" };
24562 const char *type;
24563 int i;
24564
24565@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24566 map = r_list->map;
24567 if (!map)
24568 continue;
24569- if (map->type < 0 || map->type > 5)
24570+ if (map->type >= ARRAY_SIZE(types))
24571 type = "??";
24572 else
24573 type = types[map->type];
24574@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24575 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24576 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24577 vma->vm_flags & VM_IO ? 'i' : '-',
24578+#ifdef CONFIG_GRKERNSEC_HIDESYM
24579+ 0);
24580+#else
24581 vma->vm_pgoff);
24582+#endif
24583
24584 #if defined(__i386__)
24585 pgprot = pgprot_val(vma->vm_page_prot);
24586diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24587--- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24588+++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24589@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24590 stats->data[i].value =
24591 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24592 else
24593- stats->data[i].value = atomic_read(&dev->counts[i]);
24594+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24595 stats->data[i].type = dev->types[i];
24596 }
24597
24598diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24599--- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24600+++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24601@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24602 if (drm_lock_take(&master->lock, lock->context)) {
24603 master->lock.file_priv = file_priv;
24604 master->lock.lock_time = jiffies;
24605- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24606+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24607 break; /* Got lock */
24608 }
24609
24610@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24611 return -EINVAL;
24612 }
24613
24614- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24615+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24616
24617 if (drm_lock_free(&master->lock, lock->context)) {
24618 /* FIXME: Should really bail out here. */
24619diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24620--- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24621+++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24622@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24623 dma->buflist[vertex->idx],
24624 vertex->discard, vertex->used);
24625
24626- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24627- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24628+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24629+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24630 sarea_priv->last_enqueue = dev_priv->counter - 1;
24631 sarea_priv->last_dispatch = (int)hw_status[5];
24632
24633@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24634 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24635 mc->last_render);
24636
24637- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24638- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24639+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24640+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24641 sarea_priv->last_enqueue = dev_priv->counter - 1;
24642 sarea_priv->last_dispatch = (int)hw_status[5];
24643
24644diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24645--- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24646+++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24647@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24648 int page_flipping;
24649
24650 wait_queue_head_t irq_queue;
24651- atomic_t irq_received;
24652- atomic_t irq_emitted;
24653+ atomic_unchecked_t irq_received;
24654+ atomic_unchecked_t irq_emitted;
24655
24656 int front_offset;
24657 } drm_i810_private_t;
24658diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24659--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24660+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24661@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24662 I915_READ(GTIMR));
24663 }
24664 seq_printf(m, "Interrupts received: %d\n",
24665- atomic_read(&dev_priv->irq_received));
24666+ atomic_read_unchecked(&dev_priv->irq_received));
24667 for (i = 0; i < I915_NUM_RINGS; i++) {
24668 if (IS_GEN6(dev)) {
24669 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24670diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24671--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24672+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24673@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24674 bool can_switch;
24675
24676 spin_lock(&dev->count_lock);
24677- can_switch = (dev->open_count == 0);
24678+ can_switch = (local_read(&dev->open_count) == 0);
24679 spin_unlock(&dev->count_lock);
24680 return can_switch;
24681 }
24682diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24683--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24684+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24685@@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24686 /* display clock increase/decrease */
24687 /* pll clock increase/decrease */
24688 /* clock gating init */
24689-};
24690+} __no_const;
24691
24692 struct intel_device_info {
24693 u8 gen;
24694@@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24695 int current_page;
24696 int page_flipping;
24697
24698- atomic_t irq_received;
24699+ atomic_unchecked_t irq_received;
24700
24701 /* protects the irq masks */
24702 spinlock_t irq_lock;
24703@@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24704 * will be page flipped away on the next vblank. When it
24705 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24706 */
24707- atomic_t pending_flip;
24708+ atomic_unchecked_t pending_flip;
24709 };
24710
24711 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24712@@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24713 extern void intel_teardown_gmbus(struct drm_device *dev);
24714 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24715 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24716-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24717+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24718 {
24719 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24720 }
24721diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24722--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24723+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24724@@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24725 i915_gem_release_mmap(obj);
24726
24727 if (obj->base.pending_write_domain)
24728- cd->flips |= atomic_read(&obj->pending_flip);
24729+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24730
24731 /* The actual obj->write_domain will be updated with
24732 * pending_write_domain after we emit the accumulated flush for all
24733diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24734--- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24735+++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24736@@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24737 int ret = IRQ_NONE, pipe;
24738 bool blc_event = false;
24739
24740- atomic_inc(&dev_priv->irq_received);
24741+ atomic_inc_unchecked(&dev_priv->irq_received);
24742
24743 if (HAS_PCH_SPLIT(dev))
24744 return ironlake_irq_handler(dev);
24745@@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24746 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24747 int pipe;
24748
24749- atomic_set(&dev_priv->irq_received, 0);
24750+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24751
24752 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24753 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24754diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24755--- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24756+++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24757@@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24758
24759 wait_event(dev_priv->pending_flip_queue,
24760 atomic_read(&dev_priv->mm.wedged) ||
24761- atomic_read(&obj->pending_flip) == 0);
24762+ atomic_read_unchecked(&obj->pending_flip) == 0);
24763
24764 /* Big Hammer, we also need to ensure that any pending
24765 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24766@@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24767 obj = to_intel_framebuffer(crtc->fb)->obj;
24768 dev_priv = crtc->dev->dev_private;
24769 wait_event(dev_priv->pending_flip_queue,
24770- atomic_read(&obj->pending_flip) == 0);
24771+ atomic_read_unchecked(&obj->pending_flip) == 0);
24772 }
24773
24774 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24775@@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24776
24777 atomic_clear_mask(1 << intel_crtc->plane,
24778 &obj->pending_flip.counter);
24779- if (atomic_read(&obj->pending_flip) == 0)
24780+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24781 wake_up(&dev_priv->pending_flip_queue);
24782
24783 schedule_work(&work->work);
24784@@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24785 /* Block clients from rendering to the new back buffer until
24786 * the flip occurs and the object is no longer visible.
24787 */
24788- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24789+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24790
24791 switch (INTEL_INFO(dev)->gen) {
24792 case 2:
24793diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24794--- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24795+++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24796@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24797 u32 clear_cmd;
24798 u32 maccess;
24799
24800- atomic_t vbl_received; /**< Number of vblanks received. */
24801+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24802 wait_queue_head_t fence_queue;
24803- atomic_t last_fence_retired;
24804+ atomic_unchecked_t last_fence_retired;
24805 u32 next_fence_to_post;
24806
24807 unsigned int fb_cpp;
24808diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24809--- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24810+++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24811@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24812 if (crtc != 0)
24813 return 0;
24814
24815- return atomic_read(&dev_priv->vbl_received);
24816+ return atomic_read_unchecked(&dev_priv->vbl_received);
24817 }
24818
24819
24820@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24821 /* VBLANK interrupt */
24822 if (status & MGA_VLINEPEN) {
24823 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24824- atomic_inc(&dev_priv->vbl_received);
24825+ atomic_inc_unchecked(&dev_priv->vbl_received);
24826 drm_handle_vblank(dev, 0);
24827 handled = 1;
24828 }
24829@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24830 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24831 MGA_WRITE(MGA_PRIMEND, prim_end);
24832
24833- atomic_inc(&dev_priv->last_fence_retired);
24834+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24835 DRM_WAKEUP(&dev_priv->fence_queue);
24836 handled = 1;
24837 }
24838@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24839 * using fences.
24840 */
24841 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24842- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24843+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24844 - *sequence) <= (1 << 23)));
24845
24846 *sequence = cur_fence;
24847diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24848--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24849+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24850@@ -228,7 +228,7 @@ struct nouveau_channel {
24851 struct list_head pending;
24852 uint32_t sequence;
24853 uint32_t sequence_ack;
24854- atomic_t last_sequence_irq;
24855+ atomic_unchecked_t last_sequence_irq;
24856 } fence;
24857
24858 /* DMA push buffer */
24859@@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24860 struct nouveau_mc_engine {
24861 int (*init)(struct drm_device *dev);
24862 void (*takedown)(struct drm_device *dev);
24863-};
24864+} __no_const;
24865
24866 struct nouveau_timer_engine {
24867 int (*init)(struct drm_device *dev);
24868 void (*takedown)(struct drm_device *dev);
24869 uint64_t (*read)(struct drm_device *dev);
24870-};
24871+} __no_const;
24872
24873 struct nouveau_fb_engine {
24874 int num_tiles;
24875@@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24876 void (*put)(struct drm_device *, struct nouveau_mem **);
24877
24878 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24879-};
24880+} __no_const;
24881
24882 struct nouveau_engine {
24883 struct nouveau_instmem_engine instmem;
24884@@ -662,7 +662,7 @@ struct drm_nouveau_private {
24885 struct drm_global_reference mem_global_ref;
24886 struct ttm_bo_global_ref bo_global_ref;
24887 struct ttm_bo_device bdev;
24888- atomic_t validate_sequence;
24889+ atomic_unchecked_t validate_sequence;
24890 } ttm;
24891
24892 struct {
24893diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24894--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24895+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24896@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24897 if (USE_REFCNT(dev))
24898 sequence = nvchan_rd32(chan, 0x48);
24899 else
24900- sequence = atomic_read(&chan->fence.last_sequence_irq);
24901+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24902
24903 if (chan->fence.sequence_ack == sequence)
24904 goto out;
24905@@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24906 out_initialised:
24907 INIT_LIST_HEAD(&chan->fence.pending);
24908 spin_lock_init(&chan->fence.lock);
24909- atomic_set(&chan->fence.last_sequence_irq, 0);
24910+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24911 return 0;
24912 }
24913
24914diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24915--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24916+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24917@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24918 int trycnt = 0;
24919 int ret, i;
24920
24921- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24922+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24923 retry:
24924 if (++trycnt > 100000) {
24925 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24926diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24927--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24928+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24929@@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24930 bool can_switch;
24931
24932 spin_lock(&dev->count_lock);
24933- can_switch = (dev->open_count == 0);
24934+ can_switch = (local_read(&dev->open_count) == 0);
24935 spin_unlock(&dev->count_lock);
24936 return can_switch;
24937 }
24938diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24939--- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24940+++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24941@@ -552,7 +552,7 @@ static int
24942 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24943 u32 class, u32 mthd, u32 data)
24944 {
24945- atomic_set(&chan->fence.last_sequence_irq, data);
24946+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24947 return 0;
24948 }
24949
24950diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24951--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24952+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24953@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24954
24955 /* GH: Simple idle check.
24956 */
24957- atomic_set(&dev_priv->idle_count, 0);
24958+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24959
24960 /* We don't support anything other than bus-mastering ring mode,
24961 * but the ring can be in either AGP or PCI space for the ring
24962diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
24963--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
24964+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
24965@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24966 int is_pci;
24967 unsigned long cce_buffers_offset;
24968
24969- atomic_t idle_count;
24970+ atomic_unchecked_t idle_count;
24971
24972 int page_flipping;
24973 int current_page;
24974 u32 crtc_offset;
24975 u32 crtc_offset_cntl;
24976
24977- atomic_t vbl_received;
24978+ atomic_unchecked_t vbl_received;
24979
24980 u32 color_fmt;
24981 unsigned int front_offset;
24982diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
24983--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
24984+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
24985@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24986 if (crtc != 0)
24987 return 0;
24988
24989- return atomic_read(&dev_priv->vbl_received);
24990+ return atomic_read_unchecked(&dev_priv->vbl_received);
24991 }
24992
24993 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24994@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24995 /* VBLANK interrupt */
24996 if (status & R128_CRTC_VBLANK_INT) {
24997 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24998- atomic_inc(&dev_priv->vbl_received);
24999+ atomic_inc_unchecked(&dev_priv->vbl_received);
25000 drm_handle_vblank(dev, 0);
25001 return IRQ_HANDLED;
25002 }
25003diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25004--- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25005+++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25006@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25007
25008 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25009 {
25010- if (atomic_read(&dev_priv->idle_count) == 0)
25011+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25012 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25013 else
25014- atomic_set(&dev_priv->idle_count, 0);
25015+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25016 }
25017
25018 #endif
25019diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25020--- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25021+++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25022@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25023 char name[512];
25024 int i;
25025
25026+ pax_track_stack();
25027+
25028 ctx->card = card;
25029 ctx->bios = bios;
25030
25031diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25032--- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25033+++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25034@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25035 regex_t mask_rex;
25036 regmatch_t match[4];
25037 char buf[1024];
25038- size_t end;
25039+ long end;
25040 int len;
25041 int done = 0;
25042 int r;
25043 unsigned o;
25044 struct offset *offset;
25045 char last_reg_s[10];
25046- int last_reg;
25047+ unsigned long last_reg;
25048
25049 if (regcomp
25050 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25051diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25052--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25053+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25054@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25055 struct radeon_gpio_rec gpio;
25056 struct radeon_hpd hpd;
25057
25058+ pax_track_stack();
25059+
25060 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25061 return false;
25062
25063diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25064--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25065+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25066@@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25067 bool can_switch;
25068
25069 spin_lock(&dev->count_lock);
25070- can_switch = (dev->open_count == 0);
25071+ can_switch = (local_read(&dev->open_count) == 0);
25072 spin_unlock(&dev->count_lock);
25073 return can_switch;
25074 }
25075diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25076--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25077+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25078@@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25079 uint32_t post_div;
25080 u32 pll_out_min, pll_out_max;
25081
25082+ pax_track_stack();
25083+
25084 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25085 freq = freq * 1000;
25086
25087diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25088--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25089+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25090@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25091
25092 /* SW interrupt */
25093 wait_queue_head_t swi_queue;
25094- atomic_t swi_emitted;
25095+ atomic_unchecked_t swi_emitted;
25096 int vblank_crtc;
25097 uint32_t irq_enable_reg;
25098 uint32_t r500_disp_irq_reg;
25099diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25100--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25101+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25102@@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25103 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25104 return 0;
25105 }
25106- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25107+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25108 if (!rdev->cp.ready) {
25109 /* FIXME: cp is not running assume everythings is done right
25110 * away
25111@@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25112 return r;
25113 }
25114 WREG32(rdev->fence_drv.scratch_reg, 0);
25115- atomic_set(&rdev->fence_drv.seq, 0);
25116+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25117 INIT_LIST_HEAD(&rdev->fence_drv.created);
25118 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25119 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25120diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25121--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25122+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25123@@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25124 */
25125 struct radeon_fence_driver {
25126 uint32_t scratch_reg;
25127- atomic_t seq;
25128+ atomic_unchecked_t seq;
25129 uint32_t last_seq;
25130 unsigned long last_jiffies;
25131 unsigned long last_timeout;
25132@@ -958,7 +958,7 @@ struct radeon_asic {
25133 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25134 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25135 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25136-};
25137+} __no_const;
25138
25139 /*
25140 * Asic structures
25141diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25142--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25143+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25144@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25145 request = compat_alloc_user_space(sizeof(*request));
25146 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25147 || __put_user(req32.param, &request->param)
25148- || __put_user((void __user *)(unsigned long)req32.value,
25149+ || __put_user((unsigned long)req32.value,
25150 &request->value))
25151 return -EFAULT;
25152
25153diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25154--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25155+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25156@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25157 unsigned int ret;
25158 RING_LOCALS;
25159
25160- atomic_inc(&dev_priv->swi_emitted);
25161- ret = atomic_read(&dev_priv->swi_emitted);
25162+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25163+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25164
25165 BEGIN_RING(4);
25166 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25167@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25168 drm_radeon_private_t *dev_priv =
25169 (drm_radeon_private_t *) dev->dev_private;
25170
25171- atomic_set(&dev_priv->swi_emitted, 0);
25172+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25173 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25174
25175 dev->max_vblank_count = 0x001fffff;
25176diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25177--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25178+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25179@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25180 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25181 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25182
25183- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25184+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25185 sarea_priv->nbox * sizeof(depth_boxes[0])))
25186 return -EFAULT;
25187
25188@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25189 {
25190 drm_radeon_private_t *dev_priv = dev->dev_private;
25191 drm_radeon_getparam_t *param = data;
25192- int value;
25193+ int value = 0;
25194
25195 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25196
25197diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25198--- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25199+++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25200@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25201 }
25202 if (unlikely(ttm_vm_ops == NULL)) {
25203 ttm_vm_ops = vma->vm_ops;
25204- radeon_ttm_vm_ops = *ttm_vm_ops;
25205- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25206+ pax_open_kernel();
25207+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25208+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25209+ pax_close_kernel();
25210 }
25211 vma->vm_ops = &radeon_ttm_vm_ops;
25212 return 0;
25213diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25214--- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25215+++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25216@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25217 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25218 rdev->pm.sideport_bandwidth.full)
25219 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25220- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25221+ read_delay_latency.full = dfixed_const(800 * 1000);
25222 read_delay_latency.full = dfixed_div(read_delay_latency,
25223 rdev->pm.igp_sideport_mclk);
25224+ a.full = dfixed_const(370);
25225+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25226 } else {
25227 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25228 rdev->pm.k8_bandwidth.full)
25229diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25230--- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25231+++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25232@@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25233 */
25234 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25235 {
25236- static atomic_t start_pool = ATOMIC_INIT(0);
25237+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25238 unsigned i;
25239- unsigned pool_offset = atomic_add_return(1, &start_pool);
25240+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25241 struct ttm_page_pool *pool;
25242
25243 pool_offset = pool_offset % NUM_POOLS;
25244diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25245--- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25246+++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25247@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25248 typedef uint32_t maskarray_t[5];
25249
25250 typedef struct drm_via_irq {
25251- atomic_t irq_received;
25252+ atomic_unchecked_t irq_received;
25253 uint32_t pending_mask;
25254 uint32_t enable_mask;
25255 wait_queue_head_t irq_queue;
25256@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25257 struct timeval last_vblank;
25258 int last_vblank_valid;
25259 unsigned usec_per_vblank;
25260- atomic_t vbl_received;
25261+ atomic_unchecked_t vbl_received;
25262 drm_via_state_t hc_state;
25263 char pci_buf[VIA_PCI_BUF_SIZE];
25264 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25265diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25266--- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25267+++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25268@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25269 if (crtc != 0)
25270 return 0;
25271
25272- return atomic_read(&dev_priv->vbl_received);
25273+ return atomic_read_unchecked(&dev_priv->vbl_received);
25274 }
25275
25276 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25277@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25278
25279 status = VIA_READ(VIA_REG_INTERRUPT);
25280 if (status & VIA_IRQ_VBLANK_PENDING) {
25281- atomic_inc(&dev_priv->vbl_received);
25282- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25283+ atomic_inc_unchecked(&dev_priv->vbl_received);
25284+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25285 do_gettimeofday(&cur_vblank);
25286 if (dev_priv->last_vblank_valid) {
25287 dev_priv->usec_per_vblank =
25288@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25289 dev_priv->last_vblank = cur_vblank;
25290 dev_priv->last_vblank_valid = 1;
25291 }
25292- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25293+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25294 DRM_DEBUG("US per vblank is: %u\n",
25295 dev_priv->usec_per_vblank);
25296 }
25297@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25298
25299 for (i = 0; i < dev_priv->num_irqs; ++i) {
25300 if (status & cur_irq->pending_mask) {
25301- atomic_inc(&cur_irq->irq_received);
25302+ atomic_inc_unchecked(&cur_irq->irq_received);
25303 DRM_WAKEUP(&cur_irq->irq_queue);
25304 handled = 1;
25305 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25306@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25307 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25308 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25309 masks[irq][4]));
25310- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25311+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25312 } else {
25313 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25314 (((cur_irq_sequence =
25315- atomic_read(&cur_irq->irq_received)) -
25316+ atomic_read_unchecked(&cur_irq->irq_received)) -
25317 *sequence) <= (1 << 23)));
25318 }
25319 *sequence = cur_irq_sequence;
25320@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25321 }
25322
25323 for (i = 0; i < dev_priv->num_irqs; ++i) {
25324- atomic_set(&cur_irq->irq_received, 0);
25325+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25326 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25327 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25328 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25329@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25330 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25331 case VIA_IRQ_RELATIVE:
25332 irqwait->request.sequence +=
25333- atomic_read(&cur_irq->irq_received);
25334+ atomic_read_unchecked(&cur_irq->irq_received);
25335 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25336 case VIA_IRQ_ABSOLUTE:
25337 break;
25338diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25339--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25340+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25341@@ -240,7 +240,7 @@ struct vmw_private {
25342 * Fencing and IRQs.
25343 */
25344
25345- atomic_t fence_seq;
25346+ atomic_unchecked_t fence_seq;
25347 wait_queue_head_t fence_queue;
25348 wait_queue_head_t fifo_queue;
25349 atomic_t fence_queue_waiters;
25350diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25351--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25352+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25353@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25354 while (!vmw_lag_lt(queue, us)) {
25355 spin_lock(&queue->lock);
25356 if (list_empty(&queue->head))
25357- sequence = atomic_read(&dev_priv->fence_seq);
25358+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25359 else {
25360 fence = list_first_entry(&queue->head,
25361 struct vmw_fence, head);
25362diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25363--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25364+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25365@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25366 (unsigned int) min,
25367 (unsigned int) fifo->capabilities);
25368
25369- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25370+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25371 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25372 vmw_fence_queue_init(&fifo->fence_queue);
25373 return vmw_fifo_send_fence(dev_priv, &dummy);
25374@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25375
25376 fm = vmw_fifo_reserve(dev_priv, bytes);
25377 if (unlikely(fm == NULL)) {
25378- *sequence = atomic_read(&dev_priv->fence_seq);
25379+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25380 ret = -ENOMEM;
25381 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25382 false, 3*HZ);
25383@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25384 }
25385
25386 do {
25387- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25388+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25389 } while (*sequence == 0);
25390
25391 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25392diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25393--- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25394+++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25395@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25396 * emitted. Then the fence is stale and signaled.
25397 */
25398
25399- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25400+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25401 > VMW_FENCE_WRAP);
25402
25403 return ret;
25404@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25405
25406 if (fifo_idle)
25407 down_read(&fifo_state->rwsem);
25408- signal_seq = atomic_read(&dev_priv->fence_seq);
25409+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25410 ret = 0;
25411
25412 for (;;) {
25413diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25414--- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25415+++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25416@@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25417
25418 int hid_add_device(struct hid_device *hdev)
25419 {
25420- static atomic_t id = ATOMIC_INIT(0);
25421+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25422 int ret;
25423
25424 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25425@@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25426 /* XXX hack, any other cleaner solution after the driver core
25427 * is converted to allow more than 20 bytes as the device name? */
25428 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25429- hdev->vendor, hdev->product, atomic_inc_return(&id));
25430+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25431
25432 hid_debug_register(hdev, dev_name(&hdev->dev));
25433 ret = device_add(&hdev->dev);
25434diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25435--- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25436+++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25437@@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25438 break;
25439
25440 case HIDIOCAPPLICATION:
25441- if (arg < 0 || arg >= hid->maxapplication)
25442+ if (arg >= hid->maxapplication)
25443 break;
25444
25445 for (i = 0; i < hid->maxcollection; i++)
25446diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25447--- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25448+++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25449@@ -113,7 +113,7 @@ struct sht15_data {
25450 int supply_uV;
25451 int supply_uV_valid;
25452 struct work_struct update_supply_work;
25453- atomic_t interrupt_handled;
25454+ atomic_unchecked_t interrupt_handled;
25455 };
25456
25457 /**
25458@@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25459 return ret;
25460
25461 gpio_direction_input(data->pdata->gpio_data);
25462- atomic_set(&data->interrupt_handled, 0);
25463+ atomic_set_unchecked(&data->interrupt_handled, 0);
25464
25465 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25466 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25467 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25468 /* Only relevant if the interrupt hasn't occurred. */
25469- if (!atomic_read(&data->interrupt_handled))
25470+ if (!atomic_read_unchecked(&data->interrupt_handled))
25471 schedule_work(&data->read_work);
25472 }
25473 ret = wait_event_timeout(data->wait_queue,
25474@@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25475 struct sht15_data *data = d;
25476 /* First disable the interrupt */
25477 disable_irq_nosync(irq);
25478- atomic_inc(&data->interrupt_handled);
25479+ atomic_inc_unchecked(&data->interrupt_handled);
25480 /* Then schedule a reading work struct */
25481 if (data->flag != SHT15_READING_NOTHING)
25482 schedule_work(&data->read_work);
25483@@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25484 here as could have gone low in meantime so verify
25485 it hasn't!
25486 */
25487- atomic_set(&data->interrupt_handled, 0);
25488+ atomic_set_unchecked(&data->interrupt_handled, 0);
25489 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25490 /* If still not occurred or another handler has been scheduled */
25491 if (gpio_get_value(data->pdata->gpio_data)
25492- || atomic_read(&data->interrupt_handled))
25493+ || atomic_read_unchecked(&data->interrupt_handled))
25494 return;
25495 }
25496 /* Read the data back from the device */
25497diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25498--- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25499+++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25500@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25501 struct i2c_board_info *info);
25502 static int w83791d_remove(struct i2c_client *client);
25503
25504-static int w83791d_read(struct i2c_client *client, u8 register);
25505-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25506+static int w83791d_read(struct i2c_client *client, u8 reg);
25507+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25508 static struct w83791d_data *w83791d_update_device(struct device *dev);
25509
25510 #ifdef DEBUG
25511diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25512--- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25513+++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25514@@ -43,7 +43,7 @@
25515 extern struct i2c_adapter amd756_smbus;
25516
25517 static struct i2c_adapter *s4882_adapter;
25518-static struct i2c_algorithm *s4882_algo;
25519+static i2c_algorithm_no_const *s4882_algo;
25520
25521 /* Wrapper access functions for multiplexed SMBus */
25522 static DEFINE_MUTEX(amd756_lock);
25523diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25524--- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25525+++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25526@@ -41,7 +41,7 @@
25527 extern struct i2c_adapter *nforce2_smbus;
25528
25529 static struct i2c_adapter *s4985_adapter;
25530-static struct i2c_algorithm *s4985_algo;
25531+static i2c_algorithm_no_const *s4985_algo;
25532
25533 /* Wrapper access functions for multiplexed SMBus */
25534 static DEFINE_MUTEX(nforce2_lock);
25535diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25536--- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25537+++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25538@@ -28,7 +28,7 @@
25539 /* multiplexer per channel data */
25540 struct i2c_mux_priv {
25541 struct i2c_adapter adap;
25542- struct i2c_algorithm algo;
25543+ i2c_algorithm_no_const algo;
25544
25545 struct i2c_adapter *parent;
25546 void *mux_dev; /* the mux chip/device */
25547diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25548--- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25549+++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25550@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25551 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25552 if ((unsigned long)buf & alignment
25553 || blk_rq_bytes(rq) & q->dma_pad_mask
25554- || object_is_on_stack(buf))
25555+ || object_starts_on_stack(buf))
25556 drive->dma = 0;
25557 }
25558 }
25559diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25560--- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25561+++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25562@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25563 u8 pc_buf[256], header_len, desc_cnt;
25564 int i, rc = 1, blocks, length;
25565
25566+ pax_track_stack();
25567+
25568 ide_debug_log(IDE_DBG_FUNC, "enter");
25569
25570 drive->bios_cyl = 0;
25571diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25572--- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25573+++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25574@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25575 int ret, i, n_ports = dev2 ? 4 : 2;
25576 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25577
25578+ pax_track_stack();
25579+
25580 for (i = 0; i < n_ports / 2; i++) {
25581 ret = ide_setup_pci_controller(pdev[i], d, !i);
25582 if (ret < 0)
25583diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25584--- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25585+++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25586@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25587
25588 struct cm_counter_group {
25589 struct kobject obj;
25590- atomic_long_t counter[CM_ATTR_COUNT];
25591+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25592 };
25593
25594 struct cm_counter_attribute {
25595@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25596 struct ib_mad_send_buf *msg = NULL;
25597 int ret;
25598
25599- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25600+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25601 counter[CM_REQ_COUNTER]);
25602
25603 /* Quick state check to discard duplicate REQs. */
25604@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25605 if (!cm_id_priv)
25606 return;
25607
25608- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25609+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25610 counter[CM_REP_COUNTER]);
25611 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25612 if (ret)
25613@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25614 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25615 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25616 spin_unlock_irq(&cm_id_priv->lock);
25617- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25618+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25619 counter[CM_RTU_COUNTER]);
25620 goto out;
25621 }
25622@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25623 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25624 dreq_msg->local_comm_id);
25625 if (!cm_id_priv) {
25626- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25627+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25628 counter[CM_DREQ_COUNTER]);
25629 cm_issue_drep(work->port, work->mad_recv_wc);
25630 return -EINVAL;
25631@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25632 case IB_CM_MRA_REP_RCVD:
25633 break;
25634 case IB_CM_TIMEWAIT:
25635- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25636+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25637 counter[CM_DREQ_COUNTER]);
25638 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25639 goto unlock;
25640@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25641 cm_free_msg(msg);
25642 goto deref;
25643 case IB_CM_DREQ_RCVD:
25644- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25645+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25646 counter[CM_DREQ_COUNTER]);
25647 goto unlock;
25648 default:
25649@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25650 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25651 cm_id_priv->msg, timeout)) {
25652 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25653- atomic_long_inc(&work->port->
25654+ atomic_long_inc_unchecked(&work->port->
25655 counter_group[CM_RECV_DUPLICATES].
25656 counter[CM_MRA_COUNTER]);
25657 goto out;
25658@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25659 break;
25660 case IB_CM_MRA_REQ_RCVD:
25661 case IB_CM_MRA_REP_RCVD:
25662- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25663+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25664 counter[CM_MRA_COUNTER]);
25665 /* fall through */
25666 default:
25667@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25668 case IB_CM_LAP_IDLE:
25669 break;
25670 case IB_CM_MRA_LAP_SENT:
25671- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25672+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25673 counter[CM_LAP_COUNTER]);
25674 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25675 goto unlock;
25676@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25677 cm_free_msg(msg);
25678 goto deref;
25679 case IB_CM_LAP_RCVD:
25680- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25681+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25682 counter[CM_LAP_COUNTER]);
25683 goto unlock;
25684 default:
25685@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25686 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25687 if (cur_cm_id_priv) {
25688 spin_unlock_irq(&cm.lock);
25689- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25690+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25691 counter[CM_SIDR_REQ_COUNTER]);
25692 goto out; /* Duplicate message. */
25693 }
25694@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25695 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25696 msg->retries = 1;
25697
25698- atomic_long_add(1 + msg->retries,
25699+ atomic_long_add_unchecked(1 + msg->retries,
25700 &port->counter_group[CM_XMIT].counter[attr_index]);
25701 if (msg->retries)
25702- atomic_long_add(msg->retries,
25703+ atomic_long_add_unchecked(msg->retries,
25704 &port->counter_group[CM_XMIT_RETRIES].
25705 counter[attr_index]);
25706
25707@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25708 }
25709
25710 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25711- atomic_long_inc(&port->counter_group[CM_RECV].
25712+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25713 counter[attr_id - CM_ATTR_ID_OFFSET]);
25714
25715 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25716@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25717 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25718
25719 return sprintf(buf, "%ld\n",
25720- atomic_long_read(&group->counter[cm_attr->index]));
25721+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25722 }
25723
25724 static const struct sysfs_ops cm_counter_ops = {
25725diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25726--- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25727+++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25728@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25729
25730 struct task_struct *thread;
25731
25732- atomic_t req_ser;
25733- atomic_t flush_ser;
25734+ atomic_unchecked_t req_ser;
25735+ atomic_unchecked_t flush_ser;
25736
25737 wait_queue_head_t force_wait;
25738 };
25739@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25740 struct ib_fmr_pool *pool = pool_ptr;
25741
25742 do {
25743- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25744+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25745 ib_fmr_batch_release(pool);
25746
25747- atomic_inc(&pool->flush_ser);
25748+ atomic_inc_unchecked(&pool->flush_ser);
25749 wake_up_interruptible(&pool->force_wait);
25750
25751 if (pool->flush_function)
25752@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25753 }
25754
25755 set_current_state(TASK_INTERRUPTIBLE);
25756- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25757+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25758 !kthread_should_stop())
25759 schedule();
25760 __set_current_state(TASK_RUNNING);
25761@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25762 pool->dirty_watermark = params->dirty_watermark;
25763 pool->dirty_len = 0;
25764 spin_lock_init(&pool->pool_lock);
25765- atomic_set(&pool->req_ser, 0);
25766- atomic_set(&pool->flush_ser, 0);
25767+ atomic_set_unchecked(&pool->req_ser, 0);
25768+ atomic_set_unchecked(&pool->flush_ser, 0);
25769 init_waitqueue_head(&pool->force_wait);
25770
25771 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25772@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25773 }
25774 spin_unlock_irq(&pool->pool_lock);
25775
25776- serial = atomic_inc_return(&pool->req_ser);
25777+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25778 wake_up_process(pool->thread);
25779
25780 if (wait_event_interruptible(pool->force_wait,
25781- atomic_read(&pool->flush_ser) - serial >= 0))
25782+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25783 return -EINTR;
25784
25785 return 0;
25786@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25787 } else {
25788 list_add_tail(&fmr->list, &pool->dirty_list);
25789 if (++pool->dirty_len >= pool->dirty_watermark) {
25790- atomic_inc(&pool->req_ser);
25791+ atomic_inc_unchecked(&pool->req_ser);
25792 wake_up_process(pool->thread);
25793 }
25794 }
25795diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25796--- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25797+++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25798@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25799 int err;
25800 struct fw_ri_tpte tpt;
25801 u32 stag_idx;
25802- static atomic_t key;
25803+ static atomic_unchecked_t key;
25804
25805 if (c4iw_fatal_error(rdev))
25806 return -EIO;
25807@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25808 &rdev->resource.tpt_fifo_lock);
25809 if (!stag_idx)
25810 return -ENOMEM;
25811- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25812+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25813 }
25814 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25815 __func__, stag_state, type, pdid, stag_idx);
25816diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25817--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25818+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25819@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25820 struct infinipath_counters counters;
25821 struct ipath_devdata *dd;
25822
25823+ pax_track_stack();
25824+
25825 dd = file->f_path.dentry->d_inode->i_private;
25826 dd->ipath_f_read_counters(dd, &counters);
25827
25828diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25829--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25830+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25831@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25832 struct ib_atomic_eth *ateth;
25833 struct ipath_ack_entry *e;
25834 u64 vaddr;
25835- atomic64_t *maddr;
25836+ atomic64_unchecked_t *maddr;
25837 u64 sdata;
25838 u32 rkey;
25839 u8 next;
25840@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25841 IB_ACCESS_REMOTE_ATOMIC)))
25842 goto nack_acc_unlck;
25843 /* Perform atomic OP and save result. */
25844- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25845+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25846 sdata = be64_to_cpu(ateth->swap_data);
25847 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25848 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25849- (u64) atomic64_add_return(sdata, maddr) - sdata :
25850+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25851 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25852 be64_to_cpu(ateth->compare_data),
25853 sdata);
25854diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25855--- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25856+++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25857@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25858 unsigned long flags;
25859 struct ib_wc wc;
25860 u64 sdata;
25861- atomic64_t *maddr;
25862+ atomic64_unchecked_t *maddr;
25863 enum ib_wc_status send_status;
25864
25865 /*
25866@@ -382,11 +382,11 @@ again:
25867 IB_ACCESS_REMOTE_ATOMIC)))
25868 goto acc_err;
25869 /* Perform atomic OP and save result. */
25870- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25871+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25872 sdata = wqe->wr.wr.atomic.compare_add;
25873 *(u64 *) sqp->s_sge.sge.vaddr =
25874 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25875- (u64) atomic64_add_return(sdata, maddr) - sdata :
25876+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25877 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25878 sdata, wqe->wr.wr.atomic.swap);
25879 goto send_comp;
25880diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25881--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25882+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25883@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25884 LIST_HEAD(nes_adapter_list);
25885 static LIST_HEAD(nes_dev_list);
25886
25887-atomic_t qps_destroyed;
25888+atomic_unchecked_t qps_destroyed;
25889
25890 static unsigned int ee_flsh_adapter;
25891 static unsigned int sysfs_nonidx_addr;
25892@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25893 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25894 struct nes_adapter *nesadapter = nesdev->nesadapter;
25895
25896- atomic_inc(&qps_destroyed);
25897+ atomic_inc_unchecked(&qps_destroyed);
25898
25899 /* Free the control structures */
25900
25901diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25902--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25903+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25904@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25905 u32 cm_packets_retrans;
25906 u32 cm_packets_created;
25907 u32 cm_packets_received;
25908-atomic_t cm_listens_created;
25909-atomic_t cm_listens_destroyed;
25910+atomic_unchecked_t cm_listens_created;
25911+atomic_unchecked_t cm_listens_destroyed;
25912 u32 cm_backlog_drops;
25913-atomic_t cm_loopbacks;
25914-atomic_t cm_nodes_created;
25915-atomic_t cm_nodes_destroyed;
25916-atomic_t cm_accel_dropped_pkts;
25917-atomic_t cm_resets_recvd;
25918+atomic_unchecked_t cm_loopbacks;
25919+atomic_unchecked_t cm_nodes_created;
25920+atomic_unchecked_t cm_nodes_destroyed;
25921+atomic_unchecked_t cm_accel_dropped_pkts;
25922+atomic_unchecked_t cm_resets_recvd;
25923
25924 static inline int mini_cm_accelerated(struct nes_cm_core *,
25925 struct nes_cm_node *);
25926@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25927
25928 static struct nes_cm_core *g_cm_core;
25929
25930-atomic_t cm_connects;
25931-atomic_t cm_accepts;
25932-atomic_t cm_disconnects;
25933-atomic_t cm_closes;
25934-atomic_t cm_connecteds;
25935-atomic_t cm_connect_reqs;
25936-atomic_t cm_rejects;
25937+atomic_unchecked_t cm_connects;
25938+atomic_unchecked_t cm_accepts;
25939+atomic_unchecked_t cm_disconnects;
25940+atomic_unchecked_t cm_closes;
25941+atomic_unchecked_t cm_connecteds;
25942+atomic_unchecked_t cm_connect_reqs;
25943+atomic_unchecked_t cm_rejects;
25944
25945
25946 /**
25947@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25948 kfree(listener);
25949 listener = NULL;
25950 ret = 0;
25951- atomic_inc(&cm_listens_destroyed);
25952+ atomic_inc_unchecked(&cm_listens_destroyed);
25953 } else {
25954 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25955 }
25956@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25957 cm_node->rem_mac);
25958
25959 add_hte_node(cm_core, cm_node);
25960- atomic_inc(&cm_nodes_created);
25961+ atomic_inc_unchecked(&cm_nodes_created);
25962
25963 return cm_node;
25964 }
25965@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25966 }
25967
25968 atomic_dec(&cm_core->node_cnt);
25969- atomic_inc(&cm_nodes_destroyed);
25970+ atomic_inc_unchecked(&cm_nodes_destroyed);
25971 nesqp = cm_node->nesqp;
25972 if (nesqp) {
25973 nesqp->cm_node = NULL;
25974@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25975
25976 static void drop_packet(struct sk_buff *skb)
25977 {
25978- atomic_inc(&cm_accel_dropped_pkts);
25979+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25980 dev_kfree_skb_any(skb);
25981 }
25982
25983@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25984 {
25985
25986 int reset = 0; /* whether to send reset in case of err.. */
25987- atomic_inc(&cm_resets_recvd);
25988+ atomic_inc_unchecked(&cm_resets_recvd);
25989 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25990 " refcnt=%d\n", cm_node, cm_node->state,
25991 atomic_read(&cm_node->ref_count));
25992@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25993 rem_ref_cm_node(cm_node->cm_core, cm_node);
25994 return NULL;
25995 }
25996- atomic_inc(&cm_loopbacks);
25997+ atomic_inc_unchecked(&cm_loopbacks);
25998 loopbackremotenode->loopbackpartner = cm_node;
25999 loopbackremotenode->tcp_cntxt.rcv_wscale =
26000 NES_CM_DEFAULT_RCV_WND_SCALE;
26001@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26002 add_ref_cm_node(cm_node);
26003 } else if (cm_node->state == NES_CM_STATE_TSA) {
26004 rem_ref_cm_node(cm_core, cm_node);
26005- atomic_inc(&cm_accel_dropped_pkts);
26006+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
26007 dev_kfree_skb_any(skb);
26008 break;
26009 }
26010@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26011
26012 if ((cm_id) && (cm_id->event_handler)) {
26013 if (issue_disconn) {
26014- atomic_inc(&cm_disconnects);
26015+ atomic_inc_unchecked(&cm_disconnects);
26016 cm_event.event = IW_CM_EVENT_DISCONNECT;
26017 cm_event.status = disconn_status;
26018 cm_event.local_addr = cm_id->local_addr;
26019@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26020 }
26021
26022 if (issue_close) {
26023- atomic_inc(&cm_closes);
26024+ atomic_inc_unchecked(&cm_closes);
26025 nes_disconnect(nesqp, 1);
26026
26027 cm_id->provider_data = nesqp;
26028@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26029
26030 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26031 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26032- atomic_inc(&cm_accepts);
26033+ atomic_inc_unchecked(&cm_accepts);
26034
26035 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26036 netdev_refcnt_read(nesvnic->netdev));
26037@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26038
26039 struct nes_cm_core *cm_core;
26040
26041- atomic_inc(&cm_rejects);
26042+ atomic_inc_unchecked(&cm_rejects);
26043 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26044 loopback = cm_node->loopbackpartner;
26045 cm_core = cm_node->cm_core;
26046@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26047 ntohl(cm_id->local_addr.sin_addr.s_addr),
26048 ntohs(cm_id->local_addr.sin_port));
26049
26050- atomic_inc(&cm_connects);
26051+ atomic_inc_unchecked(&cm_connects);
26052 nesqp->active_conn = 1;
26053
26054 /* cache the cm_id in the qp */
26055@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26056 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26057 return err;
26058 }
26059- atomic_inc(&cm_listens_created);
26060+ atomic_inc_unchecked(&cm_listens_created);
26061 }
26062
26063 cm_id->add_ref(cm_id);
26064@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26065 if (nesqp->destroyed) {
26066 return;
26067 }
26068- atomic_inc(&cm_connecteds);
26069+ atomic_inc_unchecked(&cm_connecteds);
26070 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26071 " local port 0x%04X. jiffies = %lu.\n",
26072 nesqp->hwqp.qp_id,
26073@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26074
26075 cm_id->add_ref(cm_id);
26076 ret = cm_id->event_handler(cm_id, &cm_event);
26077- atomic_inc(&cm_closes);
26078+ atomic_inc_unchecked(&cm_closes);
26079 cm_event.event = IW_CM_EVENT_CLOSE;
26080 cm_event.status = IW_CM_EVENT_STATUS_OK;
26081 cm_event.provider_data = cm_id->provider_data;
26082@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26083 return;
26084 cm_id = cm_node->cm_id;
26085
26086- atomic_inc(&cm_connect_reqs);
26087+ atomic_inc_unchecked(&cm_connect_reqs);
26088 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26089 cm_node, cm_id, jiffies);
26090
26091@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26092 return;
26093 cm_id = cm_node->cm_id;
26094
26095- atomic_inc(&cm_connect_reqs);
26096+ atomic_inc_unchecked(&cm_connect_reqs);
26097 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26098 cm_node, cm_id, jiffies);
26099
26100diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26101--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26102+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26103@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26104 extern unsigned int wqm_quanta;
26105 extern struct list_head nes_adapter_list;
26106
26107-extern atomic_t cm_connects;
26108-extern atomic_t cm_accepts;
26109-extern atomic_t cm_disconnects;
26110-extern atomic_t cm_closes;
26111-extern atomic_t cm_connecteds;
26112-extern atomic_t cm_connect_reqs;
26113-extern atomic_t cm_rejects;
26114-extern atomic_t mod_qp_timouts;
26115-extern atomic_t qps_created;
26116-extern atomic_t qps_destroyed;
26117-extern atomic_t sw_qps_destroyed;
26118+extern atomic_unchecked_t cm_connects;
26119+extern atomic_unchecked_t cm_accepts;
26120+extern atomic_unchecked_t cm_disconnects;
26121+extern atomic_unchecked_t cm_closes;
26122+extern atomic_unchecked_t cm_connecteds;
26123+extern atomic_unchecked_t cm_connect_reqs;
26124+extern atomic_unchecked_t cm_rejects;
26125+extern atomic_unchecked_t mod_qp_timouts;
26126+extern atomic_unchecked_t qps_created;
26127+extern atomic_unchecked_t qps_destroyed;
26128+extern atomic_unchecked_t sw_qps_destroyed;
26129 extern u32 mh_detected;
26130 extern u32 mh_pauses_sent;
26131 extern u32 cm_packets_sent;
26132@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26133 extern u32 cm_packets_received;
26134 extern u32 cm_packets_dropped;
26135 extern u32 cm_packets_retrans;
26136-extern atomic_t cm_listens_created;
26137-extern atomic_t cm_listens_destroyed;
26138+extern atomic_unchecked_t cm_listens_created;
26139+extern atomic_unchecked_t cm_listens_destroyed;
26140 extern u32 cm_backlog_drops;
26141-extern atomic_t cm_loopbacks;
26142-extern atomic_t cm_nodes_created;
26143-extern atomic_t cm_nodes_destroyed;
26144-extern atomic_t cm_accel_dropped_pkts;
26145-extern atomic_t cm_resets_recvd;
26146+extern atomic_unchecked_t cm_loopbacks;
26147+extern atomic_unchecked_t cm_nodes_created;
26148+extern atomic_unchecked_t cm_nodes_destroyed;
26149+extern atomic_unchecked_t cm_accel_dropped_pkts;
26150+extern atomic_unchecked_t cm_resets_recvd;
26151
26152 extern u32 int_mod_timer_init;
26153 extern u32 int_mod_cq_depth_256;
26154diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26155--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26156+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26157@@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26158 target_stat_values[++index] = mh_detected;
26159 target_stat_values[++index] = mh_pauses_sent;
26160 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26161- target_stat_values[++index] = atomic_read(&cm_connects);
26162- target_stat_values[++index] = atomic_read(&cm_accepts);
26163- target_stat_values[++index] = atomic_read(&cm_disconnects);
26164- target_stat_values[++index] = atomic_read(&cm_connecteds);
26165- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26166- target_stat_values[++index] = atomic_read(&cm_rejects);
26167- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26168- target_stat_values[++index] = atomic_read(&qps_created);
26169- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26170- target_stat_values[++index] = atomic_read(&qps_destroyed);
26171- target_stat_values[++index] = atomic_read(&cm_closes);
26172+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26173+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26174+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26175+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26176+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26177+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26178+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26179+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26180+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26181+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26182+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26183 target_stat_values[++index] = cm_packets_sent;
26184 target_stat_values[++index] = cm_packets_bounced;
26185 target_stat_values[++index] = cm_packets_created;
26186 target_stat_values[++index] = cm_packets_received;
26187 target_stat_values[++index] = cm_packets_dropped;
26188 target_stat_values[++index] = cm_packets_retrans;
26189- target_stat_values[++index] = atomic_read(&cm_listens_created);
26190- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26191+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26192+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26193 target_stat_values[++index] = cm_backlog_drops;
26194- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26195- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26196- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26197- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26198- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26199+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26200+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26201+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26202+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26203+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26204 target_stat_values[++index] = nesadapter->free_4kpbl;
26205 target_stat_values[++index] = nesadapter->free_256pbl;
26206 target_stat_values[++index] = int_mod_timer_init;
26207diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26208--- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26209+++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26210@@ -46,9 +46,9 @@
26211
26212 #include <rdma/ib_umem.h>
26213
26214-atomic_t mod_qp_timouts;
26215-atomic_t qps_created;
26216-atomic_t sw_qps_destroyed;
26217+atomic_unchecked_t mod_qp_timouts;
26218+atomic_unchecked_t qps_created;
26219+atomic_unchecked_t sw_qps_destroyed;
26220
26221 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26222
26223@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26224 if (init_attr->create_flags)
26225 return ERR_PTR(-EINVAL);
26226
26227- atomic_inc(&qps_created);
26228+ atomic_inc_unchecked(&qps_created);
26229 switch (init_attr->qp_type) {
26230 case IB_QPT_RC:
26231 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26232@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26233 struct iw_cm_event cm_event;
26234 int ret;
26235
26236- atomic_inc(&sw_qps_destroyed);
26237+ atomic_inc_unchecked(&sw_qps_destroyed);
26238 nesqp->destroyed = 1;
26239
26240 /* Blow away the connection if it exists. */
26241diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26242--- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26243+++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26244@@ -51,6 +51,7 @@
26245 #include <linux/completion.h>
26246 #include <linux/kref.h>
26247 #include <linux/sched.h>
26248+#include <linux/slab.h>
26249
26250 #include "qib_common.h"
26251 #include "qib_verbs.h"
26252diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26253--- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26254+++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26255@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26256 */
26257 static void gameport_init_port(struct gameport *gameport)
26258 {
26259- static atomic_t gameport_no = ATOMIC_INIT(0);
26260+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26261
26262 __module_get(THIS_MODULE);
26263
26264 mutex_init(&gameport->drv_mutex);
26265 device_initialize(&gameport->dev);
26266 dev_set_name(&gameport->dev, "gameport%lu",
26267- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26268+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26269 gameport->dev.bus = &gameport_bus;
26270 gameport->dev.release = gameport_release_port;
26271 if (gameport->parent)
26272diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26273--- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26274+++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26275@@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26276 */
26277 int input_register_device(struct input_dev *dev)
26278 {
26279- static atomic_t input_no = ATOMIC_INIT(0);
26280+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26281 struct input_handler *handler;
26282 const char *path;
26283 int error;
26284@@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26285 dev->setkeycode = input_default_setkeycode;
26286
26287 dev_set_name(&dev->dev, "input%ld",
26288- (unsigned long) atomic_inc_return(&input_no) - 1);
26289+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26290
26291 error = device_add(&dev->dev);
26292 if (error)
26293diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26294--- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26295+++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26296@@ -30,6 +30,7 @@
26297 #include <linux/kernel.h>
26298 #include <linux/module.h>
26299 #include <linux/slab.h>
26300+#include <linux/sched.h>
26301 #include <linux/init.h>
26302 #include <linux/input.h>
26303 #include <linux/gameport.h>
26304@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26305 unsigned char buf[SW_LENGTH];
26306 int i;
26307
26308+ pax_track_stack();
26309+
26310 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26311
26312 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26313diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26314--- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26315+++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26316@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26317
26318 static int xpad_led_probe(struct usb_xpad *xpad)
26319 {
26320- static atomic_t led_seq = ATOMIC_INIT(0);
26321+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26322 long led_no;
26323 struct xpad_led *led;
26324 struct led_classdev *led_cdev;
26325@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26326 if (!led)
26327 return -ENOMEM;
26328
26329- led_no = (long)atomic_inc_return(&led_seq) - 1;
26330+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26331
26332 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26333 led->xpad = xpad;
26334diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26335--- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26336+++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26337@@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26338
26339 spin_unlock_irq(&client->packet_lock);
26340
26341- if (copy_to_user(buffer, data, count))
26342+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26343 return -EFAULT;
26344
26345 return count;
26346diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26347--- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26348+++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26349@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26350 */
26351 static void serio_init_port(struct serio *serio)
26352 {
26353- static atomic_t serio_no = ATOMIC_INIT(0);
26354+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26355
26356 __module_get(THIS_MODULE);
26357
26358@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26359 mutex_init(&serio->drv_mutex);
26360 device_initialize(&serio->dev);
26361 dev_set_name(&serio->dev, "serio%ld",
26362- (long)atomic_inc_return(&serio_no) - 1);
26363+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26364 serio->dev.bus = &serio_bus;
26365 serio->dev.release = serio_release_port;
26366 serio->dev.groups = serio_device_attr_groups;
26367diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26368--- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26369+++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26370@@ -89,8 +89,8 @@ struct capiminor {
26371
26372 struct capi20_appl *ap;
26373 u32 ncci;
26374- atomic_t datahandle;
26375- atomic_t msgid;
26376+ atomic_unchecked_t datahandle;
26377+ atomic_unchecked_t msgid;
26378
26379 struct tty_port port;
26380 int ttyinstop;
26381@@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26382 capimsg_setu16(s, 2, mp->ap->applid);
26383 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26384 capimsg_setu8 (s, 5, CAPI_RESP);
26385- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26386+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26387 capimsg_setu32(s, 8, mp->ncci);
26388 capimsg_setu16(s, 12, datahandle);
26389 }
26390@@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26391 mp->outbytes -= len;
26392 spin_unlock_bh(&mp->outlock);
26393
26394- datahandle = atomic_inc_return(&mp->datahandle);
26395+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26396 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26397 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26398 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26399 capimsg_setu16(skb->data, 2, mp->ap->applid);
26400 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26401 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26402- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26403+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26404 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26405 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26406 capimsg_setu16(skb->data, 16, len); /* Data length */
26407diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26408--- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26409+++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26410@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26411 cs->commands_pending = 0;
26412 cs->cur_at_seq = 0;
26413 cs->gotfwver = -1;
26414- cs->open_count = 0;
26415+ local_set(&cs->open_count, 0);
26416 cs->dev = NULL;
26417 cs->tty = NULL;
26418 cs->tty_dev = NULL;
26419diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26420--- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26421+++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26422@@ -35,6 +35,7 @@
26423 #include <linux/tty_driver.h>
26424 #include <linux/list.h>
26425 #include <asm/atomic.h>
26426+#include <asm/local.h>
26427
26428 #define GIG_VERSION {0, 5, 0, 0}
26429 #define GIG_COMPAT {0, 4, 0, 0}
26430@@ -433,7 +434,7 @@ struct cardstate {
26431 spinlock_t cmdlock;
26432 unsigned curlen, cmdbytes;
26433
26434- unsigned open_count;
26435+ local_t open_count;
26436 struct tty_struct *tty;
26437 struct tasklet_struct if_wake_tasklet;
26438 unsigned control_state;
26439diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26440--- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26441+++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26442@@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26443 return -ERESTARTSYS;
26444 tty->driver_data = cs;
26445
26446- ++cs->open_count;
26447-
26448- if (cs->open_count == 1) {
26449+ if (local_inc_return(&cs->open_count) == 1) {
26450 spin_lock_irqsave(&cs->lock, flags);
26451 cs->tty = tty;
26452 spin_unlock_irqrestore(&cs->lock, flags);
26453@@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26454
26455 if (!cs->connected)
26456 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26457- else if (!cs->open_count)
26458+ else if (!local_read(&cs->open_count))
26459 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26460 else {
26461- if (!--cs->open_count) {
26462+ if (!local_dec_return(&cs->open_count)) {
26463 spin_lock_irqsave(&cs->lock, flags);
26464 cs->tty = NULL;
26465 spin_unlock_irqrestore(&cs->lock, flags);
26466@@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26467 if (!cs->connected) {
26468 gig_dbg(DEBUG_IF, "not connected");
26469 retval = -ENODEV;
26470- } else if (!cs->open_count)
26471+ } else if (!local_read(&cs->open_count))
26472 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26473 else {
26474 retval = 0;
26475@@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26476 retval = -ENODEV;
26477 goto done;
26478 }
26479- if (!cs->open_count) {
26480+ if (!local_read(&cs->open_count)) {
26481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26482 retval = -ENODEV;
26483 goto done;
26484@@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26485 if (!cs->connected) {
26486 gig_dbg(DEBUG_IF, "not connected");
26487 retval = -ENODEV;
26488- } else if (!cs->open_count)
26489+ } else if (!local_read(&cs->open_count))
26490 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26491 else if (cs->mstate != MS_LOCKED) {
26492 dev_warn(cs->dev, "can't write to unlocked device\n");
26493@@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26494
26495 if (!cs->connected)
26496 gig_dbg(DEBUG_IF, "not connected");
26497- else if (!cs->open_count)
26498+ else if (!local_read(&cs->open_count))
26499 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26500 else if (cs->mstate != MS_LOCKED)
26501 dev_warn(cs->dev, "can't write to unlocked device\n");
26502@@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26503
26504 if (!cs->connected)
26505 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26506- else if (!cs->open_count)
26507+ else if (!local_read(&cs->open_count))
26508 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26509 else
26510 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26511@@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26512
26513 if (!cs->connected)
26514 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26515- else if (!cs->open_count)
26516+ else if (!local_read(&cs->open_count))
26517 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26518 else
26519 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26520@@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26521 goto out;
26522 }
26523
26524- if (!cs->open_count) {
26525+ if (!local_read(&cs->open_count)) {
26526 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26527 goto out;
26528 }
26529diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26530--- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26531+++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26532@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26533 }
26534 if (left) {
26535 if (t4file->user) {
26536- if (copy_from_user(buf, dp, left))
26537+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26538 return -EFAULT;
26539 } else {
26540 memcpy(buf, dp, left);
26541@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26542 }
26543 if (left) {
26544 if (config->user) {
26545- if (copy_from_user(buf, dp, left))
26546+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26547 return -EFAULT;
26548 } else {
26549 memcpy(buf, dp, left);
26550diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26551--- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26552+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26553@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26554 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26555 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26556
26557+ pax_track_stack();
26558
26559 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26560 {
26561diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26562--- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26563+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26564@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26565 IDI_SYNC_REQ req;
26566 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26567
26568+ pax_track_stack();
26569+
26570 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26571
26572 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26573diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26574--- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26575+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26576@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26577 IDI_SYNC_REQ req;
26578 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26579
26580+ pax_track_stack();
26581+
26582 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26583
26584 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26585diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26586--- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26587+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26588@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26589 IDI_SYNC_REQ req;
26590 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26591
26592+ pax_track_stack();
26593+
26594 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26595
26596 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26597diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26598--- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26599+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26600@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26601 } diva_didd_add_adapter_t;
26602 typedef struct _diva_didd_remove_adapter {
26603 IDI_CALL p_request;
26604-} diva_didd_remove_adapter_t;
26605+} __no_const diva_didd_remove_adapter_t;
26606 typedef struct _diva_didd_read_adapter_array {
26607 void * buffer;
26608 dword length;
26609diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26610--- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26611+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26612@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26613 IDI_SYNC_REQ req;
26614 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26615
26616+ pax_track_stack();
26617+
26618 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26619
26620 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26621diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26622--- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26623+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26624@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26625 dword d;
26626 word w;
26627
26628+ pax_track_stack();
26629+
26630 a = plci->adapter;
26631 Id = ((word)plci->Id<<8)|a->Id;
26632 PUT_WORD(&SS_Ind[4],0x0000);
26633@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26634 word j, n, w;
26635 dword d;
26636
26637+ pax_track_stack();
26638+
26639
26640 for(i=0;i<8;i++) bp_parms[i].length = 0;
26641 for(i=0;i<2;i++) global_config[i].length = 0;
26642@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26643 const byte llc3[] = {4,3,2,2,6,6,0};
26644 const byte header[] = {0,2,3,3,0,0,0};
26645
26646+ pax_track_stack();
26647+
26648 for(i=0;i<8;i++) bp_parms[i].length = 0;
26649 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26650 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26651@@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26652 word appl_number_group_type[MAX_APPL];
26653 PLCI *auxplci;
26654
26655+ pax_track_stack();
26656+
26657 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26658
26659 if(!a->group_optimization_enabled)
26660diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26661--- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26662+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26663@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26664 IDI_SYNC_REQ req;
26665 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26666
26667+ pax_track_stack();
26668+
26669 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26670
26671 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26672diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26673--- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26674+++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26675@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26676 typedef struct _diva_os_idi_adapter_interface {
26677 diva_init_card_proc_t cleanup_adapter_proc;
26678 diva_cmd_card_proc_t cmd_proc;
26679-} diva_os_idi_adapter_interface_t;
26680+} __no_const diva_os_idi_adapter_interface_t;
26681
26682 typedef struct _diva_os_xdi_adapter {
26683 struct list_head link;
26684diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26685--- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26686+++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26687@@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26688 } iocpar;
26689 void __user *argp = (void __user *)arg;
26690
26691+ pax_track_stack();
26692+
26693 #define name iocpar.name
26694 #define bname iocpar.bname
26695 #define iocts iocpar.iocts
26696diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26697--- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26698+++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26699@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26700 if (count > len)
26701 count = len;
26702 if (user) {
26703- if (copy_from_user(msg, buf, count))
26704+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26705 return -EFAULT;
26706 } else
26707 memcpy(msg, buf, count);
26708diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26709--- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26710+++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26711@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26712 * it's worked so far. The end address needs +1 because __get_vm_area
26713 * allocates an extra guard page, so we need space for that.
26714 */
26715+
26716+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26717+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26718+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26719+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26720+#else
26721 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26722 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26723 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26724+#endif
26725+
26726 if (!switcher_vma) {
26727 err = -ENOMEM;
26728 printk("lguest: could not map switcher pages high\n");
26729@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26730 * Now the Switcher is mapped at the right address, we can't fail!
26731 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26732 */
26733- memcpy(switcher_vma->addr, start_switcher_text,
26734+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26735 end_switcher_text - start_switcher_text);
26736
26737 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26738diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26739--- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26740+++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26741@@ -59,7 +59,7 @@ static struct {
26742 /* Offset from where switcher.S was compiled to where we've copied it */
26743 static unsigned long switcher_offset(void)
26744 {
26745- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26746+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26747 }
26748
26749 /* This cpu's struct lguest_pages. */
26750@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26751 * These copies are pretty cheap, so we do them unconditionally: */
26752 /* Save the current Host top-level page directory.
26753 */
26754+
26755+#ifdef CONFIG_PAX_PER_CPU_PGD
26756+ pages->state.host_cr3 = read_cr3();
26757+#else
26758 pages->state.host_cr3 = __pa(current->mm->pgd);
26759+#endif
26760+
26761 /*
26762 * Set up the Guest's page tables to see this CPU's pages (and no
26763 * other CPU's pages).
26764@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26765 * compiled-in switcher code and the high-mapped copy we just made.
26766 */
26767 for (i = 0; i < IDT_ENTRIES; i++)
26768- default_idt_entries[i] += switcher_offset();
26769+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26770
26771 /*
26772 * Set up the Switcher's per-cpu areas.
26773@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26774 * it will be undisturbed when we switch. To change %cs and jump we
26775 * need this structure to feed to Intel's "lcall" instruction.
26776 */
26777- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26778+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26779 lguest_entry.segment = LGUEST_CS;
26780
26781 /*
26782diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26783--- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26784+++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26785@@ -87,6 +87,7 @@
26786 #include <asm/page.h>
26787 #include <asm/segment.h>
26788 #include <asm/lguest.h>
26789+#include <asm/processor-flags.h>
26790
26791 // We mark the start of the code to copy
26792 // It's placed in .text tho it's never run here
26793@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26794 // Changes type when we load it: damn Intel!
26795 // For after we switch over our page tables
26796 // That entry will be read-only: we'd crash.
26797+
26798+#ifdef CONFIG_PAX_KERNEXEC
26799+ mov %cr0, %edx
26800+ xor $X86_CR0_WP, %edx
26801+ mov %edx, %cr0
26802+#endif
26803+
26804 movl $(GDT_ENTRY_TSS*8), %edx
26805 ltr %dx
26806
26807@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26808 // Let's clear it again for our return.
26809 // The GDT descriptor of the Host
26810 // Points to the table after two "size" bytes
26811- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26812+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26813 // Clear "used" from type field (byte 5, bit 2)
26814- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26815+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26816+
26817+#ifdef CONFIG_PAX_KERNEXEC
26818+ mov %cr0, %eax
26819+ xor $X86_CR0_WP, %eax
26820+ mov %eax, %cr0
26821+#endif
26822
26823 // Once our page table's switched, the Guest is live!
26824 // The Host fades as we run this final step.
26825@@ -295,13 +309,12 @@ deliver_to_host:
26826 // I consulted gcc, and it gave
26827 // These instructions, which I gladly credit:
26828 leal (%edx,%ebx,8), %eax
26829- movzwl (%eax),%edx
26830- movl 4(%eax), %eax
26831- xorw %ax, %ax
26832- orl %eax, %edx
26833+ movl 4(%eax), %edx
26834+ movw (%eax), %dx
26835 // Now the address of the handler's in %edx
26836 // We call it now: its "iret" drops us home.
26837- jmp *%edx
26838+ ljmp $__KERNEL_CS, $1f
26839+1: jmp *%edx
26840
26841 // Every interrupt can come to us here
26842 // But we must truly tell each apart.
26843diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26844--- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26845+++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26846@@ -162,9 +162,9 @@ struct mapped_device {
26847 /*
26848 * Event handling.
26849 */
26850- atomic_t event_nr;
26851+ atomic_unchecked_t event_nr;
26852 wait_queue_head_t eventq;
26853- atomic_t uevent_seq;
26854+ atomic_unchecked_t uevent_seq;
26855 struct list_head uevent_list;
26856 spinlock_t uevent_lock; /* Protect access to uevent_list */
26857
26858@@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26859 rwlock_init(&md->map_lock);
26860 atomic_set(&md->holders, 1);
26861 atomic_set(&md->open_count, 0);
26862- atomic_set(&md->event_nr, 0);
26863- atomic_set(&md->uevent_seq, 0);
26864+ atomic_set_unchecked(&md->event_nr, 0);
26865+ atomic_set_unchecked(&md->uevent_seq, 0);
26866 INIT_LIST_HEAD(&md->uevent_list);
26867 spin_lock_init(&md->uevent_lock);
26868
26869@@ -1971,7 +1971,7 @@ static void event_callback(void *context
26870
26871 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26872
26873- atomic_inc(&md->event_nr);
26874+ atomic_inc_unchecked(&md->event_nr);
26875 wake_up(&md->eventq);
26876 }
26877
26878@@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26879
26880 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26881 {
26882- return atomic_add_return(1, &md->uevent_seq);
26883+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26884 }
26885
26886 uint32_t dm_get_event_nr(struct mapped_device *md)
26887 {
26888- return atomic_read(&md->event_nr);
26889+ return atomic_read_unchecked(&md->event_nr);
26890 }
26891
26892 int dm_wait_event(struct mapped_device *md, int event_nr)
26893 {
26894 return wait_event_interruptible(md->eventq,
26895- (event_nr != atomic_read(&md->event_nr)));
26896+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26897 }
26898
26899 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26900diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26901--- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26902+++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26903@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26904 cmd == DM_LIST_VERSIONS_CMD)
26905 return 0;
26906
26907- if ((cmd == DM_DEV_CREATE_CMD)) {
26908+ if (cmd == DM_DEV_CREATE_CMD) {
26909 if (!*param->name) {
26910 DMWARN("name not supplied when creating device");
26911 return -EINVAL;
26912diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26913--- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26914+++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26915@@ -42,7 +42,7 @@ enum dm_raid1_error {
26916
26917 struct mirror {
26918 struct mirror_set *ms;
26919- atomic_t error_count;
26920+ atomic_unchecked_t error_count;
26921 unsigned long error_type;
26922 struct dm_dev *dev;
26923 sector_t offset;
26924@@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26925 struct mirror *m;
26926
26927 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26928- if (!atomic_read(&m->error_count))
26929+ if (!atomic_read_unchecked(&m->error_count))
26930 return m;
26931
26932 return NULL;
26933@@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26934 * simple way to tell if a device has encountered
26935 * errors.
26936 */
26937- atomic_inc(&m->error_count);
26938+ atomic_inc_unchecked(&m->error_count);
26939
26940 if (test_and_set_bit(error_type, &m->error_type))
26941 return;
26942@@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26943 struct mirror *m = get_default_mirror(ms);
26944
26945 do {
26946- if (likely(!atomic_read(&m->error_count)))
26947+ if (likely(!atomic_read_unchecked(&m->error_count)))
26948 return m;
26949
26950 if (m-- == ms->mirror)
26951@@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26952 {
26953 struct mirror *default_mirror = get_default_mirror(m->ms);
26954
26955- return !atomic_read(&default_mirror->error_count);
26956+ return !atomic_read_unchecked(&default_mirror->error_count);
26957 }
26958
26959 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26960@@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
26961 */
26962 if (likely(region_in_sync(ms, region, 1)))
26963 m = choose_mirror(ms, bio->bi_sector);
26964- else if (m && atomic_read(&m->error_count))
26965+ else if (m && atomic_read_unchecked(&m->error_count))
26966 m = NULL;
26967
26968 if (likely(m))
26969@@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
26970 }
26971
26972 ms->mirror[mirror].ms = ms;
26973- atomic_set(&(ms->mirror[mirror].error_count), 0);
26974+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26975 ms->mirror[mirror].error_type = 0;
26976 ms->mirror[mirror].offset = offset;
26977
26978@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26979 */
26980 static char device_status_char(struct mirror *m)
26981 {
26982- if (!atomic_read(&(m->error_count)))
26983+ if (!atomic_read_unchecked(&(m->error_count)))
26984 return 'A';
26985
26986 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26987diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
26988--- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
26989+++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
26990@@ -20,7 +20,7 @@ struct stripe {
26991 struct dm_dev *dev;
26992 sector_t physical_start;
26993
26994- atomic_t error_count;
26995+ atomic_unchecked_t error_count;
26996 };
26997
26998 struct stripe_c {
26999@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27000 kfree(sc);
27001 return r;
27002 }
27003- atomic_set(&(sc->stripe[i].error_count), 0);
27004+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27005 }
27006
27007 ti->private = sc;
27008@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27009 DMEMIT("%d ", sc->stripes);
27010 for (i = 0; i < sc->stripes; i++) {
27011 DMEMIT("%s ", sc->stripe[i].dev->name);
27012- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27013+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27014 'D' : 'A';
27015 }
27016 buffer[i] = '\0';
27017@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27018 */
27019 for (i = 0; i < sc->stripes; i++)
27020 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27021- atomic_inc(&(sc->stripe[i].error_count));
27022- if (atomic_read(&(sc->stripe[i].error_count)) <
27023+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
27024+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27025 DM_IO_ERROR_THRESHOLD)
27026 schedule_work(&sc->trigger_event);
27027 }
27028diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27029--- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27030+++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27031@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27032 if (!dev_size)
27033 return 0;
27034
27035- if ((start >= dev_size) || (start + len > dev_size)) {
27036+ if ((start >= dev_size) || (len > dev_size - start)) {
27037 DMWARN("%s: %s too small for target: "
27038 "start=%llu, len=%llu, dev_size=%llu",
27039 dm_device_name(ti->table->md), bdevname(bdev, b),
27040diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27041--- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27042+++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27043@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27044 * start build, activate spare
27045 */
27046 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27047-static atomic_t md_event_count;
27048+static atomic_unchecked_t md_event_count;
27049 void md_new_event(mddev_t *mddev)
27050 {
27051- atomic_inc(&md_event_count);
27052+ atomic_inc_unchecked(&md_event_count);
27053 wake_up(&md_event_waiters);
27054 }
27055 EXPORT_SYMBOL_GPL(md_new_event);
27056@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27057 */
27058 static void md_new_event_inintr(mddev_t *mddev)
27059 {
27060- atomic_inc(&md_event_count);
27061+ atomic_inc_unchecked(&md_event_count);
27062 wake_up(&md_event_waiters);
27063 }
27064
27065@@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27066
27067 rdev->preferred_minor = 0xffff;
27068 rdev->data_offset = le64_to_cpu(sb->data_offset);
27069- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27070+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27071
27072 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27073 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27074@@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27075 else
27076 sb->resync_offset = cpu_to_le64(0);
27077
27078- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27079+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27080
27081 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27082 sb->size = cpu_to_le64(mddev->dev_sectors);
27083@@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27084 static ssize_t
27085 errors_show(mdk_rdev_t *rdev, char *page)
27086 {
27087- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27088+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27089 }
27090
27091 static ssize_t
27092@@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27093 char *e;
27094 unsigned long n = simple_strtoul(buf, &e, 10);
27095 if (*buf && (*e == 0 || *e == '\n')) {
27096- atomic_set(&rdev->corrected_errors, n);
27097+ atomic_set_unchecked(&rdev->corrected_errors, n);
27098 return len;
27099 }
27100 return -EINVAL;
27101@@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27102 rdev->last_read_error.tv_sec = 0;
27103 rdev->last_read_error.tv_nsec = 0;
27104 atomic_set(&rdev->nr_pending, 0);
27105- atomic_set(&rdev->read_errors, 0);
27106- atomic_set(&rdev->corrected_errors, 0);
27107+ atomic_set_unchecked(&rdev->read_errors, 0);
27108+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27109
27110 INIT_LIST_HEAD(&rdev->same_set);
27111 init_waitqueue_head(&rdev->blocked_wait);
27112@@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27113
27114 spin_unlock(&pers_lock);
27115 seq_printf(seq, "\n");
27116- mi->event = atomic_read(&md_event_count);
27117+ mi->event = atomic_read_unchecked(&md_event_count);
27118 return 0;
27119 }
27120 if (v == (void*)2) {
27121@@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27122 chunk_kb ? "KB" : "B");
27123 if (bitmap->file) {
27124 seq_printf(seq, ", file: ");
27125- seq_path(seq, &bitmap->file->f_path, " \t\n");
27126+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27127 }
27128
27129 seq_printf(seq, "\n");
27130@@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27131 else {
27132 struct seq_file *p = file->private_data;
27133 p->private = mi;
27134- mi->event = atomic_read(&md_event_count);
27135+ mi->event = atomic_read_unchecked(&md_event_count);
27136 }
27137 return error;
27138 }
27139@@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27140 /* always allow read */
27141 mask = POLLIN | POLLRDNORM;
27142
27143- if (mi->event != atomic_read(&md_event_count))
27144+ if (mi->event != atomic_read_unchecked(&md_event_count))
27145 mask |= POLLERR | POLLPRI;
27146 return mask;
27147 }
27148@@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27149 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27150 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27151 (int)part_stat_read(&disk->part0, sectors[1]) -
27152- atomic_read(&disk->sync_io);
27153+ atomic_read_unchecked(&disk->sync_io);
27154 /* sync IO will cause sync_io to increase before the disk_stats
27155 * as sync_io is counted when a request starts, and
27156 * disk_stats is counted when it completes.
27157diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27158--- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27159+++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27160@@ -97,13 +97,13 @@ struct mdk_rdev_s
27161 * only maintained for arrays that
27162 * support hot removal
27163 */
27164- atomic_t read_errors; /* number of consecutive read errors that
27165+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27166 * we have tried to ignore.
27167 */
27168 struct timespec last_read_error; /* monotonic time since our
27169 * last read error
27170 */
27171- atomic_t corrected_errors; /* number of corrected read errors,
27172+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27173 * for reporting to userspace and storing
27174 * in superblock.
27175 */
27176@@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27177
27178 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27179 {
27180- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27181+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27182 }
27183
27184 struct mdk_personality
27185diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27186--- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27187+++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27188@@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27189 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27190 set_bit(R10BIO_Uptodate, &r10_bio->state);
27191 else {
27192- atomic_add(r10_bio->sectors,
27193+ atomic_add_unchecked(r10_bio->sectors,
27194 &conf->mirrors[d].rdev->corrected_errors);
27195 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27196 md_error(r10_bio->mddev,
27197@@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27198 {
27199 struct timespec cur_time_mon;
27200 unsigned long hours_since_last;
27201- unsigned int read_errors = atomic_read(&rdev->read_errors);
27202+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27203
27204 ktime_get_ts(&cur_time_mon);
27205
27206@@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27207 * overflowing the shift of read_errors by hours_since_last.
27208 */
27209 if (hours_since_last >= 8 * sizeof(read_errors))
27210- atomic_set(&rdev->read_errors, 0);
27211+ atomic_set_unchecked(&rdev->read_errors, 0);
27212 else
27213- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27214+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27215 }
27216
27217 /*
27218@@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27219 }
27220
27221 check_decay_read_errors(mddev, rdev);
27222- atomic_inc(&rdev->read_errors);
27223- cur_read_error_count = atomic_read(&rdev->read_errors);
27224+ atomic_inc_unchecked(&rdev->read_errors);
27225+ cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27226 if (cur_read_error_count > max_read_errors) {
27227 rcu_read_unlock();
27228 printk(KERN_NOTICE
27229@@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27230 test_bit(In_sync, &rdev->flags)) {
27231 atomic_inc(&rdev->nr_pending);
27232 rcu_read_unlock();
27233- atomic_add(s, &rdev->corrected_errors);
27234+ atomic_add_unchecked(s, &rdev->corrected_errors);
27235 if (sync_page_io(rdev,
27236 r10_bio->devs[sl].addr +
27237 sect,
27238diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27239--- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27240+++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27241@@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27242 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27243 continue;
27244 rdev = conf->mirrors[d].rdev;
27245- atomic_add(s, &rdev->corrected_errors);
27246+ atomic_add_unchecked(s, &rdev->corrected_errors);
27247 if (sync_page_io(rdev,
27248 sect,
27249 s<<9,
27250@@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27251 /* Well, this device is dead */
27252 md_error(mddev, rdev);
27253 else {
27254- atomic_add(s, &rdev->corrected_errors);
27255+ atomic_add_unchecked(s, &rdev->corrected_errors);
27256 printk(KERN_INFO
27257 "md/raid1:%s: read error corrected "
27258 "(%d sectors at %llu on %s)\n",
27259diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27260--- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27261+++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27262@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27263 bi->bi_next = NULL;
27264 if ((rw & WRITE) &&
27265 test_bit(R5_ReWrite, &sh->dev[i].flags))
27266- atomic_add(STRIPE_SECTORS,
27267+ atomic_add_unchecked(STRIPE_SECTORS,
27268 &rdev->corrected_errors);
27269 generic_make_request(bi);
27270 } else {
27271@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27272 clear_bit(R5_ReadError, &sh->dev[i].flags);
27273 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27274 }
27275- if (atomic_read(&conf->disks[i].rdev->read_errors))
27276- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27277+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27278+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27279 } else {
27280 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27281 int retry = 0;
27282 rdev = conf->disks[i].rdev;
27283
27284 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27285- atomic_inc(&rdev->read_errors);
27286+ atomic_inc_unchecked(&rdev->read_errors);
27287 if (conf->mddev->degraded >= conf->max_degraded)
27288 printk_rl(KERN_WARNING
27289 "md/raid:%s: read error not correctable "
27290@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27291 (unsigned long long)(sh->sector
27292 + rdev->data_offset),
27293 bdn);
27294- else if (atomic_read(&rdev->read_errors)
27295+ else if (atomic_read_unchecked(&rdev->read_errors)
27296 > conf->max_nr_stripes)
27297 printk(KERN_WARNING
27298 "md/raid:%s: Too many read errors, failing device %s.\n",
27299@@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27300 sector_t r_sector;
27301 struct stripe_head sh2;
27302
27303+ pax_track_stack();
27304
27305 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27306 stripe = new_sector;
27307diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27308--- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27309+++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27310@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27311
27312 int x[32], y[32], w[32], h[32];
27313
27314+ pax_track_stack();
27315+
27316 /* clear out memory */
27317 memset(&line_list[0], 0x00, sizeof(u32)*32);
27318 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27319diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27320--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27321+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27322@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27323 u8 buf[HOST_LINK_BUF_SIZE];
27324 int i;
27325
27326+ pax_track_stack();
27327+
27328 dprintk("%s\n", __func__);
27329
27330 /* check if we have space for a link buf in the rx_buffer */
27331@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27332 unsigned long timeout;
27333 int written;
27334
27335+ pax_track_stack();
27336+
27337 dprintk("%s\n", __func__);
27338
27339 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27340diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27341--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27342+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27343@@ -73,7 +73,7 @@ struct dvb_demux_feed {
27344 union {
27345 dmx_ts_cb ts;
27346 dmx_section_cb sec;
27347- } cb;
27348+ } __no_const cb;
27349
27350 struct dvb_demux *demux;
27351 void *priv;
27352diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27353--- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27354+++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27355@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27356 const struct dvb_device *template, void *priv, int type)
27357 {
27358 struct dvb_device *dvbdev;
27359- struct file_operations *dvbdevfops;
27360+ file_operations_no_const *dvbdevfops;
27361 struct device *clsdev;
27362 int minor;
27363 int id;
27364diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27365--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27366+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27367@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27368 struct dib0700_adapter_state {
27369 int (*set_param_save) (struct dvb_frontend *,
27370 struct dvb_frontend_parameters *);
27371-};
27372+} __no_const;
27373
27374 static int dib7070_set_param_override(struct dvb_frontend *fe,
27375 struct dvb_frontend_parameters *fep)
27376diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27377--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27378+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27379@@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27380
27381 u8 buf[260];
27382
27383+ pax_track_stack();
27384+
27385 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27386 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27387 hx.addr, hx.len, hx.chk);
27388diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27389--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27390+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27391@@ -95,7 +95,7 @@ struct su3000_state {
27392
27393 struct s6x0_state {
27394 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27395-};
27396+} __no_const;
27397
27398 /* debug */
27399 static int dvb_usb_dw2102_debug;
27400diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27401--- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27402+++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27403@@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27404 packet_size = 0x31;
27405 len_in = 1;
27406
27407+ pax_track_stack();
27408
27409 info("FRM Starting Firmware Download");
27410
27411@@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27412 int ret = 0, len_in;
27413 u8 data[512] = {0};
27414
27415+ pax_track_stack();
27416+
27417 data[0] = 0x0a;
27418 len_in = 1;
27419 info("FRM Firmware Cold Reset");
27420diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27421--- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27422+++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27423@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27424 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27425 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27426 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27427-};
27428+} __no_const;
27429
27430 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27431 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27432diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27433--- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27434+++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27435@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27436 int ret = -1;
27437 int sync;
27438
27439+ pax_track_stack();
27440+
27441 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27442
27443 fcp = 3000;
27444diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27445--- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27446+++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27447@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27448 u8 tudata[585];
27449 int i;
27450
27451+ pax_track_stack();
27452+
27453 dprintk("Firmware is %zd bytes\n",fw->size);
27454
27455 /* Get eprom data */
27456diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27457--- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27458+++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27459@@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27460 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27461 mutex_unlock(&dev->lock);
27462
27463- if (copy_to_user(data, readbuf, i))
27464+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27465 return -EFAULT;
27466 return i;
27467 }
27468diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27469--- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27470+++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27471@@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27472
27473 int rc_register_device(struct rc_dev *dev)
27474 {
27475- static atomic_t devno = ATOMIC_INIT(0);
27476+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
27477 struct rc_map *rc_map;
27478 const char *path;
27479 int rc;
27480@@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27481 if (dev->close)
27482 dev->input_dev->close = ir_close;
27483
27484- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27485+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27486 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27487 dev_set_drvdata(&dev->dev, dev);
27488 rc = device_add(&dev->dev);
27489diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27490--- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27491+++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27492@@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27493
27494 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27495
27496-static atomic_t cx18_instance = ATOMIC_INIT(0);
27497+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27498
27499 /* Parameter declarations */
27500 static int cardtype[CX18_MAX_CARDS];
27501@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27502 struct i2c_client c;
27503 u8 eedata[256];
27504
27505+ pax_track_stack();
27506+
27507 memset(&c, 0, sizeof(c));
27508 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27509 c.adapter = &cx->i2c_adap[0];
27510@@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27511 struct cx18 *cx;
27512
27513 /* FIXME - module parameter arrays constrain max instances */
27514- i = atomic_inc_return(&cx18_instance) - 1;
27515+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27516 if (i >= CX18_MAX_CARDS) {
27517 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27518 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27519diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27520--- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27521+++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27522@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27523 bool handle = false;
27524 struct ir_raw_event ir_core_event[64];
27525
27526+ pax_track_stack();
27527+
27528 do {
27529 num = 0;
27530 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27531diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27532--- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27533+++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27534@@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27535 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27536
27537 /* ivtv instance counter */
27538-static atomic_t ivtv_instance = ATOMIC_INIT(0);
27539+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27540
27541 /* Parameter declarations */
27542 static int cardtype[IVTV_MAX_CARDS];
27543diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27544--- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27545+++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27546@@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27547 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27548
27549 do_gettimeofday(&vb->ts);
27550- vb->field_count = atomic_add_return(2, &fh->field_count);
27551+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27552 if (csr & csr_error) {
27553 vb->state = VIDEOBUF_ERROR;
27554 if (!atomic_read(&fh->cam->in_reset)) {
27555diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27556--- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27557+++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27558@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27559 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27560 struct videobuf_queue vbq;
27561 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27562- atomic_t field_count; /* field counter for videobuf_buffer */
27563+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27564 /* accessing cam here doesn't need serialisation: it's constant */
27565 struct omap24xxcam_device *cam;
27566 };
27567diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27568--- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27569+++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27570@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27571 u8 *eeprom;
27572 struct tveeprom tvdata;
27573
27574+ pax_track_stack();
27575+
27576 memset(&tvdata,0,sizeof(tvdata));
27577
27578 eeprom = pvr2_eeprom_fetch(hdw);
27579diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27580--- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27581+++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27582@@ -196,7 +196,7 @@ struct pvr2_hdw {
27583
27584 /* I2C stuff */
27585 struct i2c_adapter i2c_adap;
27586- struct i2c_algorithm i2c_algo;
27587+ i2c_algorithm_no_const i2c_algo;
27588 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27589 int i2c_cx25840_hack_state;
27590 int i2c_linked;
27591diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27592--- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27593+++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27594@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27595 unsigned char localPAT[256];
27596 unsigned char localPMT[256];
27597
27598+ pax_track_stack();
27599+
27600 /* Set video format - must be done first as it resets other settings */
27601 set_reg8(client, 0x41, h->video_format);
27602
27603diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27604--- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27605+++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27606@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27607 u8 tmp[512];
27608 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27609
27610+ pax_track_stack();
27611+
27612 /* While any outstand message on the bus exists... */
27613 do {
27614
27615@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27616 u8 tmp[512];
27617 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27618
27619+ pax_track_stack();
27620+
27621 while (loop) {
27622
27623 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27624diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27625--- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27626+++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27627@@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27628
27629 /* Platform device functions */
27630
27631-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27632+static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27633 .vidioc_querycap = timblogiw_querycap,
27634 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27635 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27636@@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27637 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27638 };
27639
27640-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27641+static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27642 .owner = THIS_MODULE,
27643 .open = timblogiw_open,
27644 .release = timblogiw_close,
27645diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27646--- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27647+++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27648@@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27649 unsigned char rv, gv, bv;
27650 static unsigned char *Y, *U, *V;
27651
27652+ pax_track_stack();
27653+
27654 frame = usbvision->cur_frame;
27655 image_size = frame->frmwidth * frame->frmheight;
27656 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27657diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27658--- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27659+++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27660@@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27661 EXPORT_SYMBOL_GPL(v4l2_device_put);
27662
27663 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27664- atomic_t *instance)
27665+ atomic_unchecked_t *instance)
27666 {
27667- int num = atomic_inc_return(instance) - 1;
27668+ int num = atomic_inc_return_unchecked(instance) - 1;
27669 int len = strlen(basename);
27670
27671 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27672diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27673--- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27674+++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27675@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27676 {
27677 struct videobuf_queue q;
27678
27679+ pax_track_stack();
27680+
27681 /* Required to make generic handler to call __videobuf_alloc */
27682 q.int_ops = &sg_ops;
27683
27684diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27685--- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27686+++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27687@@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27688 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27689 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27690
27691+#ifdef CONFIG_GRKERNSEC_HIDESYM
27692+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27693+#else
27694 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27695 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27696+#endif
27697+
27698 /*
27699 * Rounding UP to nearest 4-kB boundary here...
27700 */
27701diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27702--- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27703+++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27704@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27705 return 0;
27706 }
27707
27708+static inline void
27709+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27710+{
27711+ if (phy_info->port_details) {
27712+ phy_info->port_details->rphy = rphy;
27713+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27714+ ioc->name, rphy));
27715+ }
27716+
27717+ if (rphy) {
27718+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27719+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27720+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27721+ ioc->name, rphy, rphy->dev.release));
27722+ }
27723+}
27724+
27725 /* no mutex */
27726 static void
27727 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27728@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27729 return NULL;
27730 }
27731
27732-static inline void
27733-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27734-{
27735- if (phy_info->port_details) {
27736- phy_info->port_details->rphy = rphy;
27737- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27738- ioc->name, rphy));
27739- }
27740-
27741- if (rphy) {
27742- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27743- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27744- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27745- ioc->name, rphy, rphy->dev.release));
27746- }
27747-}
27748-
27749 static inline struct sas_port *
27750 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27751 {
27752diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27753--- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27754+++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27755@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27756
27757 h = shost_priv(SChost);
27758
27759- if (h) {
27760- if (h->info_kbuf == NULL)
27761- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27762- return h->info_kbuf;
27763- h->info_kbuf[0] = '\0';
27764+ if (!h)
27765+ return NULL;
27766
27767- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27768- h->info_kbuf[size-1] = '\0';
27769- }
27770+ if (h->info_kbuf == NULL)
27771+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27772+ return h->info_kbuf;
27773+ h->info_kbuf[0] = '\0';
27774+
27775+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27776+ h->info_kbuf[size-1] = '\0';
27777
27778 return h->info_kbuf;
27779 }
27780diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27781--- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27782+++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27783@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27784 struct i2o_message *msg;
27785 unsigned int iop;
27786
27787+ pax_track_stack();
27788+
27789 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27790 return -EFAULT;
27791
27792diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27793--- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27794+++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27795@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27796 "Array Controller Device"
27797 };
27798
27799-static char *chtostr(u8 * chars, int n)
27800-{
27801- char tmp[256];
27802- tmp[0] = 0;
27803- return strncat(tmp, (char *)chars, n);
27804-}
27805-
27806 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27807 char *group)
27808 {
27809@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27810
27811 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27812 seq_printf(seq, "%-#8x", ddm_table.module_id);
27813- seq_printf(seq, "%-29s",
27814- chtostr(ddm_table.module_name_version, 28));
27815+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27816 seq_printf(seq, "%9d ", ddm_table.data_size);
27817 seq_printf(seq, "%8d", ddm_table.code_size);
27818
27819@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27820
27821 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27822 seq_printf(seq, "%-#8x", dst->module_id);
27823- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27824- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27825+ seq_printf(seq, "%-.28s", dst->module_name_version);
27826+ seq_printf(seq, "%-.8s", dst->date);
27827 seq_printf(seq, "%8d ", dst->module_size);
27828 seq_printf(seq, "%8d ", dst->mpb_size);
27829 seq_printf(seq, "0x%04x", dst->module_flags);
27830@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27831 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27832 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27833 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27834- seq_printf(seq, "Vendor info : %s\n",
27835- chtostr((u8 *) (work32 + 2), 16));
27836- seq_printf(seq, "Product info : %s\n",
27837- chtostr((u8 *) (work32 + 6), 16));
27838- seq_printf(seq, "Description : %s\n",
27839- chtostr((u8 *) (work32 + 10), 16));
27840- seq_printf(seq, "Product rev. : %s\n",
27841- chtostr((u8 *) (work32 + 14), 8));
27842+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27843+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27844+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27845+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27846
27847 seq_printf(seq, "Serial number : ");
27848 print_serial_number(seq, (u8 *) (work32 + 16),
27849@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27850 }
27851
27852 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27853- seq_printf(seq, "Module name : %s\n",
27854- chtostr(result.module_name, 24));
27855- seq_printf(seq, "Module revision : %s\n",
27856- chtostr(result.module_rev, 8));
27857+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27858+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27859
27860 seq_printf(seq, "Serial number : ");
27861 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27862@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27863 return 0;
27864 }
27865
27866- seq_printf(seq, "Device name : %s\n",
27867- chtostr(result.device_name, 64));
27868- seq_printf(seq, "Service name : %s\n",
27869- chtostr(result.service_name, 64));
27870- seq_printf(seq, "Physical name : %s\n",
27871- chtostr(result.physical_location, 64));
27872- seq_printf(seq, "Instance number : %s\n",
27873- chtostr(result.instance_number, 4));
27874+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27875+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27876+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27877+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27878
27879 return 0;
27880 }
27881diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27882--- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27883+++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27884@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27885
27886 spin_lock_irqsave(&c->context_list_lock, flags);
27887
27888- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27889- atomic_inc(&c->context_list_counter);
27890+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27891+ atomic_inc_unchecked(&c->context_list_counter);
27892
27893- entry->context = atomic_read(&c->context_list_counter);
27894+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27895
27896 list_add(&entry->list, &c->context_list);
27897
27898@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27899
27900 #if BITS_PER_LONG == 64
27901 spin_lock_init(&c->context_list_lock);
27902- atomic_set(&c->context_list_counter, 0);
27903+ atomic_set_unchecked(&c->context_list_counter, 0);
27904 INIT_LIST_HEAD(&c->context_list);
27905 #endif
27906
27907diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27908--- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27909+++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27910@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27911
27912 struct abx500_device_entry {
27913 struct list_head list;
27914- struct abx500_ops ops;
27915+ abx500_ops_no_const ops;
27916 struct device *dev;
27917 };
27918
27919diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27920--- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27921+++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27922@@ -13,6 +13,7 @@
27923
27924 #include <linux/kernel.h>
27925 #include <linux/module.h>
27926+#include <linux/slab.h>
27927 #include <linux/init.h>
27928 #include <linux/pci.h>
27929 #include <linux/interrupt.h>
27930diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27931--- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27932+++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27933@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27934 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27935 int ret;
27936
27937+ pax_track_stack();
27938+
27939 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27940 return -EINVAL;
27941
27942diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27943--- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27944+++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27945@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27946 * the lid is closed. This leads to interrupts as soon as a little move
27947 * is done.
27948 */
27949- atomic_inc(&lis3_dev.count);
27950+ atomic_inc_unchecked(&lis3_dev.count);
27951
27952 wake_up_interruptible(&lis3_dev.misc_wait);
27953 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27954@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27955 if (lis3_dev.pm_dev)
27956 pm_runtime_get_sync(lis3_dev.pm_dev);
27957
27958- atomic_set(&lis3_dev.count, 0);
27959+ atomic_set_unchecked(&lis3_dev.count, 0);
27960 return 0;
27961 }
27962
27963@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27964 add_wait_queue(&lis3_dev.misc_wait, &wait);
27965 while (true) {
27966 set_current_state(TASK_INTERRUPTIBLE);
27967- data = atomic_xchg(&lis3_dev.count, 0);
27968+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27969 if (data)
27970 break;
27971
27972@@ -583,7 +583,7 @@ out:
27973 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27974 {
27975 poll_wait(file, &lis3_dev.misc_wait, wait);
27976- if (atomic_read(&lis3_dev.count))
27977+ if (atomic_read_unchecked(&lis3_dev.count))
27978 return POLLIN | POLLRDNORM;
27979 return 0;
27980 }
27981diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
27982--- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
27983+++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
27984@@ -265,7 +265,7 @@ struct lis3lv02d {
27985 struct input_polled_dev *idev; /* input device */
27986 struct platform_device *pdev; /* platform device */
27987 struct regulator_bulk_data regulators[2];
27988- atomic_t count; /* interrupt count after last read */
27989+ atomic_unchecked_t count; /* interrupt count after last read */
27990 union axis_conversion ac; /* hw -> logical axis */
27991 int mapped_btns[3];
27992
27993diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
27994--- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
27995+++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
27996@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27997 unsigned long nsec;
27998
27999 nsec = CLKS2NSEC(clks);
28000- atomic_long_inc(&mcs_op_statistics[op].count);
28001- atomic_long_add(nsec, &mcs_op_statistics[op].total);
28002+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28003+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28004 if (mcs_op_statistics[op].max < nsec)
28005 mcs_op_statistics[op].max = nsec;
28006 }
28007diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28008--- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28009+++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28010@@ -32,9 +32,9 @@
28011
28012 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28013
28014-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28015+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28016 {
28017- unsigned long val = atomic_long_read(v);
28018+ unsigned long val = atomic_long_read_unchecked(v);
28019
28020 seq_printf(s, "%16lu %s\n", val, id);
28021 }
28022@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28023
28024 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28025 for (op = 0; op < mcsop_last; op++) {
28026- count = atomic_long_read(&mcs_op_statistics[op].count);
28027- total = atomic_long_read(&mcs_op_statistics[op].total);
28028+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28029+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28030 max = mcs_op_statistics[op].max;
28031 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28032 count ? total / count : 0, max);
28033diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28034--- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28035+++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28036@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28037 * GRU statistics.
28038 */
28039 struct gru_stats_s {
28040- atomic_long_t vdata_alloc;
28041- atomic_long_t vdata_free;
28042- atomic_long_t gts_alloc;
28043- atomic_long_t gts_free;
28044- atomic_long_t gms_alloc;
28045- atomic_long_t gms_free;
28046- atomic_long_t gts_double_allocate;
28047- atomic_long_t assign_context;
28048- atomic_long_t assign_context_failed;
28049- atomic_long_t free_context;
28050- atomic_long_t load_user_context;
28051- atomic_long_t load_kernel_context;
28052- atomic_long_t lock_kernel_context;
28053- atomic_long_t unlock_kernel_context;
28054- atomic_long_t steal_user_context;
28055- atomic_long_t steal_kernel_context;
28056- atomic_long_t steal_context_failed;
28057- atomic_long_t nopfn;
28058- atomic_long_t asid_new;
28059- atomic_long_t asid_next;
28060- atomic_long_t asid_wrap;
28061- atomic_long_t asid_reuse;
28062- atomic_long_t intr;
28063- atomic_long_t intr_cbr;
28064- atomic_long_t intr_tfh;
28065- atomic_long_t intr_spurious;
28066- atomic_long_t intr_mm_lock_failed;
28067- atomic_long_t call_os;
28068- atomic_long_t call_os_wait_queue;
28069- atomic_long_t user_flush_tlb;
28070- atomic_long_t user_unload_context;
28071- atomic_long_t user_exception;
28072- atomic_long_t set_context_option;
28073- atomic_long_t check_context_retarget_intr;
28074- atomic_long_t check_context_unload;
28075- atomic_long_t tlb_dropin;
28076- atomic_long_t tlb_preload_page;
28077- atomic_long_t tlb_dropin_fail_no_asid;
28078- atomic_long_t tlb_dropin_fail_upm;
28079- atomic_long_t tlb_dropin_fail_invalid;
28080- atomic_long_t tlb_dropin_fail_range_active;
28081- atomic_long_t tlb_dropin_fail_idle;
28082- atomic_long_t tlb_dropin_fail_fmm;
28083- atomic_long_t tlb_dropin_fail_no_exception;
28084- atomic_long_t tfh_stale_on_fault;
28085- atomic_long_t mmu_invalidate_range;
28086- atomic_long_t mmu_invalidate_page;
28087- atomic_long_t flush_tlb;
28088- atomic_long_t flush_tlb_gru;
28089- atomic_long_t flush_tlb_gru_tgh;
28090- atomic_long_t flush_tlb_gru_zero_asid;
28091-
28092- atomic_long_t copy_gpa;
28093- atomic_long_t read_gpa;
28094-
28095- atomic_long_t mesq_receive;
28096- atomic_long_t mesq_receive_none;
28097- atomic_long_t mesq_send;
28098- atomic_long_t mesq_send_failed;
28099- atomic_long_t mesq_noop;
28100- atomic_long_t mesq_send_unexpected_error;
28101- atomic_long_t mesq_send_lb_overflow;
28102- atomic_long_t mesq_send_qlimit_reached;
28103- atomic_long_t mesq_send_amo_nacked;
28104- atomic_long_t mesq_send_put_nacked;
28105- atomic_long_t mesq_page_overflow;
28106- atomic_long_t mesq_qf_locked;
28107- atomic_long_t mesq_qf_noop_not_full;
28108- atomic_long_t mesq_qf_switch_head_failed;
28109- atomic_long_t mesq_qf_unexpected_error;
28110- atomic_long_t mesq_noop_unexpected_error;
28111- atomic_long_t mesq_noop_lb_overflow;
28112- atomic_long_t mesq_noop_qlimit_reached;
28113- atomic_long_t mesq_noop_amo_nacked;
28114- atomic_long_t mesq_noop_put_nacked;
28115- atomic_long_t mesq_noop_page_overflow;
28116+ atomic_long_unchecked_t vdata_alloc;
28117+ atomic_long_unchecked_t vdata_free;
28118+ atomic_long_unchecked_t gts_alloc;
28119+ atomic_long_unchecked_t gts_free;
28120+ atomic_long_unchecked_t gms_alloc;
28121+ atomic_long_unchecked_t gms_free;
28122+ atomic_long_unchecked_t gts_double_allocate;
28123+ atomic_long_unchecked_t assign_context;
28124+ atomic_long_unchecked_t assign_context_failed;
28125+ atomic_long_unchecked_t free_context;
28126+ atomic_long_unchecked_t load_user_context;
28127+ atomic_long_unchecked_t load_kernel_context;
28128+ atomic_long_unchecked_t lock_kernel_context;
28129+ atomic_long_unchecked_t unlock_kernel_context;
28130+ atomic_long_unchecked_t steal_user_context;
28131+ atomic_long_unchecked_t steal_kernel_context;
28132+ atomic_long_unchecked_t steal_context_failed;
28133+ atomic_long_unchecked_t nopfn;
28134+ atomic_long_unchecked_t asid_new;
28135+ atomic_long_unchecked_t asid_next;
28136+ atomic_long_unchecked_t asid_wrap;
28137+ atomic_long_unchecked_t asid_reuse;
28138+ atomic_long_unchecked_t intr;
28139+ atomic_long_unchecked_t intr_cbr;
28140+ atomic_long_unchecked_t intr_tfh;
28141+ atomic_long_unchecked_t intr_spurious;
28142+ atomic_long_unchecked_t intr_mm_lock_failed;
28143+ atomic_long_unchecked_t call_os;
28144+ atomic_long_unchecked_t call_os_wait_queue;
28145+ atomic_long_unchecked_t user_flush_tlb;
28146+ atomic_long_unchecked_t user_unload_context;
28147+ atomic_long_unchecked_t user_exception;
28148+ atomic_long_unchecked_t set_context_option;
28149+ atomic_long_unchecked_t check_context_retarget_intr;
28150+ atomic_long_unchecked_t check_context_unload;
28151+ atomic_long_unchecked_t tlb_dropin;
28152+ atomic_long_unchecked_t tlb_preload_page;
28153+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28154+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28155+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28156+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28157+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28158+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28159+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28160+ atomic_long_unchecked_t tfh_stale_on_fault;
28161+ atomic_long_unchecked_t mmu_invalidate_range;
28162+ atomic_long_unchecked_t mmu_invalidate_page;
28163+ atomic_long_unchecked_t flush_tlb;
28164+ atomic_long_unchecked_t flush_tlb_gru;
28165+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28166+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28167+
28168+ atomic_long_unchecked_t copy_gpa;
28169+ atomic_long_unchecked_t read_gpa;
28170+
28171+ atomic_long_unchecked_t mesq_receive;
28172+ atomic_long_unchecked_t mesq_receive_none;
28173+ atomic_long_unchecked_t mesq_send;
28174+ atomic_long_unchecked_t mesq_send_failed;
28175+ atomic_long_unchecked_t mesq_noop;
28176+ atomic_long_unchecked_t mesq_send_unexpected_error;
28177+ atomic_long_unchecked_t mesq_send_lb_overflow;
28178+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28179+ atomic_long_unchecked_t mesq_send_amo_nacked;
28180+ atomic_long_unchecked_t mesq_send_put_nacked;
28181+ atomic_long_unchecked_t mesq_page_overflow;
28182+ atomic_long_unchecked_t mesq_qf_locked;
28183+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28184+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28185+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28186+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28187+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28188+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28189+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28190+ atomic_long_unchecked_t mesq_noop_put_nacked;
28191+ atomic_long_unchecked_t mesq_noop_page_overflow;
28192
28193 };
28194
28195@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28196 tghop_invalidate, mcsop_last};
28197
28198 struct mcs_op_statistic {
28199- atomic_long_t count;
28200- atomic_long_t total;
28201+ atomic_long_unchecked_t count;
28202+ atomic_long_unchecked_t total;
28203 unsigned long max;
28204 };
28205
28206@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28207
28208 #define STAT(id) do { \
28209 if (gru_options & OPT_STATS) \
28210- atomic_long_inc(&gru_stats.id); \
28211+ atomic_long_inc_unchecked(&gru_stats.id); \
28212 } while (0)
28213
28214 #ifdef CONFIG_SGI_GRU_DEBUG
28215diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28216--- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28217+++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28218@@ -289,7 +289,7 @@ struct xpc_interface {
28219 xpc_notify_func, void *);
28220 void (*received) (short, int, void *);
28221 enum xp_retval (*partid_to_nasids) (short, void *);
28222-};
28223+} __no_const;
28224
28225 extern struct xpc_interface xpc_interface;
28226
28227diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28228--- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28229+++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28230@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28231 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28232 unsigned long timeo = jiffies + HZ;
28233
28234+ pax_track_stack();
28235+
28236 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28237 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28238 goto sleep;
28239@@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28240 unsigned long initial_adr;
28241 int initial_len = len;
28242
28243+ pax_track_stack();
28244+
28245 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28246 adr += chip->start;
28247 initial_adr = adr;
28248@@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28249 int retries = 3;
28250 int ret;
28251
28252+ pax_track_stack();
28253+
28254 adr += chip->start;
28255
28256 retry:
28257diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28258--- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28259+++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28260@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28261 unsigned long cmd_addr;
28262 struct cfi_private *cfi = map->fldrv_priv;
28263
28264+ pax_track_stack();
28265+
28266 adr += chip->start;
28267
28268 /* Ensure cmd read/writes are aligned. */
28269@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28270 DECLARE_WAITQUEUE(wait, current);
28271 int wbufsize, z;
28272
28273+ pax_track_stack();
28274+
28275 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28276 if (adr & (map_bankwidth(map)-1))
28277 return -EINVAL;
28278@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28279 DECLARE_WAITQUEUE(wait, current);
28280 int ret = 0;
28281
28282+ pax_track_stack();
28283+
28284 adr += chip->start;
28285
28286 /* Let's determine this according to the interleave only once */
28287@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28288 unsigned long timeo = jiffies + HZ;
28289 DECLARE_WAITQUEUE(wait, current);
28290
28291+ pax_track_stack();
28292+
28293 adr += chip->start;
28294
28295 /* Let's determine this according to the interleave only once */
28296@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28297 unsigned long timeo = jiffies + HZ;
28298 DECLARE_WAITQUEUE(wait, current);
28299
28300+ pax_track_stack();
28301+
28302 adr += chip->start;
28303
28304 /* Let's determine this according to the interleave only once */
28305diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28306--- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28307+++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28308@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28309
28310 /* The ECC will not be calculated correctly if less than 512 is written */
28311 /* DBB-
28312- if (len != 0x200 && eccbuf)
28313+ if (len != 0x200)
28314 printk(KERN_WARNING
28315 "ECC needs a full sector write (adr: %lx size %lx)\n",
28316 (long) to, (long) len);
28317diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28318--- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28319+++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28320@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28321 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28322
28323 /* Don't allow read past end of device */
28324- if (from >= this->totlen)
28325+ if (from >= this->totlen || !len)
28326 return -EINVAL;
28327
28328 /* Don't allow a single read to cross a 512-byte block boundary */
28329diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28330--- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28331+++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28332@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28333 loff_t offset;
28334 uint16_t srcunitswap = cpu_to_le16(srcunit);
28335
28336+ pax_track_stack();
28337+
28338 eun = &part->EUNInfo[srcunit];
28339 xfer = &part->XferInfo[xferunit];
28340 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28341diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28342--- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28343+++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28344@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28345 struct inftl_oob oob;
28346 size_t retlen;
28347
28348+ pax_track_stack();
28349+
28350 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28351 "pending=%d)\n", inftl, thisVUC, pendingblock);
28352
28353diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28354--- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28355+++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28356@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28357 struct INFTLPartition *ip;
28358 size_t retlen;
28359
28360+ pax_track_stack();
28361+
28362 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28363
28364 /*
28365diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28366--- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28367+++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28368@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28369 {
28370 map_word pfow_val[4];
28371
28372+ pax_track_stack();
28373+
28374 /* Check identification string */
28375 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28376 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28377diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28378--- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28379+++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28380@@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28381 u_long size;
28382 struct mtd_info_user info;
28383
28384+ pax_track_stack();
28385+
28386 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28387
28388 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28389diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28390--- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28391+++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28392@@ -25,6 +25,7 @@
28393 #include <linux/pci.h>
28394 #include <linux/mtd/mtd.h>
28395 #include <linux/module.h>
28396+#include <linux/slab.h>
28397
28398 #include "denali.h"
28399
28400diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28401--- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28402+++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28403@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28404 int inplace = 1;
28405 size_t retlen;
28406
28407+ pax_track_stack();
28408+
28409 memset(BlockMap, 0xff, sizeof(BlockMap));
28410 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28411
28412diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28413--- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28414+++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28415@@ -24,6 +24,7 @@
28416 #include <asm/errno.h>
28417 #include <linux/delay.h>
28418 #include <linux/slab.h>
28419+#include <linux/sched.h>
28420 #include <linux/mtd/mtd.h>
28421 #include <linux/mtd/nand.h>
28422 #include <linux/mtd/nftl.h>
28423@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28424 struct mtd_info *mtd = nftl->mbd.mtd;
28425 unsigned int i;
28426
28427+ pax_track_stack();
28428+
28429 /* Assume logical EraseSize == physical erasesize for starting the scan.
28430 We'll sort it out later if we find a MediaHeader which says otherwise */
28431 /* Actually, we won't. The new DiskOnChip driver has already scanned
28432diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28433--- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28434+++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28435@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28436 static int __init bytes_str_to_int(const char *str)
28437 {
28438 char *endp;
28439- unsigned long result;
28440+ unsigned long result, scale = 1;
28441
28442 result = simple_strtoul(str, &endp, 0);
28443 if (str == endp || result >= INT_MAX) {
28444@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28445
28446 switch (*endp) {
28447 case 'G':
28448- result *= 1024;
28449+ scale *= 1024;
28450 case 'M':
28451- result *= 1024;
28452+ scale *= 1024;
28453 case 'K':
28454- result *= 1024;
28455+ scale *= 1024;
28456 if (endp[1] == 'i' && endp[2] == 'B')
28457 endp += 2;
28458 case '\0':
28459@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28460 return -EINVAL;
28461 }
28462
28463- return result;
28464+ if ((intoverflow_t)result*scale >= INT_MAX) {
28465+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28466+ str);
28467+ return -EINVAL;
28468+ }
28469+
28470+ return result*scale;
28471 }
28472
28473 /**
28474diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28475--- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28476+++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28477@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28478 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28479 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28480
28481-static struct bfa_ioc_hwif nw_hwif_ct;
28482+static struct bfa_ioc_hwif nw_hwif_ct = {
28483+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28484+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28485+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28486+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28487+ .ioc_map_port = bfa_ioc_ct_map_port,
28488+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28489+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28490+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28491+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28492+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28493+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28494+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28495+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28496+};
28497
28498 /**
28499 * Called from bfa_ioc_attach() to map asic specific calls.
28500@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28501 void
28502 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28503 {
28504- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28505- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28506- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28507- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28508- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28509- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28510- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28511- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28512- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28513- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28514- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28515- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28516- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28517-
28518 ioc->ioc_hwif = &nw_hwif_ct;
28519 }
28520
28521diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28522--- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28523+++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28524@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28525 struct bna_intr_info *intr_info =
28526 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28527 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28528- struct bna_tx_event_cbfn tx_cbfn;
28529+ static struct bna_tx_event_cbfn tx_cbfn = {
28530+ /* Initialize the tx event handlers */
28531+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28532+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28533+ .tx_stall_cbfn = bnad_cb_tx_stall,
28534+ .tx_resume_cbfn = bnad_cb_tx_resume,
28535+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28536+ };
28537 struct bna_tx *tx;
28538 unsigned long flags;
28539
28540@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28541 tx_config->txq_depth = bnad->txq_depth;
28542 tx_config->tx_type = BNA_TX_T_REGULAR;
28543
28544- /* Initialize the tx event handlers */
28545- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28546- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28547- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28548- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28549- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28550-
28551 /* Get BNA's resource requirement for one tx object */
28552 spin_lock_irqsave(&bnad->bna_lock, flags);
28553 bna_tx_res_req(bnad->num_txq_per_tx,
28554@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28555 struct bna_intr_info *intr_info =
28556 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28557 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28558- struct bna_rx_event_cbfn rx_cbfn;
28559+ static struct bna_rx_event_cbfn rx_cbfn = {
28560+ /* Initialize the Rx event handlers */
28561+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28562+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28563+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28564+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28565+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28566+ .rx_post_cbfn = bnad_cb_rx_post
28567+ };
28568 struct bna_rx *rx;
28569 unsigned long flags;
28570
28571 /* Initialize the Rx object configuration */
28572 bnad_init_rx_config(bnad, rx_config);
28573
28574- /* Initialize the Rx event handlers */
28575- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28576- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28577- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28578- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28579- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28580- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28581-
28582 /* Get BNA's resource requirement for one Rx object */
28583 spin_lock_irqsave(&bnad->bna_lock, flags);
28584 bna_rx_res_req(rx_config, res_info);
28585diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28586--- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28587+++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28588@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28589 int rc = 0;
28590 u32 magic, csum;
28591
28592+ pax_track_stack();
28593+
28594 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28595 goto test_nvram_done;
28596
28597diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28598--- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28599+++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28600@@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28601 int i, rc;
28602 u32 magic, crc;
28603
28604+ pax_track_stack();
28605+
28606 if (BP_NOMCP(bp))
28607 return 0;
28608
28609diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28610--- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28611+++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28612@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28613 */
28614 struct l2t_skb_cb {
28615 arp_failure_handler_func arp_failure_handler;
28616-};
28617+} __no_const;
28618
28619 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28620
28621diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28622--- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28623+++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28624@@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28625 unsigned int nchan = adap->params.nports;
28626 struct msix_entry entries[MAX_INGQ + 1];
28627
28628+ pax_track_stack();
28629+
28630 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28631 entries[i].entry = i;
28632
28633diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28634--- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28635+++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28636@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28637 u8 vpd[VPD_LEN], csum;
28638 unsigned int vpdr_len, kw_offset, id_len;
28639
28640+ pax_track_stack();
28641+
28642 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28643 if (ret < 0)
28644 return ret;
28645diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28646--- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28647+++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28648@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28649 {
28650 struct e1000_hw *hw = &adapter->hw;
28651 struct e1000_mac_info *mac = &hw->mac;
28652- struct e1000_mac_operations *func = &mac->ops;
28653+ e1000_mac_operations_no_const *func = &mac->ops;
28654 u32 swsm = 0;
28655 u32 swsm2 = 0;
28656 bool force_clear_smbi = false;
28657diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28658--- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28659+++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28660@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28661 {
28662 struct e1000_hw *hw = &adapter->hw;
28663 struct e1000_mac_info *mac = &hw->mac;
28664- struct e1000_mac_operations *func = &mac->ops;
28665+ e1000_mac_operations_no_const *func = &mac->ops;
28666
28667 /* Set media type */
28668 switch (adapter->pdev->device) {
28669diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28670--- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28671+++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28672@@ -775,6 +775,7 @@ struct e1000_mac_operations {
28673 void (*write_vfta)(struct e1000_hw *, u32, u32);
28674 s32 (*read_mac_addr)(struct e1000_hw *);
28675 };
28676+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28677
28678 /* Function pointers for the PHY. */
28679 struct e1000_phy_operations {
28680@@ -798,6 +799,7 @@ struct e1000_phy_operations {
28681 void (*power_up)(struct e1000_hw *);
28682 void (*power_down)(struct e1000_hw *);
28683 };
28684+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28685
28686 /* Function pointers for the NVM. */
28687 struct e1000_nvm_operations {
28688@@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28689 s32 (*validate)(struct e1000_hw *);
28690 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28691 };
28692+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28693
28694 struct e1000_mac_info {
28695- struct e1000_mac_operations ops;
28696+ e1000_mac_operations_no_const ops;
28697 u8 addr[ETH_ALEN];
28698 u8 perm_addr[ETH_ALEN];
28699
28700@@ -852,7 +855,7 @@ struct e1000_mac_info {
28701 };
28702
28703 struct e1000_phy_info {
28704- struct e1000_phy_operations ops;
28705+ e1000_phy_operations_no_const ops;
28706
28707 enum e1000_phy_type type;
28708
28709@@ -886,7 +889,7 @@ struct e1000_phy_info {
28710 };
28711
28712 struct e1000_nvm_info {
28713- struct e1000_nvm_operations ops;
28714+ e1000_nvm_operations_no_const ops;
28715
28716 enum e1000_nvm_type type;
28717 enum e1000_nvm_override override;
28718diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28719--- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28720+++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28721@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28722 unsigned char buf[512];
28723 int count1;
28724
28725+ pax_track_stack();
28726+
28727 if (!count)
28728 return;
28729
28730diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28731--- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28732+++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28733@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28734 s32 (*read_mac_addr)(struct e1000_hw *);
28735 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28736 };
28737+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28738
28739 struct e1000_phy_operations {
28740 s32 (*acquire)(struct e1000_hw *);
28741@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28742 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28743 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28744 };
28745+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28746
28747 struct e1000_nvm_operations {
28748 s32 (*acquire)(struct e1000_hw *);
28749@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28750 s32 (*update)(struct e1000_hw *);
28751 s32 (*validate)(struct e1000_hw *);
28752 };
28753+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28754
28755 struct e1000_info {
28756 s32 (*get_invariants)(struct e1000_hw *);
28757@@ -350,7 +353,7 @@ struct e1000_info {
28758 extern const struct e1000_info e1000_82575_info;
28759
28760 struct e1000_mac_info {
28761- struct e1000_mac_operations ops;
28762+ e1000_mac_operations_no_const ops;
28763
28764 u8 addr[6];
28765 u8 perm_addr[6];
28766@@ -388,7 +391,7 @@ struct e1000_mac_info {
28767 };
28768
28769 struct e1000_phy_info {
28770- struct e1000_phy_operations ops;
28771+ e1000_phy_operations_no_const ops;
28772
28773 enum e1000_phy_type type;
28774
28775@@ -423,7 +426,7 @@ struct e1000_phy_info {
28776 };
28777
28778 struct e1000_nvm_info {
28779- struct e1000_nvm_operations ops;
28780+ e1000_nvm_operations_no_const ops;
28781 enum e1000_nvm_type type;
28782 enum e1000_nvm_override override;
28783
28784@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28785 s32 (*check_for_ack)(struct e1000_hw *, u16);
28786 s32 (*check_for_rst)(struct e1000_hw *, u16);
28787 };
28788+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28789
28790 struct e1000_mbx_stats {
28791 u32 msgs_tx;
28792@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28793 };
28794
28795 struct e1000_mbx_info {
28796- struct e1000_mbx_operations ops;
28797+ e1000_mbx_operations_no_const ops;
28798 struct e1000_mbx_stats stats;
28799 u32 timeout;
28800 u32 usec_delay;
28801diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28802--- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28803+++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28804@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28805 s32 (*read_mac_addr)(struct e1000_hw *);
28806 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28807 };
28808+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28809
28810 struct e1000_mac_info {
28811- struct e1000_mac_operations ops;
28812+ e1000_mac_operations_no_const ops;
28813 u8 addr[6];
28814 u8 perm_addr[6];
28815
28816@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28817 s32 (*check_for_ack)(struct e1000_hw *);
28818 s32 (*check_for_rst)(struct e1000_hw *);
28819 };
28820+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28821
28822 struct e1000_mbx_stats {
28823 u32 msgs_tx;
28824@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28825 };
28826
28827 struct e1000_mbx_info {
28828- struct e1000_mbx_operations ops;
28829+ e1000_mbx_operations_no_const ops;
28830 struct e1000_mbx_stats stats;
28831 u32 timeout;
28832 u32 usec_delay;
28833diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28834--- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28835+++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28836@@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28837 u32 rctl;
28838 int i;
28839
28840+ pax_track_stack();
28841+
28842 /* Check for Promiscuous and All Multicast modes */
28843
28844 rctl = IXGB_READ_REG(hw, RCTL);
28845diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28846--- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28847+++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28848@@ -261,6 +261,9 @@ void __devinit
28849 ixgb_check_options(struct ixgb_adapter *adapter)
28850 {
28851 int bd = adapter->bd_number;
28852+
28853+ pax_track_stack();
28854+
28855 if (bd >= IXGB_MAX_NIC) {
28856 pr_notice("Warning: no configuration for board #%i\n", bd);
28857 pr_notice("Using defaults for all values\n");
28858diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28859--- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28860+++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28861@@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28862 s32 (*update_checksum)(struct ixgbe_hw *);
28863 u16 (*calc_checksum)(struct ixgbe_hw *);
28864 };
28865+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28866
28867 struct ixgbe_mac_operations {
28868 s32 (*init_hw)(struct ixgbe_hw *);
28869@@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28870 /* Flow Control */
28871 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28872 };
28873+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28874
28875 struct ixgbe_phy_operations {
28876 s32 (*identify)(struct ixgbe_hw *);
28877@@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28878 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28879 s32 (*check_overtemp)(struct ixgbe_hw *);
28880 };
28881+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28882
28883 struct ixgbe_eeprom_info {
28884- struct ixgbe_eeprom_operations ops;
28885+ ixgbe_eeprom_operations_no_const ops;
28886 enum ixgbe_eeprom_type type;
28887 u32 semaphore_delay;
28888 u16 word_size;
28889@@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28890
28891 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28892 struct ixgbe_mac_info {
28893- struct ixgbe_mac_operations ops;
28894+ ixgbe_mac_operations_no_const ops;
28895 enum ixgbe_mac_type type;
28896 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28897 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28898@@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28899 };
28900
28901 struct ixgbe_phy_info {
28902- struct ixgbe_phy_operations ops;
28903+ ixgbe_phy_operations_no_const ops;
28904 struct mdio_if_info mdio;
28905 enum ixgbe_phy_type type;
28906 u32 id;
28907@@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28908 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28909 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28910 };
28911+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28912
28913 struct ixgbe_mbx_stats {
28914 u32 msgs_tx;
28915@@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28916 };
28917
28918 struct ixgbe_mbx_info {
28919- struct ixgbe_mbx_operations ops;
28920+ ixgbe_mbx_operations_no_const ops;
28921 struct ixgbe_mbx_stats stats;
28922 u32 timeout;
28923 u32 usec_delay;
28924diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28925--- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28926+++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28927@@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28928 s32 (*clear_vfta)(struct ixgbe_hw *);
28929 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28930 };
28931+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28932
28933 enum ixgbe_mac_type {
28934 ixgbe_mac_unknown = 0,
28935@@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28936 };
28937
28938 struct ixgbe_mac_info {
28939- struct ixgbe_mac_operations ops;
28940+ ixgbe_mac_operations_no_const ops;
28941 u8 addr[6];
28942 u8 perm_addr[6];
28943
28944@@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28945 s32 (*check_for_ack)(struct ixgbe_hw *);
28946 s32 (*check_for_rst)(struct ixgbe_hw *);
28947 };
28948+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28949
28950 struct ixgbe_mbx_stats {
28951 u32 msgs_tx;
28952@@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28953 };
28954
28955 struct ixgbe_mbx_info {
28956- struct ixgbe_mbx_operations ops;
28957+ ixgbe_mbx_operations_no_const ops;
28958 struct ixgbe_mbx_stats stats;
28959 u32 timeout;
28960 u32 udelay;
28961diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
28962--- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
28963+++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
28964@@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
28965 int rc;
28966 u64 counter[TOTAL_PORT_COUNTER_NUM];
28967
28968+ pax_track_stack();
28969+
28970 mutex_lock(&hw_priv->lock);
28971 n = SWITCH_PORT_NUM;
28972 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28973diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
28974--- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
28975+++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
28976@@ -40,6 +40,7 @@
28977 #include <linux/dma-mapping.h>
28978 #include <linux/slab.h>
28979 #include <linux/io-mapping.h>
28980+#include <linux/sched.h>
28981
28982 #include <linux/mlx4/device.h>
28983 #include <linux/mlx4/doorbell.h>
28984@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28985 u64 icm_size;
28986 int err;
28987
28988+ pax_track_stack();
28989+
28990 err = mlx4_QUERY_FW(dev);
28991 if (err) {
28992 if (err == -EACCES)
28993diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
28994--- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
28995+++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
28996@@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
28997 int i, num_irqs, err;
28998 u8 first_ldg;
28999
29000+ pax_track_stack();
29001+
29002 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29003 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29004 ldg_num_map[i] = first_ldg + i;
29005diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29006--- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29007+++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29008@@ -82,7 +82,7 @@ static int cards_found;
29009 /*
29010 * VLB I/O addresses
29011 */
29012-static unsigned int pcnet32_portlist[] __initdata =
29013+static unsigned int pcnet32_portlist[] __devinitdata =
29014 { 0x300, 0x320, 0x340, 0x360, 0 };
29015
29016 static int pcnet32_debug;
29017@@ -270,7 +270,7 @@ struct pcnet32_private {
29018 struct sk_buff **rx_skbuff;
29019 dma_addr_t *tx_dma_addr;
29020 dma_addr_t *rx_dma_addr;
29021- struct pcnet32_access a;
29022+ struct pcnet32_access *a;
29023 spinlock_t lock; /* Guard lock */
29024 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29025 unsigned int rx_ring_size; /* current rx ring size */
29026@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29027 u16 val;
29028
29029 netif_wake_queue(dev);
29030- val = lp->a.read_csr(ioaddr, CSR3);
29031+ val = lp->a->read_csr(ioaddr, CSR3);
29032 val &= 0x00ff;
29033- lp->a.write_csr(ioaddr, CSR3, val);
29034+ lp->a->write_csr(ioaddr, CSR3, val);
29035 napi_enable(&lp->napi);
29036 }
29037
29038@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29039 r = mii_link_ok(&lp->mii_if);
29040 } else if (lp->chip_version >= PCNET32_79C970A) {
29041 ulong ioaddr = dev->base_addr; /* card base I/O address */
29042- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29043+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29044 } else { /* can not detect link on really old chips */
29045 r = 1;
29046 }
29047@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29048 pcnet32_netif_stop(dev);
29049
29050 spin_lock_irqsave(&lp->lock, flags);
29051- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29052+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29053
29054 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29055
29056@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29057 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29058 {
29059 struct pcnet32_private *lp = netdev_priv(dev);
29060- struct pcnet32_access *a = &lp->a; /* access to registers */
29061+ struct pcnet32_access *a = lp->a; /* access to registers */
29062 ulong ioaddr = dev->base_addr; /* card base I/O address */
29063 struct sk_buff *skb; /* sk buff */
29064 int x, i; /* counters */
29065@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29066 pcnet32_netif_stop(dev);
29067
29068 spin_lock_irqsave(&lp->lock, flags);
29069- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29070+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29071
29072 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29073
29074 /* Reset the PCNET32 */
29075- lp->a.reset(ioaddr);
29076- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29077+ lp->a->reset(ioaddr);
29078+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29079
29080 /* switch pcnet32 to 32bit mode */
29081- lp->a.write_bcr(ioaddr, 20, 2);
29082+ lp->a->write_bcr(ioaddr, 20, 2);
29083
29084 /* purge & init rings but don't actually restart */
29085 pcnet32_restart(dev, 0x0000);
29086
29087- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29088+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29089
29090 /* Initialize Transmit buffers. */
29091 size = data_len + 15;
29092@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29093
29094 /* set int loopback in CSR15 */
29095 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29096- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29097+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29098
29099 teststatus = cpu_to_le16(0x8000);
29100- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29101+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29102
29103 /* Check status of descriptors */
29104 for (x = 0; x < numbuffs; x++) {
29105@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29106 }
29107 }
29108
29109- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29110+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29111 wmb();
29112 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29113 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29114@@ -1015,7 +1015,7 @@ clean_up:
29115 pcnet32_restart(dev, CSR0_NORMAL);
29116 } else {
29117 pcnet32_purge_rx_ring(dev);
29118- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29119+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29120 }
29121 spin_unlock_irqrestore(&lp->lock, flags);
29122
29123@@ -1025,7 +1025,7 @@ clean_up:
29124 static void pcnet32_led_blink_callback(struct net_device *dev)
29125 {
29126 struct pcnet32_private *lp = netdev_priv(dev);
29127- struct pcnet32_access *a = &lp->a;
29128+ struct pcnet32_access *a = lp->a;
29129 ulong ioaddr = dev->base_addr;
29130 unsigned long flags;
29131 int i;
29132@@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29133 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29134 {
29135 struct pcnet32_private *lp = netdev_priv(dev);
29136- struct pcnet32_access *a = &lp->a;
29137+ struct pcnet32_access *a = lp->a;
29138 ulong ioaddr = dev->base_addr;
29139 unsigned long flags;
29140 int i, regs[4];
29141@@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29142 {
29143 int csr5;
29144 struct pcnet32_private *lp = netdev_priv(dev);
29145- struct pcnet32_access *a = &lp->a;
29146+ struct pcnet32_access *a = lp->a;
29147 ulong ioaddr = dev->base_addr;
29148 int ticks;
29149
29150@@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29151 spin_lock_irqsave(&lp->lock, flags);
29152 if (pcnet32_tx(dev)) {
29153 /* reset the chip to clear the error condition, then restart */
29154- lp->a.reset(ioaddr);
29155- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29156+ lp->a->reset(ioaddr);
29157+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29158 pcnet32_restart(dev, CSR0_START);
29159 netif_wake_queue(dev);
29160 }
29161@@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29162 __napi_complete(napi);
29163
29164 /* clear interrupt masks */
29165- val = lp->a.read_csr(ioaddr, CSR3);
29166+ val = lp->a->read_csr(ioaddr, CSR3);
29167 val &= 0x00ff;
29168- lp->a.write_csr(ioaddr, CSR3, val);
29169+ lp->a->write_csr(ioaddr, CSR3, val);
29170
29171 /* Set interrupt enable. */
29172- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29173+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29174
29175 spin_unlock_irqrestore(&lp->lock, flags);
29176 }
29177@@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29178 int i, csr0;
29179 u16 *buff = ptr;
29180 struct pcnet32_private *lp = netdev_priv(dev);
29181- struct pcnet32_access *a = &lp->a;
29182+ struct pcnet32_access *a = lp->a;
29183 ulong ioaddr = dev->base_addr;
29184 unsigned long flags;
29185
29186@@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29187 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29188 if (lp->phymask & (1 << j)) {
29189 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29190- lp->a.write_bcr(ioaddr, 33,
29191+ lp->a->write_bcr(ioaddr, 33,
29192 (j << 5) | i);
29193- *buff++ = lp->a.read_bcr(ioaddr, 34);
29194+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29195 }
29196 }
29197 }
29198@@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29199 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29200 lp->options |= PCNET32_PORT_FD;
29201
29202- lp->a = *a;
29203+ lp->a = a;
29204
29205 /* prior to register_netdev, dev->name is not yet correct */
29206 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29207@@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29208 if (lp->mii) {
29209 /* lp->phycount and lp->phymask are set to 0 by memset above */
29210
29211- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29212+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29213 /* scan for PHYs */
29214 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29215 unsigned short id1, id2;
29216@@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29217 pr_info("Found PHY %04x:%04x at address %d\n",
29218 id1, id2, i);
29219 }
29220- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29221+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29222 if (lp->phycount > 1)
29223 lp->options |= PCNET32_PORT_MII;
29224 }
29225@@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29226 }
29227
29228 /* Reset the PCNET32 */
29229- lp->a.reset(ioaddr);
29230+ lp->a->reset(ioaddr);
29231
29232 /* switch pcnet32 to 32bit mode */
29233- lp->a.write_bcr(ioaddr, 20, 2);
29234+ lp->a->write_bcr(ioaddr, 20, 2);
29235
29236 netif_printk(lp, ifup, KERN_DEBUG, dev,
29237 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29238@@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29239 (u32) (lp->init_dma_addr));
29240
29241 /* set/reset autoselect bit */
29242- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29243+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29244 if (lp->options & PCNET32_PORT_ASEL)
29245 val |= 2;
29246- lp->a.write_bcr(ioaddr, 2, val);
29247+ lp->a->write_bcr(ioaddr, 2, val);
29248
29249 /* handle full duplex setting */
29250 if (lp->mii_if.full_duplex) {
29251- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29252+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29253 if (lp->options & PCNET32_PORT_FD) {
29254 val |= 1;
29255 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29256@@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29257 if (lp->chip_version == 0x2627)
29258 val |= 3;
29259 }
29260- lp->a.write_bcr(ioaddr, 9, val);
29261+ lp->a->write_bcr(ioaddr, 9, val);
29262 }
29263
29264 /* set/reset GPSI bit in test register */
29265- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29266+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29267 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29268 val |= 0x10;
29269- lp->a.write_csr(ioaddr, 124, val);
29270+ lp->a->write_csr(ioaddr, 124, val);
29271
29272 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29273 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29274@@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29275 * duplex, and/or enable auto negotiation, and clear DANAS
29276 */
29277 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29278- lp->a.write_bcr(ioaddr, 32,
29279- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29280+ lp->a->write_bcr(ioaddr, 32,
29281+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29282 /* disable Auto Negotiation, set 10Mpbs, HD */
29283- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29284+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29285 if (lp->options & PCNET32_PORT_FD)
29286 val |= 0x10;
29287 if (lp->options & PCNET32_PORT_100)
29288 val |= 0x08;
29289- lp->a.write_bcr(ioaddr, 32, val);
29290+ lp->a->write_bcr(ioaddr, 32, val);
29291 } else {
29292 if (lp->options & PCNET32_PORT_ASEL) {
29293- lp->a.write_bcr(ioaddr, 32,
29294- lp->a.read_bcr(ioaddr,
29295+ lp->a->write_bcr(ioaddr, 32,
29296+ lp->a->read_bcr(ioaddr,
29297 32) | 0x0080);
29298 /* enable auto negotiate, setup, disable fd */
29299- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29300+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29301 val |= 0x20;
29302- lp->a.write_bcr(ioaddr, 32, val);
29303+ lp->a->write_bcr(ioaddr, 32, val);
29304 }
29305 }
29306 } else {
29307@@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29308 * There is really no good other way to handle multiple PHYs
29309 * other than turning off all automatics
29310 */
29311- val = lp->a.read_bcr(ioaddr, 2);
29312- lp->a.write_bcr(ioaddr, 2, val & ~2);
29313- val = lp->a.read_bcr(ioaddr, 32);
29314- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29315+ val = lp->a->read_bcr(ioaddr, 2);
29316+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29317+ val = lp->a->read_bcr(ioaddr, 32);
29318+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29319
29320 if (!(lp->options & PCNET32_PORT_ASEL)) {
29321 /* setup ecmd */
29322@@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29323 ecmd.speed =
29324 lp->
29325 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29326- bcr9 = lp->a.read_bcr(ioaddr, 9);
29327+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29328
29329 if (lp->options & PCNET32_PORT_FD) {
29330 ecmd.duplex = DUPLEX_FULL;
29331@@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29332 ecmd.duplex = DUPLEX_HALF;
29333 bcr9 |= ~(1 << 0);
29334 }
29335- lp->a.write_bcr(ioaddr, 9, bcr9);
29336+ lp->a->write_bcr(ioaddr, 9, bcr9);
29337 }
29338
29339 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29340@@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29341
29342 #ifdef DO_DXSUFLO
29343 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29344- val = lp->a.read_csr(ioaddr, CSR3);
29345+ val = lp->a->read_csr(ioaddr, CSR3);
29346 val |= 0x40;
29347- lp->a.write_csr(ioaddr, CSR3, val);
29348+ lp->a->write_csr(ioaddr, CSR3, val);
29349 }
29350 #endif
29351
29352@@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29353 napi_enable(&lp->napi);
29354
29355 /* Re-initialize the PCNET32, and start it when done. */
29356- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29357- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29358+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29359+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29360
29361- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29362- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29363+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29364+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29365
29366 netif_start_queue(dev);
29367
29368@@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29369
29370 i = 0;
29371 while (i++ < 100)
29372- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29373+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29374 break;
29375 /*
29376 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29377 * reports that doing so triggers a bug in the '974.
29378 */
29379- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29380+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29381
29382 netif_printk(lp, ifup, KERN_DEBUG, dev,
29383 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29384 i,
29385 (u32) (lp->init_dma_addr),
29386- lp->a.read_csr(ioaddr, CSR0));
29387+ lp->a->read_csr(ioaddr, CSR0));
29388
29389 spin_unlock_irqrestore(&lp->lock, flags);
29390
29391@@ -2236,7 +2236,7 @@ err_free_ring:
29392 * Switch back to 16bit mode to avoid problems with dumb
29393 * DOS packet driver after a warm reboot
29394 */
29395- lp->a.write_bcr(ioaddr, 20, 4);
29396+ lp->a->write_bcr(ioaddr, 20, 4);
29397
29398 err_free_irq:
29399 spin_unlock_irqrestore(&lp->lock, flags);
29400@@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29401
29402 /* wait for stop */
29403 for (i = 0; i < 100; i++)
29404- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29405+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29406 break;
29407
29408 if (i >= 100)
29409@@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29410 return;
29411
29412 /* ReInit Ring */
29413- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29414+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29415 i = 0;
29416 while (i++ < 1000)
29417- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29418+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29419 break;
29420
29421- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29422+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29423 }
29424
29425 static void pcnet32_tx_timeout(struct net_device *dev)
29426@@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29427 /* Transmitter timeout, serious problems. */
29428 if (pcnet32_debug & NETIF_MSG_DRV)
29429 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29430- dev->name, lp->a.read_csr(ioaddr, CSR0));
29431- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29432+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29433+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29434 dev->stats.tx_errors++;
29435 if (netif_msg_tx_err(lp)) {
29436 int i;
29437@@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29438
29439 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29440 "%s() called, csr0 %4.4x\n",
29441- __func__, lp->a.read_csr(ioaddr, CSR0));
29442+ __func__, lp->a->read_csr(ioaddr, CSR0));
29443
29444 /* Default status -- will not enable Successful-TxDone
29445 * interrupt when that option is available to us.
29446@@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29447 dev->stats.tx_bytes += skb->len;
29448
29449 /* Trigger an immediate send poll. */
29450- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29451+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29452
29453 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29454 lp->tx_full = 1;
29455@@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29456
29457 spin_lock(&lp->lock);
29458
29459- csr0 = lp->a.read_csr(ioaddr, CSR0);
29460+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29461 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29462 if (csr0 == 0xffff)
29463 break; /* PCMCIA remove happened */
29464 /* Acknowledge all of the current interrupt sources ASAP. */
29465- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29466+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29467
29468 netif_printk(lp, intr, KERN_DEBUG, dev,
29469 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29470- csr0, lp->a.read_csr(ioaddr, CSR0));
29471+ csr0, lp->a->read_csr(ioaddr, CSR0));
29472
29473 /* Log misc errors. */
29474 if (csr0 & 0x4000)
29475@@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29476 if (napi_schedule_prep(&lp->napi)) {
29477 u16 val;
29478 /* set interrupt masks */
29479- val = lp->a.read_csr(ioaddr, CSR3);
29480+ val = lp->a->read_csr(ioaddr, CSR3);
29481 val |= 0x5f00;
29482- lp->a.write_csr(ioaddr, CSR3, val);
29483+ lp->a->write_csr(ioaddr, CSR3, val);
29484
29485 __napi_schedule(&lp->napi);
29486 break;
29487 }
29488- csr0 = lp->a.read_csr(ioaddr, CSR0);
29489+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29490 }
29491
29492 netif_printk(lp, intr, KERN_DEBUG, dev,
29493 "exiting interrupt, csr0=%#4.4x\n",
29494- lp->a.read_csr(ioaddr, CSR0));
29495+ lp->a->read_csr(ioaddr, CSR0));
29496
29497 spin_unlock(&lp->lock);
29498
29499@@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29500
29501 spin_lock_irqsave(&lp->lock, flags);
29502
29503- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29504+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29505
29506 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29507 "Shutting down ethercard, status was %2.2x\n",
29508- lp->a.read_csr(ioaddr, CSR0));
29509+ lp->a->read_csr(ioaddr, CSR0));
29510
29511 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29512- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29513+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29514
29515 /*
29516 * Switch back to 16bit mode to avoid problems with dumb
29517 * DOS packet driver after a warm reboot
29518 */
29519- lp->a.write_bcr(ioaddr, 20, 4);
29520+ lp->a->write_bcr(ioaddr, 20, 4);
29521
29522 spin_unlock_irqrestore(&lp->lock, flags);
29523
29524@@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29525 unsigned long flags;
29526
29527 spin_lock_irqsave(&lp->lock, flags);
29528- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29529+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29530 spin_unlock_irqrestore(&lp->lock, flags);
29531
29532 return &dev->stats;
29533@@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29534 if (dev->flags & IFF_ALLMULTI) {
29535 ib->filter[0] = cpu_to_le32(~0U);
29536 ib->filter[1] = cpu_to_le32(~0U);
29537- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29538- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29539- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29540- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29541+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29542+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29543+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29544+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29545 return;
29546 }
29547 /* clear the multicast filter */
29548@@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29549 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29550 }
29551 for (i = 0; i < 4; i++)
29552- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29553+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29554 le16_to_cpu(mcast_table[i]));
29555 }
29556
29557@@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29558
29559 spin_lock_irqsave(&lp->lock, flags);
29560 suspended = pcnet32_suspend(dev, &flags, 0);
29561- csr15 = lp->a.read_csr(ioaddr, CSR15);
29562+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29563 if (dev->flags & IFF_PROMISC) {
29564 /* Log any net taps. */
29565 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29566 lp->init_block->mode =
29567 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29568 7);
29569- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29570+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29571 } else {
29572 lp->init_block->mode =
29573 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29574- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29575+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29576 pcnet32_load_multicast(dev);
29577 }
29578
29579 if (suspended) {
29580 int csr5;
29581 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29582- csr5 = lp->a.read_csr(ioaddr, CSR5);
29583- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29584+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29585+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29586 } else {
29587- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29588+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29589 pcnet32_restart(dev, CSR0_NORMAL);
29590 netif_wake_queue(dev);
29591 }
29592@@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29593 if (!lp->mii)
29594 return 0;
29595
29596- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29597- val_out = lp->a.read_bcr(ioaddr, 34);
29598+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29599+ val_out = lp->a->read_bcr(ioaddr, 34);
29600
29601 return val_out;
29602 }
29603@@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29604 if (!lp->mii)
29605 return;
29606
29607- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29608- lp->a.write_bcr(ioaddr, 34, val);
29609+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29610+ lp->a->write_bcr(ioaddr, 34, val);
29611 }
29612
29613 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29614@@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29615 curr_link = mii_link_ok(&lp->mii_if);
29616 } else {
29617 ulong ioaddr = dev->base_addr; /* card base I/O address */
29618- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29619+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29620 }
29621 if (!curr_link) {
29622 if (prev_link || verbose) {
29623@@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29624 (ecmd.duplex == DUPLEX_FULL)
29625 ? "full" : "half");
29626 }
29627- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29628+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29629 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29630 if (lp->mii_if.full_duplex)
29631 bcr9 |= (1 << 0);
29632 else
29633 bcr9 &= ~(1 << 0);
29634- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29635+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29636 }
29637 } else {
29638 netif_info(lp, link, dev, "link up\n");
29639diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29640--- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29641+++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29642@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29643 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29644 struct ppp_stats stats;
29645 struct ppp_comp_stats cstats;
29646- char *vers;
29647
29648 switch (cmd) {
29649 case SIOCGPPPSTATS:
29650@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29651 break;
29652
29653 case SIOCGPPPVER:
29654- vers = PPP_VERSION;
29655- if (copy_to_user(addr, vers, strlen(vers) + 1))
29656+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29657 break;
29658 err = 0;
29659 break;
29660diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29661--- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29662+++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29663@@ -552,12 +552,12 @@ struct rtl8169_private {
29664 struct mdio_ops {
29665 void (*write)(void __iomem *, int, int);
29666 int (*read)(void __iomem *, int);
29667- } mdio_ops;
29668+ } __no_const mdio_ops;
29669
29670 struct pll_power_ops {
29671 void (*down)(struct rtl8169_private *);
29672 void (*up)(struct rtl8169_private *);
29673- } pll_power_ops;
29674+ } __no_const pll_power_ops;
29675
29676 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29677 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29678diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29679--- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29680+++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29681@@ -131,6 +131,7 @@
29682 #define CHIPREV_ID_5750_A0 0x4000
29683 #define CHIPREV_ID_5750_A1 0x4001
29684 #define CHIPREV_ID_5750_A3 0x4003
29685+#define CHIPREV_ID_5750_C1 0x4201
29686 #define CHIPREV_ID_5750_C2 0x4202
29687 #define CHIPREV_ID_5752_A0_HW 0x5000
29688 #define CHIPREV_ID_5752_A0 0x6000
29689diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29690--- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29691+++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29692@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29693
29694 static int __init abyss_init (void)
29695 {
29696- abyss_netdev_ops = tms380tr_netdev_ops;
29697+ pax_open_kernel();
29698+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29699
29700- abyss_netdev_ops.ndo_open = abyss_open;
29701- abyss_netdev_ops.ndo_stop = abyss_close;
29702+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29703+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29704+ pax_close_kernel();
29705
29706 return pci_register_driver(&abyss_driver);
29707 }
29708diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29709--- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29710+++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29711@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29712
29713 static int __init madgemc_init (void)
29714 {
29715- madgemc_netdev_ops = tms380tr_netdev_ops;
29716- madgemc_netdev_ops.ndo_open = madgemc_open;
29717- madgemc_netdev_ops.ndo_stop = madgemc_close;
29718+ pax_open_kernel();
29719+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29720+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29721+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29722+ pax_close_kernel();
29723
29724 return mca_register_driver (&madgemc_driver);
29725 }
29726diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29727--- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29728+++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29729@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29730 struct platform_device *pdev;
29731 int i, num = 0, err = 0;
29732
29733- proteon_netdev_ops = tms380tr_netdev_ops;
29734- proteon_netdev_ops.ndo_open = proteon_open;
29735- proteon_netdev_ops.ndo_stop = tms380tr_close;
29736+ pax_open_kernel();
29737+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29738+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29739+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29740+ pax_close_kernel();
29741
29742 err = platform_driver_register(&proteon_driver);
29743 if (err)
29744diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29745--- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29746+++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29747@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29748 struct platform_device *pdev;
29749 int i, num = 0, err = 0;
29750
29751- sk_isa_netdev_ops = tms380tr_netdev_ops;
29752- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29753- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29754+ pax_open_kernel();
29755+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29756+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29757+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29758+ pax_close_kernel();
29759
29760 err = platform_driver_register(&sk_isa_driver);
29761 if (err)
29762diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29763--- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29764+++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29765@@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29766 struct de_srom_info_leaf *il;
29767 void *bufp;
29768
29769+ pax_track_stack();
29770+
29771 /* download entire eeprom */
29772 for (i = 0; i < DE_EEPROM_WORDS; i++)
29773 ((__le16 *)ee_data)[i] =
29774diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29775--- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29776+++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29777@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29778 for (i=0; i<ETH_ALEN; i++) {
29779 tmp.addr[i] = dev->dev_addr[i];
29780 }
29781- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29782+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29783 break;
29784
29785 case DE4X5_SET_HWADDR: /* Set the hardware address */
29786@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29787 spin_lock_irqsave(&lp->lock, flags);
29788 memcpy(&statbuf, &lp->pktStats, ioc->len);
29789 spin_unlock_irqrestore(&lp->lock, flags);
29790- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29791+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29792 return -EFAULT;
29793 break;
29794 }
29795diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29796--- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29797+++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29798@@ -71,7 +71,7 @@
29799 #include <asm/byteorder.h>
29800 #include <linux/serial_core.h>
29801 #include <linux/serial.h>
29802-
29803+#include <asm/local.h>
29804
29805 #define MOD_AUTHOR "Option Wireless"
29806 #define MOD_DESCRIPTION "USB High Speed Option driver"
29807@@ -257,7 +257,7 @@ struct hso_serial {
29808
29809 /* from usb_serial_port */
29810 struct tty_struct *tty;
29811- int open_count;
29812+ local_t open_count;
29813 spinlock_t serial_lock;
29814
29815 int (*write_data) (struct hso_serial *serial);
29816@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29817 struct urb *urb;
29818
29819 urb = serial->rx_urb[0];
29820- if (serial->open_count > 0) {
29821+ if (local_read(&serial->open_count) > 0) {
29822 count = put_rxbuf_data(urb, serial);
29823 if (count == -1)
29824 return;
29825@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29826 DUMP1(urb->transfer_buffer, urb->actual_length);
29827
29828 /* Anyone listening? */
29829- if (serial->open_count == 0)
29830+ if (local_read(&serial->open_count) == 0)
29831 return;
29832
29833 if (status == 0) {
29834@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29835 spin_unlock_irq(&serial->serial_lock);
29836
29837 /* check for port already opened, if not set the termios */
29838- serial->open_count++;
29839- if (serial->open_count == 1) {
29840+ if (local_inc_return(&serial->open_count) == 1) {
29841 serial->rx_state = RX_IDLE;
29842 /* Force default termio settings */
29843 _hso_serial_set_termios(tty, NULL);
29844@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29845 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29846 if (result) {
29847 hso_stop_serial_device(serial->parent);
29848- serial->open_count--;
29849+ local_dec(&serial->open_count);
29850 kref_put(&serial->parent->ref, hso_serial_ref_free);
29851 }
29852 } else {
29853@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29854
29855 /* reset the rts and dtr */
29856 /* do the actual close */
29857- serial->open_count--;
29858+ local_dec(&serial->open_count);
29859
29860- if (serial->open_count <= 0) {
29861- serial->open_count = 0;
29862+ if (local_read(&serial->open_count) <= 0) {
29863+ local_set(&serial->open_count, 0);
29864 spin_lock_irq(&serial->serial_lock);
29865 if (serial->tty == tty) {
29866 serial->tty->driver_data = NULL;
29867@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29868
29869 /* the actual setup */
29870 spin_lock_irqsave(&serial->serial_lock, flags);
29871- if (serial->open_count)
29872+ if (local_read(&serial->open_count))
29873 _hso_serial_set_termios(tty, old);
29874 else
29875 tty->termios = old;
29876@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29877 D1("Pending read interrupt on port %d\n", i);
29878 spin_lock(&serial->serial_lock);
29879 if (serial->rx_state == RX_IDLE &&
29880- serial->open_count > 0) {
29881+ local_read(&serial->open_count) > 0) {
29882 /* Setup and send a ctrl req read on
29883 * port i */
29884 if (!serial->rx_urb_filled[0]) {
29885@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29886 /* Start all serial ports */
29887 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29888 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29889- if (dev2ser(serial_table[i])->open_count) {
29890+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29891 result =
29892 hso_start_serial_device(serial_table[i], GFP_NOIO);
29893 hso_kick_transmit(dev2ser(serial_table[i]));
29894diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29895--- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29896+++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29897@@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29898 * Return with error code if any of the queue indices
29899 * is out of range
29900 */
29901- if (p->ring_index[i] < 0 ||
29902- p->ring_index[i] >= adapter->num_rx_queues)
29903+ if (p->ring_index[i] >= adapter->num_rx_queues)
29904 return -EINVAL;
29905 }
29906
29907diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29908--- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29909+++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29910@@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29911 void (*link_down)(struct __vxge_hw_device *devh);
29912 void (*crit_err)(struct __vxge_hw_device *devh,
29913 enum vxge_hw_event type, u64 ext_data);
29914-};
29915+} __no_const;
29916
29917 /*
29918 * struct __vxge_hw_blockpool_entry - Block private data structure
29919diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29920--- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29921+++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29922@@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29923 struct sk_buff *completed[NR_SKB_COMPLETED];
29924 int more;
29925
29926+ pax_track_stack();
29927+
29928 do {
29929 more = 0;
29930 skb_ptr = completed;
29931@@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29932 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29933 int index;
29934
29935+ pax_track_stack();
29936+
29937 /*
29938 * Filling
29939 * - itable with bucket numbers
29940diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29941--- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29942+++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29943@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29944 struct vxge_hw_mempool_dma *dma_object,
29945 u32 index,
29946 u32 is_last);
29947-};
29948+} __no_const;
29949
29950 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29951 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29952diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29953--- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29954+++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29955@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29956 unsigned char hex[1024],
29957 * phex = hex;
29958
29959+ pax_track_stack();
29960+
29961 if (len >= (sizeof(hex) / 2))
29962 len = (sizeof(hex) / 2) - 1;
29963
29964diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
29965--- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
29966+++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
29967@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29968
29969 static int x25_open(struct net_device *dev)
29970 {
29971- struct lapb_register_struct cb;
29972+ static struct lapb_register_struct cb = {
29973+ .connect_confirmation = x25_connected,
29974+ .connect_indication = x25_connected,
29975+ .disconnect_confirmation = x25_disconnected,
29976+ .disconnect_indication = x25_disconnected,
29977+ .data_indication = x25_data_indication,
29978+ .data_transmit = x25_data_transmit
29979+ };
29980 int result;
29981
29982- cb.connect_confirmation = x25_connected;
29983- cb.connect_indication = x25_connected;
29984- cb.disconnect_confirmation = x25_disconnected;
29985- cb.disconnect_indication = x25_disconnected;
29986- cb.data_indication = x25_data_indication;
29987- cb.data_transmit = x25_data_transmit;
29988-
29989 result = lapb_register(dev, &cb);
29990 if (result != LAPB_OK)
29991 return result;
29992diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
29993--- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
29994+++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
29995@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29996 int do_autopm = 1;
29997 DECLARE_COMPLETION_ONSTACK(notif_completion);
29998
29999+ pax_track_stack();
30000+
30001 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30002 i2400m, ack, ack_size);
30003 BUG_ON(_ack == i2400m->bm_ack_buf);
30004diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30005--- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30006+++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30007@@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30008 BSSListElement * loop_net;
30009 BSSListElement * tmp_net;
30010
30011+ pax_track_stack();
30012+
30013 /* Blow away current list of scan results */
30014 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30015 list_move_tail (&loop_net->list, &ai->network_free_list);
30016@@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30017 WepKeyRid wkr;
30018 int rc;
30019
30020+ pax_track_stack();
30021+
30022 memset( &mySsid, 0, sizeof( mySsid ) );
30023 kfree (ai->flash);
30024 ai->flash = NULL;
30025@@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30026 __le32 *vals = stats.vals;
30027 int len;
30028
30029+ pax_track_stack();
30030+
30031 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30032 return -ENOMEM;
30033 data = file->private_data;
30034@@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30035 /* If doLoseSync is not 1, we won't do a Lose Sync */
30036 int doLoseSync = -1;
30037
30038+ pax_track_stack();
30039+
30040 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30041 return -ENOMEM;
30042 data = file->private_data;
30043@@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30044 int i;
30045 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30046
30047+ pax_track_stack();
30048+
30049 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30050 if (!qual)
30051 return -ENOMEM;
30052@@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30053 CapabilityRid cap_rid;
30054 __le32 *vals = stats_rid.vals;
30055
30056+ pax_track_stack();
30057+
30058 /* Get stats out of the card */
30059 clear_bit(JOB_WSTATS, &local->jobs);
30060 if (local->power.event) {
30061diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30062--- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30063+++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30064@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30065 unsigned int v;
30066 u64 tsf;
30067
30068+ pax_track_stack();
30069+
30070 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30071 len += snprintf(buf+len, sizeof(buf)-len,
30072 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30073@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30074 unsigned int len = 0;
30075 unsigned int i;
30076
30077+ pax_track_stack();
30078+
30079 len += snprintf(buf+len, sizeof(buf)-len,
30080 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30081
30082@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30083 unsigned int i;
30084 unsigned int v;
30085
30086+ pax_track_stack();
30087+
30088 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30089 sc->ah->ah_ant_mode);
30090 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30091@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30092 unsigned int len = 0;
30093 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30094
30095+ pax_track_stack();
30096+
30097 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30098 sc->bssidmask);
30099 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30100@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30101 unsigned int len = 0;
30102 int i;
30103
30104+ pax_track_stack();
30105+
30106 len += snprintf(buf+len, sizeof(buf)-len,
30107 "RX\n---------------------\n");
30108 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30109@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30110 char buf[700];
30111 unsigned int len = 0;
30112
30113+ pax_track_stack();
30114+
30115 len += snprintf(buf+len, sizeof(buf)-len,
30116 "HW has PHY error counters:\t%s\n",
30117 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30118@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30119 struct ath5k_buf *bf, *bf0;
30120 int i, n;
30121
30122+ pax_track_stack();
30123+
30124 len += snprintf(buf+len, sizeof(buf)-len,
30125 "available txbuffers: %d\n", sc->txbuf_len);
30126
30127diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30128--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30129+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30130@@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30131 s32 i, j, ip, im, nmeasurement;
30132 u8 nchains = get_streams(common->tx_chainmask);
30133
30134+ pax_track_stack();
30135+
30136 for (ip = 0; ip < MPASS; ip++) {
30137 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30138 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30139@@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30140 int i, ip, im, j;
30141 int nmeasurement;
30142
30143+ pax_track_stack();
30144+
30145 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30146 if (ah->txchainmask & (1 << i))
30147 num_chains++;
30148diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30149--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30150+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30151@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30152 int theta_low_bin = 0;
30153 int i;
30154
30155+ pax_track_stack();
30156+
30157 /* disregard any bin that contains <= 16 samples */
30158 thresh_accum_cnt = 16;
30159 scale_factor = 5;
30160diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30161--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30162+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30163@@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30164 char buf[512];
30165 unsigned int len = 0;
30166
30167+ pax_track_stack();
30168+
30169 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30170 len += snprintf(buf + len, sizeof(buf) - len,
30171 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30172@@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30173 u8 addr[ETH_ALEN];
30174 u32 tmp;
30175
30176+ pax_track_stack();
30177+
30178 len += snprintf(buf + len, sizeof(buf) - len,
30179 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30180 wiphy_name(sc->hw->wiphy),
30181diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30182--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30183+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30184@@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30185 unsigned int len = 0;
30186 int ret = 0;
30187
30188+ pax_track_stack();
30189+
30190 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30191
30192 WMI_CMD(WMI_TGT_STATS_CMDID);
30193@@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30194 char buf[512];
30195 unsigned int len = 0;
30196
30197+ pax_track_stack();
30198+
30199 len += snprintf(buf + len, sizeof(buf) - len,
30200 "%20s : %10u\n", "Buffers queued",
30201 priv->debug.tx_stats.buf_queued);
30202@@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30203 char buf[512];
30204 unsigned int len = 0;
30205
30206+ pax_track_stack();
30207+
30208 len += snprintf(buf + len, sizeof(buf) - len,
30209 "%20s : %10u\n", "SKBs allocated",
30210 priv->debug.rx_stats.skb_allocated);
30211diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30212--- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30213+++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30214@@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30215
30216 /* ANI */
30217 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30218-};
30219+} __no_const;
30220
30221 /**
30222 * struct ath_hw_ops - callbacks used by hardware code and driver code
30223@@ -642,7 +642,7 @@ struct ath_hw_ops {
30224 u32 burstDuration);
30225 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30226 u32 vmf);
30227-};
30228+} __no_const;
30229
30230 struct ath_nf_limits {
30231 s16 max;
30232diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30233--- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30234+++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30235@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30236 int err;
30237 DECLARE_SSID_BUF(ssid);
30238
30239+ pax_track_stack();
30240+
30241 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30242
30243 if (ssid_len)
30244@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30245 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30246 int err;
30247
30248+ pax_track_stack();
30249+
30250 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30251 idx, keylen, len);
30252
30253diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30254--- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30255+++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30256@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30257 unsigned long flags;
30258 DECLARE_SSID_BUF(ssid);
30259
30260+ pax_track_stack();
30261+
30262 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30263 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30264 print_ssid(ssid, info_element->data, info_element->len),
30265diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30266--- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30267+++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30268@@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30269 */
30270 if (iwl3945_mod_params.disable_hw_scan) {
30271 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30272- iwl3945_hw_ops.hw_scan = NULL;
30273+ pax_open_kernel();
30274+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30275+ pax_close_kernel();
30276 }
30277
30278 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30279diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30280--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30281+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30282@@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30283 if (cfg->mod_params->disable_hw_scan) {
30284 dev_printk(KERN_DEBUG, &(pdev->dev),
30285 "sw scan support is deprecated\n");
30286- iwlagn_hw_ops.hw_scan = NULL;
30287+ pax_open_kernel();
30288+ *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30289+ pax_close_kernel();
30290 }
30291
30292 hw = iwl_alloc_all(cfg);
30293diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30294--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30295+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30296@@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30297 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30298 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30299
30300+ pax_track_stack();
30301+
30302 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30303
30304 /* Treat uninitialized rate scaling data same as non-existing. */
30305@@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30306 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30307 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30308
30309+ pax_track_stack();
30310+
30311 /* Override starting rate (index 0) if needed for debug purposes */
30312 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30313
30314diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30315--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30316+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30317@@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30318 int pos = 0;
30319 const size_t bufsz = sizeof(buf);
30320
30321+ pax_track_stack();
30322+
30323 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30324 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30325 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30326@@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30327 char buf[256 * NUM_IWL_RXON_CTX];
30328 const size_t bufsz = sizeof(buf);
30329
30330+ pax_track_stack();
30331+
30332 for_each_context(priv, ctx) {
30333 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30334 ctx->ctxid);
30335diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30336--- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30337+++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30338@@ -68,8 +68,8 @@ do {
30339 } while (0)
30340
30341 #else
30342-#define IWL_DEBUG(__priv, level, fmt, args...)
30343-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30344+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30345+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30346 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30347 const void *p, u32 len)
30348 {}
30349diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30350--- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30351+++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30352@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30353 int buf_len = 512;
30354 size_t len = 0;
30355
30356+ pax_track_stack();
30357+
30358 if (*ppos != 0)
30359 return 0;
30360 if (count < sizeof(buf))
30361diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30362--- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30363+++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30364@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30365 return -EINVAL;
30366
30367 if (fake_hw_scan) {
30368- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30369- mac80211_hwsim_ops.sw_scan_start = NULL;
30370- mac80211_hwsim_ops.sw_scan_complete = NULL;
30371+ pax_open_kernel();
30372+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30373+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30374+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30375+ pax_close_kernel();
30376 }
30377
30378 spin_lock_init(&hwsim_radio_lock);
30379diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30380--- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30381+++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30382@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30383
30384 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30385
30386- if (rts_threshold < 0 || rts_threshold > 2347)
30387+ if (rts_threshold > 2347)
30388 rts_threshold = 2347;
30389
30390 tmp = cpu_to_le32(rts_threshold);
30391diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30392--- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30393+++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30394@@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30395 u8 rfpath;
30396 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30397
30398+ pax_track_stack();
30399+
30400 precommoncmdcnt = 0;
30401 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30402 MAX_PRECMD_CNT,
30403diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30404--- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30405+++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30406@@ -260,7 +260,7 @@ struct wl1251_if_operations {
30407 void (*reset)(struct wl1251 *wl);
30408 void (*enable_irq)(struct wl1251 *wl);
30409 void (*disable_irq)(struct wl1251 *wl);
30410-};
30411+} __no_const;
30412
30413 struct wl1251 {
30414 struct ieee80211_hw *hw;
30415diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30416--- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30417+++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30418@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30419 u32 chunk_len;
30420 int i;
30421
30422+ pax_track_stack();
30423+
30424 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30425
30426 spi_message_init(&m);
30427diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30428--- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30429+++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30430@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30431 if (cookie == NO_COOKIE)
30432 offset = pc;
30433 if (cookie == INVALID_COOKIE) {
30434- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30435+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30436 offset = pc;
30437 }
30438 if (cookie != last_cookie) {
30439@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30440 /* add userspace sample */
30441
30442 if (!mm) {
30443- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30444+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30445 return 0;
30446 }
30447
30448 cookie = lookup_dcookie(mm, s->eip, &offset);
30449
30450 if (cookie == INVALID_COOKIE) {
30451- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30452+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30453 return 0;
30454 }
30455
30456@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30457 /* ignore backtraces if failed to add a sample */
30458 if (state == sb_bt_start) {
30459 state = sb_bt_ignore;
30460- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30461+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30462 }
30463 }
30464 release_mm(mm);
30465diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30466--- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30467+++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30468@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30469 }
30470
30471 if (buffer_pos == buffer_size) {
30472- atomic_inc(&oprofile_stats.event_lost_overflow);
30473+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30474 return;
30475 }
30476
30477diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30478--- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30479+++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30480@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30481 if (oprofile_ops.switch_events())
30482 return;
30483
30484- atomic_inc(&oprofile_stats.multiplex_counter);
30485+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30486 start_switch_worker();
30487 }
30488
30489diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30490--- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30491+++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30492@@ -186,7 +186,7 @@ static const struct file_operations atom
30493
30494
30495 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30496- char const *name, atomic_t *val)
30497+ char const *name, atomic_unchecked_t *val)
30498 {
30499 return __oprofilefs_create_file(sb, root, name,
30500 &atomic_ro_fops, 0444, val);
30501diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30502--- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30503+++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30504@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30505 cpu_buf->sample_invalid_eip = 0;
30506 }
30507
30508- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30509- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30510- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30511- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30512- atomic_set(&oprofile_stats.multiplex_counter, 0);
30513+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30514+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30515+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30516+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30517+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30518 }
30519
30520
30521diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30522--- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30523+++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30524@@ -13,11 +13,11 @@
30525 #include <asm/atomic.h>
30526
30527 struct oprofile_stat_struct {
30528- atomic_t sample_lost_no_mm;
30529- atomic_t sample_lost_no_mapping;
30530- atomic_t bt_lost_no_mapping;
30531- atomic_t event_lost_overflow;
30532- atomic_t multiplex_counter;
30533+ atomic_unchecked_t sample_lost_no_mm;
30534+ atomic_unchecked_t sample_lost_no_mapping;
30535+ atomic_unchecked_t bt_lost_no_mapping;
30536+ atomic_unchecked_t event_lost_overflow;
30537+ atomic_unchecked_t multiplex_counter;
30538 };
30539
30540 extern struct oprofile_stat_struct oprofile_stats;
30541diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30542--- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30543+++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30544@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30545
30546 *ppos += len;
30547
30548- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30549+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30550 }
30551
30552 #ifdef CONFIG_PARPORT_1284
30553@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30554
30555 *ppos += len;
30556
30557- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30558+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30559 }
30560 #endif /* IEEE1284.3 support. */
30561
30562diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30563--- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30564+++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30565@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30566 int (*hardware_test) (struct slot* slot, u32 value);
30567 u8 (*get_power) (struct slot* slot);
30568 int (*set_power) (struct slot* slot, int value);
30569-};
30570+} __no_const;
30571
30572 struct cpci_hp_controller {
30573 unsigned int irq;
30574diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30575--- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30576+++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30577@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30578
30579 void compaq_nvram_init (void __iomem *rom_start)
30580 {
30581+
30582+#ifndef CONFIG_PAX_KERNEXEC
30583 if (rom_start) {
30584 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30585 }
30586+#endif
30587+
30588 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30589
30590 /* initialize our int15 lock */
30591diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30592--- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30593+++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30594@@ -27,9 +27,9 @@
30595 #define MODULE_PARAM_PREFIX "pcie_aspm."
30596
30597 /* Note: those are not register definitions */
30598-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30599-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30600-#define ASPM_STATE_L1 (4) /* L1 state */
30601+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30602+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30603+#define ASPM_STATE_L1 (4U) /* L1 state */
30604 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30605 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30606
30607diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30608--- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30609+++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30610@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30611 return ret;
30612 }
30613
30614-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30615+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30616 struct device_attribute *attr,
30617 char *buf)
30618 {
30619 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30620 }
30621
30622-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30623+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30624 struct device_attribute *attr,
30625 char *buf)
30626 {
30627@@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30628 u32 l, sz, mask;
30629 u16 orig_cmd;
30630
30631- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30632+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30633
30634 if (!dev->mmio_always_on) {
30635 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30636diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30637--- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30638+++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30639@@ -476,7 +476,16 @@ static const struct file_operations proc
30640 static int __init pci_proc_init(void)
30641 {
30642 struct pci_dev *dev = NULL;
30643+
30644+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30645+#ifdef CONFIG_GRKERNSEC_PROC_USER
30646+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30647+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30648+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30649+#endif
30650+#else
30651 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30652+#endif
30653 proc_create("devices", 0, proc_bus_pci_dir,
30654 &proc_bus_pci_dev_operations);
30655 proc_initialized = 1;
30656diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30657--- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30658+++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30659@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30660 struct pcifront_sd *sd = bus->sysdata;
30661 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30662
30663+ pax_track_stack();
30664+
30665 if (verbose_request)
30666 dev_info(&pdev->xdev->dev,
30667 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30668@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30669 struct pcifront_sd *sd = bus->sysdata;
30670 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30671
30672+ pax_track_stack();
30673+
30674 if (verbose_request)
30675 dev_info(&pdev->xdev->dev,
30676 "write dev=%04x:%02x:%02x.%01x - "
30677@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30678 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30679 struct msi_desc *entry;
30680
30681+ pax_track_stack();
30682+
30683 if (nvec > SH_INFO_MAX_VEC) {
30684 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30685 " Increase SH_INFO_MAX_VEC.\n", nvec);
30686@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30687 struct pcifront_sd *sd = dev->bus->sysdata;
30688 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30689
30690+ pax_track_stack();
30691+
30692 err = do_pci_op(pdev, &op);
30693
30694 /* What should do for error ? */
30695@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30696 struct pcifront_sd *sd = dev->bus->sysdata;
30697 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30698
30699+ pax_track_stack();
30700+
30701 err = do_pci_op(pdev, &op);
30702 if (likely(!err)) {
30703 vector[0] = op.value;
30704diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30705--- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30706+++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30707@@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30708 return 0;
30709 }
30710
30711-void static hotkey_mask_warn_incomplete_mask(void)
30712+static void hotkey_mask_warn_incomplete_mask(void)
30713 {
30714 /* log only what the user can fix... */
30715 const u32 wantedmask = hotkey_driver_mask &
30716diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30717--- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30718+++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30719@@ -59,7 +59,7 @@ do { \
30720 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30721 } while(0)
30722
30723-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30724+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30725 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30726
30727 /*
30728@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30729
30730 cpu = get_cpu();
30731 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30732+
30733+ pax_open_kernel();
30734 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30735+ pax_close_kernel();
30736
30737 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30738 spin_lock_irqsave(&pnp_bios_lock, flags);
30739@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30740 :"memory");
30741 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30742
30743+ pax_open_kernel();
30744 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30745+ pax_close_kernel();
30746+
30747 put_cpu();
30748
30749 /* If we get here and this is set then the PnP BIOS faulted on us. */
30750@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30751 return status;
30752 }
30753
30754-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30755+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30756 {
30757 int i;
30758
30759@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30760 pnp_bios_callpoint.offset = header->fields.pm16offset;
30761 pnp_bios_callpoint.segment = PNP_CS16;
30762
30763+ pax_open_kernel();
30764+
30765 for_each_possible_cpu(i) {
30766 struct desc_struct *gdt = get_cpu_gdt_table(i);
30767 if (!gdt)
30768@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30769 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30770 (unsigned long)__va(header->fields.pm16dseg));
30771 }
30772+
30773+ pax_close_kernel();
30774 }
30775diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30776--- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30777+++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30778@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30779 return 1;
30780
30781 /* check if the resource is valid */
30782- if (*irq < 0 || *irq > 15)
30783+ if (*irq > 15)
30784 return 0;
30785
30786 /* check if the resource is reserved */
30787@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30788 return 1;
30789
30790 /* check if the resource is valid */
30791- if (*dma < 0 || *dma == 4 || *dma > 7)
30792+ if (*dma == 4 || *dma > 7)
30793 return 0;
30794
30795 /* check if the resource is reserved */
30796diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30797--- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30798+++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30799@@ -66,7 +66,7 @@
30800 struct bq27x00_device_info;
30801 struct bq27x00_access_methods {
30802 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30803-};
30804+} __no_const;
30805
30806 enum bq27x00_chip { BQ27000, BQ27500 };
30807
30808diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30809--- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30810+++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30811@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30812 max8660->shadow_regs[MAX8660_OVER1] = 5;
30813 } else {
30814 /* Otherwise devices can be toggled via software */
30815- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30816- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30817+ pax_open_kernel();
30818+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30819+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30820+ pax_close_kernel();
30821 }
30822
30823 /*
30824diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30825--- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30826+++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30827@@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30828 }
30829 mc13xxx_unlock(mc13892);
30830
30831- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30832+ pax_open_kernel();
30833+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30834 = mc13892_vcam_set_mode;
30835- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30836+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30837 = mc13892_vcam_get_mode;
30838+ pax_close_kernel();
30839 for (i = 0; i < pdata->num_regulators; i++) {
30840 init_data = &pdata->regulators[i];
30841 priv->regulators[i] = regulator_register(
30842diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30843--- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30844+++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30845@@ -14,6 +14,7 @@
30846 #include <linux/module.h>
30847 #include <linux/rtc.h>
30848 #include <linux/sched.h>
30849+#include <linux/grsecurity.h>
30850 #include "rtc-core.h"
30851
30852 static dev_t rtc_devt;
30853@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30854 if (copy_from_user(&tm, uarg, sizeof(tm)))
30855 return -EFAULT;
30856
30857+ gr_log_timechange();
30858+
30859 return rtc_set_time(rtc, &tm);
30860
30861 case RTC_PIE_ON:
30862diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30863--- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30864+++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30865@@ -492,7 +492,7 @@ struct adapter_ops
30866 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30867 /* Administrative operations */
30868 int (*adapter_comm)(struct aac_dev * dev, int comm);
30869-};
30870+} __no_const;
30871
30872 /*
30873 * Define which interrupt handler needs to be installed
30874diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30875--- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30876+++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30877@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30878 u32 actual_fibsize64, actual_fibsize = 0;
30879 int i;
30880
30881+ pax_track_stack();
30882
30883 if (dev->in_reset) {
30884 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30885diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30886--- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30887+++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30888@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30889 flash_error_table[i].reason);
30890 }
30891
30892-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30893+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30894 asd_show_update_bios, asd_store_update_bios);
30895
30896 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30897diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30898--- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30899+++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30900@@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30901 struct bfad_vport_s *vport, *vport_new;
30902 struct bfa_fcs_driver_info_s driver_info;
30903
30904+ pax_track_stack();
30905+
30906 /* Fill the driver_info info to fcs*/
30907 memset(&driver_info, 0, sizeof(driver_info));
30908 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30909diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30910--- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30911+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30912@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30913 u16 len, count;
30914 u16 templen;
30915
30916+ pax_track_stack();
30917+
30918 /*
30919 * get hba attributes
30920 */
30921@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30922 u8 count = 0;
30923 u16 templen;
30924
30925+ pax_track_stack();
30926+
30927 /*
30928 * get port attributes
30929 */
30930diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30931--- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30932+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30933@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30934 struct fc_rpsc_speed_info_s speeds;
30935 struct bfa_port_attr_s pport_attr;
30936
30937+ pax_track_stack();
30938+
30939 bfa_trc(port->fcs, rx_fchs->s_id);
30940 bfa_trc(port->fcs, rx_fchs->d_id);
30941
30942diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30943--- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30944+++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30945@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30946 u32 *nvecs, u32 *maxvec);
30947 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30948 u32 *end);
30949-};
30950+} __no_const;
30951 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30952
30953 struct bfa_iocfc_s {
30954diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30955--- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30956+++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30957@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30958 bfa_ioc_disable_cbfn_t disable_cbfn;
30959 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30960 bfa_ioc_reset_cbfn_t reset_cbfn;
30961-};
30962+} __no_const;
30963
30964 /*
30965 * Heartbeat failure notification queue element.
30966@@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
30967 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30968 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30969 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30970-};
30971+} __no_const;
30972
30973 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30974 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30975diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
30976--- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
30977+++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
30978@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30979 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30980 *PrototypeHostAdapter)
30981 {
30982+ pax_track_stack();
30983+
30984 /*
30985 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30986 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30987diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
30988--- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
30989+++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
30990@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30991 dma_addr_t addr;
30992 ulong flags = 0;
30993
30994+ pax_track_stack();
30995+
30996 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30997 // get user msg size in u32s
30998 if(get_user(size, &user_msg[0])){
30999@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31000 s32 rcode;
31001 dma_addr_t addr;
31002
31003+ pax_track_stack();
31004+
31005 memset(msg, 0 , sizeof(msg));
31006 len = scsi_bufflen(cmd);
31007 direction = 0x00000000;
31008diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31009--- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31010+++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31011@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31012 struct hostdata *ha;
31013 char name[16];
31014
31015+ pax_track_stack();
31016+
31017 sprintf(name, "%s%d", driver_name, j);
31018
31019 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31020diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31021--- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31022+++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31023@@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31024 } buf;
31025 int rc;
31026
31027+ pax_track_stack();
31028+
31029 fiph = (struct fip_header *)skb->data;
31030 sub = fiph->fip_subcode;
31031
31032diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31033--- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31034+++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31035@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31036 unsigned long flags;
31037 gdth_ha_str *ha;
31038
31039+ pax_track_stack();
31040+
31041 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31042 return -EFAULT;
31043 ha = gdth_find_ha(ldrv.ionode);
31044@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31045 gdth_ha_str *ha;
31046 int rval;
31047
31048+ pax_track_stack();
31049+
31050 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31051 res.number >= MAX_HDRIVES)
31052 return -EFAULT;
31053@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31054 gdth_ha_str *ha;
31055 int rval;
31056
31057+ pax_track_stack();
31058+
31059 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31060 return -EFAULT;
31061 ha = gdth_find_ha(gen.ionode);
31062@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31063 int i;
31064 gdth_cmd_str gdtcmd;
31065 char cmnd[MAX_COMMAND_SIZE];
31066+
31067+ pax_track_stack();
31068+
31069 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31070
31071 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31072diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31073--- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31074+++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31075@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31076 u64 paddr;
31077
31078 char cmnd[MAX_COMMAND_SIZE];
31079+
31080+ pax_track_stack();
31081+
31082 memset(cmnd, 0xff, 12);
31083 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31084
31085@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31086 gdth_hget_str *phg;
31087 char cmnd[MAX_COMMAND_SIZE];
31088
31089+ pax_track_stack();
31090+
31091 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31092 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31093 if (!gdtcmd || !estr)
31094diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31095--- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31096+++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31097@@ -42,7 +42,7 @@
31098 #include "scsi_logging.h"
31099
31100
31101-static atomic_t scsi_host_next_hn; /* host_no for next new host */
31102+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31103
31104
31105 static void scsi_host_cls_release(struct device *dev)
31106@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31107 * subtract one because we increment first then return, but we need to
31108 * know what the next host number was before increment
31109 */
31110- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31111+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31112 shost->dma_channel = 0xff;
31113
31114 /* These three are default values which can be overridden */
31115diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31116--- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31117+++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31118@@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31119 u32 a;
31120
31121 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31122- return h->access.command_completed(h);
31123+ return h->access->command_completed(h);
31124
31125 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31126 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31127@@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31128 while (!list_empty(&h->reqQ)) {
31129 c = list_entry(h->reqQ.next, struct CommandList, list);
31130 /* can't do anything if fifo is full */
31131- if ((h->access.fifo_full(h))) {
31132+ if ((h->access->fifo_full(h))) {
31133 dev_warn(&h->pdev->dev, "fifo full\n");
31134 break;
31135 }
31136@@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31137 h->Qdepth--;
31138
31139 /* Tell the controller execute command */
31140- h->access.submit_command(h, c);
31141+ h->access->submit_command(h, c);
31142
31143 /* Put job onto the completed Q */
31144 addQ(&h->cmpQ, c);
31145@@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31146
31147 static inline unsigned long get_next_completion(struct ctlr_info *h)
31148 {
31149- return h->access.command_completed(h);
31150+ return h->access->command_completed(h);
31151 }
31152
31153 static inline bool interrupt_pending(struct ctlr_info *h)
31154 {
31155- return h->access.intr_pending(h);
31156+ return h->access->intr_pending(h);
31157 }
31158
31159 static inline long interrupt_not_for_us(struct ctlr_info *h)
31160 {
31161- return (h->access.intr_pending(h) == 0) ||
31162+ return (h->access->intr_pending(h) == 0) ||
31163 (h->interrupts_enabled == 0);
31164 }
31165
31166@@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31167 if (prod_index < 0)
31168 return -ENODEV;
31169 h->product_name = products[prod_index].product_name;
31170- h->access = *(products[prod_index].access);
31171+ h->access = products[prod_index].access;
31172
31173 if (hpsa_board_disabled(h->pdev)) {
31174 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31175@@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31176 }
31177
31178 /* make sure the board interrupts are off */
31179- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31180+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31181
31182 if (h->msix_vector || h->msi_vector)
31183 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31184@@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31185 hpsa_scsi_setup(h);
31186
31187 /* Turn the interrupts on so we can service requests */
31188- h->access.set_intr_mask(h, HPSA_INTR_ON);
31189+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31190
31191 hpsa_put_ctlr_into_performant_mode(h);
31192 hpsa_hba_inquiry(h);
31193@@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31194 * To write all data in the battery backed cache to disks
31195 */
31196 hpsa_flush_cache(h);
31197- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31198+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31199 free_irq(h->intr[h->intr_mode], h);
31200 #ifdef CONFIG_PCI_MSI
31201 if (h->msix_vector)
31202@@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31203 return;
31204 }
31205 /* Change the access methods to the performant access methods */
31206- h->access = SA5_performant_access;
31207+ h->access = &SA5_performant_access;
31208 h->transMethod = CFGTBL_Trans_Performant;
31209 }
31210
31211diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31212--- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31213+++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31214@@ -73,7 +73,7 @@ struct ctlr_info {
31215 unsigned int msix_vector;
31216 unsigned int msi_vector;
31217 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31218- struct access_method access;
31219+ struct access_method *access;
31220
31221 /* queue and queue Info */
31222 struct list_head reqQ;
31223diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31224--- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31225+++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31226@@ -1027,7 +1027,7 @@ typedef struct {
31227 int (*intr)(struct ips_ha *);
31228 void (*enableint)(struct ips_ha *);
31229 uint32_t (*statupd)(struct ips_ha *);
31230-} ips_hw_func_t;
31231+} __no_const ips_hw_func_t;
31232
31233 typedef struct ips_ha {
31234 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31235diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31236--- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31237+++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31238@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31239 * all together if not used XXX
31240 */
31241 struct {
31242- atomic_t no_free_exch;
31243- atomic_t no_free_exch_xid;
31244- atomic_t xid_not_found;
31245- atomic_t xid_busy;
31246- atomic_t seq_not_found;
31247- atomic_t non_bls_resp;
31248+ atomic_unchecked_t no_free_exch;
31249+ atomic_unchecked_t no_free_exch_xid;
31250+ atomic_unchecked_t xid_not_found;
31251+ atomic_unchecked_t xid_busy;
31252+ atomic_unchecked_t seq_not_found;
31253+ atomic_unchecked_t non_bls_resp;
31254 } stats;
31255 };
31256
31257@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31258 /* allocate memory for exchange */
31259 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31260 if (!ep) {
31261- atomic_inc(&mp->stats.no_free_exch);
31262+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31263 goto out;
31264 }
31265 memset(ep, 0, sizeof(*ep));
31266@@ -761,7 +761,7 @@ out:
31267 return ep;
31268 err:
31269 spin_unlock_bh(&pool->lock);
31270- atomic_inc(&mp->stats.no_free_exch_xid);
31271+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31272 mempool_free(ep, mp->ep_pool);
31273 return NULL;
31274 }
31275@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31276 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31277 ep = fc_exch_find(mp, xid);
31278 if (!ep) {
31279- atomic_inc(&mp->stats.xid_not_found);
31280+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31281 reject = FC_RJT_OX_ID;
31282 goto out;
31283 }
31284@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31285 ep = fc_exch_find(mp, xid);
31286 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31287 if (ep) {
31288- atomic_inc(&mp->stats.xid_busy);
31289+ atomic_inc_unchecked(&mp->stats.xid_busy);
31290 reject = FC_RJT_RX_ID;
31291 goto rel;
31292 }
31293@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31294 }
31295 xid = ep->xid; /* get our XID */
31296 } else if (!ep) {
31297- atomic_inc(&mp->stats.xid_not_found);
31298+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31299 reject = FC_RJT_RX_ID; /* XID not found */
31300 goto out;
31301 }
31302@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31303 } else {
31304 sp = &ep->seq;
31305 if (sp->id != fh->fh_seq_id) {
31306- atomic_inc(&mp->stats.seq_not_found);
31307+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31308 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31309 goto rel;
31310 }
31311@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31312
31313 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31314 if (!ep) {
31315- atomic_inc(&mp->stats.xid_not_found);
31316+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31317 goto out;
31318 }
31319 if (ep->esb_stat & ESB_ST_COMPLETE) {
31320- atomic_inc(&mp->stats.xid_not_found);
31321+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31322 goto rel;
31323 }
31324 if (ep->rxid == FC_XID_UNKNOWN)
31325 ep->rxid = ntohs(fh->fh_rx_id);
31326 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31327- atomic_inc(&mp->stats.xid_not_found);
31328+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31329 goto rel;
31330 }
31331 if (ep->did != ntoh24(fh->fh_s_id) &&
31332 ep->did != FC_FID_FLOGI) {
31333- atomic_inc(&mp->stats.xid_not_found);
31334+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31335 goto rel;
31336 }
31337 sof = fr_sof(fp);
31338@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31339 sp->ssb_stat |= SSB_ST_RESP;
31340 sp->id = fh->fh_seq_id;
31341 } else if (sp->id != fh->fh_seq_id) {
31342- atomic_inc(&mp->stats.seq_not_found);
31343+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31344 goto rel;
31345 }
31346
31347@@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31348 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31349
31350 if (!sp)
31351- atomic_inc(&mp->stats.xid_not_found);
31352+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31353 else
31354- atomic_inc(&mp->stats.non_bls_resp);
31355+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31356
31357 fc_frame_free(fp);
31358 }
31359diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31360--- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31361+++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31362@@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31363 .postreset = ata_std_postreset,
31364 .error_handler = ata_std_error_handler,
31365 .post_internal_cmd = sas_ata_post_internal,
31366- .qc_defer = ata_std_qc_defer,
31367+ .qc_defer = ata_std_qc_defer,
31368 .qc_prep = ata_noop_qc_prep,
31369 .qc_issue = sas_ata_qc_issue,
31370 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31371diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31372--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31373+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31374@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31375
31376 #include <linux/debugfs.h>
31377
31378-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31379+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31380 static unsigned long lpfc_debugfs_start_time = 0L;
31381
31382 /* iDiag */
31383@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31384 lpfc_debugfs_enable = 0;
31385
31386 len = 0;
31387- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31388+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31389 (lpfc_debugfs_max_disc_trc - 1);
31390 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31391 dtp = vport->disc_trc + i;
31392@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31393 lpfc_debugfs_enable = 0;
31394
31395 len = 0;
31396- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31397+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31398 (lpfc_debugfs_max_slow_ring_trc - 1);
31399 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31400 dtp = phba->slow_ring_trc + i;
31401@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31402 uint32_t *ptr;
31403 char buffer[1024];
31404
31405+ pax_track_stack();
31406+
31407 off = 0;
31408 spin_lock_irq(&phba->hbalock);
31409
31410@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31411 !vport || !vport->disc_trc)
31412 return;
31413
31414- index = atomic_inc_return(&vport->disc_trc_cnt) &
31415+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31416 (lpfc_debugfs_max_disc_trc - 1);
31417 dtp = vport->disc_trc + index;
31418 dtp->fmt = fmt;
31419 dtp->data1 = data1;
31420 dtp->data2 = data2;
31421 dtp->data3 = data3;
31422- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31423+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31424 dtp->jif = jiffies;
31425 #endif
31426 return;
31427@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31428 !phba || !phba->slow_ring_trc)
31429 return;
31430
31431- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31432+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31433 (lpfc_debugfs_max_slow_ring_trc - 1);
31434 dtp = phba->slow_ring_trc + index;
31435 dtp->fmt = fmt;
31436 dtp->data1 = data1;
31437 dtp->data2 = data2;
31438 dtp->data3 = data3;
31439- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31440+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31441 dtp->jif = jiffies;
31442 #endif
31443 return;
31444@@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31445 "slow_ring buffer\n");
31446 goto debug_failed;
31447 }
31448- atomic_set(&phba->slow_ring_trc_cnt, 0);
31449+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31450 memset(phba->slow_ring_trc, 0,
31451 (sizeof(struct lpfc_debugfs_trc) *
31452 lpfc_debugfs_max_slow_ring_trc));
31453@@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31454 "buffer\n");
31455 goto debug_failed;
31456 }
31457- atomic_set(&vport->disc_trc_cnt, 0);
31458+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31459
31460 snprintf(name, sizeof(name), "discovery_trace");
31461 vport->debug_disc_trc =
31462diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31463--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31464+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31465@@ -419,7 +419,7 @@ struct lpfc_vport {
31466 struct dentry *debug_nodelist;
31467 struct dentry *vport_debugfs_root;
31468 struct lpfc_debugfs_trc *disc_trc;
31469- atomic_t disc_trc_cnt;
31470+ atomic_unchecked_t disc_trc_cnt;
31471 #endif
31472 uint8_t stat_data_enabled;
31473 uint8_t stat_data_blocked;
31474@@ -785,8 +785,8 @@ struct lpfc_hba {
31475 struct timer_list fabric_block_timer;
31476 unsigned long bit_flags;
31477 #define FABRIC_COMANDS_BLOCKED 0
31478- atomic_t num_rsrc_err;
31479- atomic_t num_cmd_success;
31480+ atomic_unchecked_t num_rsrc_err;
31481+ atomic_unchecked_t num_cmd_success;
31482 unsigned long last_rsrc_error_time;
31483 unsigned long last_ramp_down_time;
31484 unsigned long last_ramp_up_time;
31485@@ -800,7 +800,7 @@ struct lpfc_hba {
31486 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31487 struct dentry *debug_slow_ring_trc;
31488 struct lpfc_debugfs_trc *slow_ring_trc;
31489- atomic_t slow_ring_trc_cnt;
31490+ atomic_unchecked_t slow_ring_trc_cnt;
31491 /* iDiag debugfs sub-directory */
31492 struct dentry *idiag_root;
31493 struct dentry *idiag_pci_cfg;
31494diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31495--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31496+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31497@@ -9535,8 +9535,10 @@ lpfc_init(void)
31498 printk(LPFC_COPYRIGHT "\n");
31499
31500 if (lpfc_enable_npiv) {
31501- lpfc_transport_functions.vport_create = lpfc_vport_create;
31502- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31503+ pax_open_kernel();
31504+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31505+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31506+ pax_close_kernel();
31507 }
31508 lpfc_transport_template =
31509 fc_attach_transport(&lpfc_transport_functions);
31510diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31511--- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31512+++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31513@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31514 uint32_t evt_posted;
31515
31516 spin_lock_irqsave(&phba->hbalock, flags);
31517- atomic_inc(&phba->num_rsrc_err);
31518+ atomic_inc_unchecked(&phba->num_rsrc_err);
31519 phba->last_rsrc_error_time = jiffies;
31520
31521 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31522@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31523 unsigned long flags;
31524 struct lpfc_hba *phba = vport->phba;
31525 uint32_t evt_posted;
31526- atomic_inc(&phba->num_cmd_success);
31527+ atomic_inc_unchecked(&phba->num_cmd_success);
31528
31529 if (vport->cfg_lun_queue_depth <= queue_depth)
31530 return;
31531@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31532 unsigned long num_rsrc_err, num_cmd_success;
31533 int i;
31534
31535- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31536- num_cmd_success = atomic_read(&phba->num_cmd_success);
31537+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31538+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31539
31540 vports = lpfc_create_vport_work_array(phba);
31541 if (vports != NULL)
31542@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31543 }
31544 }
31545 lpfc_destroy_vport_work_array(phba, vports);
31546- atomic_set(&phba->num_rsrc_err, 0);
31547- atomic_set(&phba->num_cmd_success, 0);
31548+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31549+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31550 }
31551
31552 /**
31553@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31554 }
31555 }
31556 lpfc_destroy_vport_work_array(phba, vports);
31557- atomic_set(&phba->num_rsrc_err, 0);
31558- atomic_set(&phba->num_cmd_success, 0);
31559+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31560+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31561 }
31562
31563 /**
31564diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31565--- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31566+++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31567@@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31568 int rval;
31569 int i;
31570
31571+ pax_track_stack();
31572+
31573 // Allocate memory for the base list of scb for management module.
31574 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31575
31576diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31577--- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31578+++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31579@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31580 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31581 int ret;
31582
31583+ pax_track_stack();
31584+
31585 or = osd_start_request(od, GFP_KERNEL);
31586 if (!or)
31587 return -ENOMEM;
31588diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31589--- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31590+++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31591@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31592 res->scsi_dev = scsi_dev;
31593 scsi_dev->hostdata = res;
31594 res->change_detected = 0;
31595- atomic_set(&res->read_failures, 0);
31596- atomic_set(&res->write_failures, 0);
31597+ atomic_set_unchecked(&res->read_failures, 0);
31598+ atomic_set_unchecked(&res->write_failures, 0);
31599 rc = 0;
31600 }
31601 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31602@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31603
31604 /* If this was a SCSI read/write command keep count of errors */
31605 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31606- atomic_inc(&res->read_failures);
31607+ atomic_inc_unchecked(&res->read_failures);
31608 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31609- atomic_inc(&res->write_failures);
31610+ atomic_inc_unchecked(&res->write_failures);
31611
31612 if (!RES_IS_GSCSI(res->cfg_entry) &&
31613 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31614@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31615 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31616 * hrrq_id assigned here in queuecommand
31617 */
31618- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31619+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31620 pinstance->num_hrrq;
31621 cmd->cmd_done = pmcraid_io_done;
31622
31623@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31624 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31625 * hrrq_id assigned here in queuecommand
31626 */
31627- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31628+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31629 pinstance->num_hrrq;
31630
31631 if (request_size) {
31632@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31633
31634 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31635 /* add resources only after host is added into system */
31636- if (!atomic_read(&pinstance->expose_resources))
31637+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31638 return;
31639
31640 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31641@@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31642 init_waitqueue_head(&pinstance->reset_wait_q);
31643
31644 atomic_set(&pinstance->outstanding_cmds, 0);
31645- atomic_set(&pinstance->last_message_id, 0);
31646- atomic_set(&pinstance->expose_resources, 0);
31647+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31648+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31649
31650 INIT_LIST_HEAD(&pinstance->free_res_q);
31651 INIT_LIST_HEAD(&pinstance->used_res_q);
31652@@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31653 /* Schedule worker thread to handle CCN and take care of adding and
31654 * removing devices to OS
31655 */
31656- atomic_set(&pinstance->expose_resources, 1);
31657+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31658 schedule_work(&pinstance->worker_q);
31659 return rc;
31660
31661diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31662--- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31663+++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31664@@ -750,7 +750,7 @@ struct pmcraid_instance {
31665 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31666
31667 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31668- atomic_t last_message_id;
31669+ atomic_unchecked_t last_message_id;
31670
31671 /* configuration table */
31672 struct pmcraid_config_table *cfg_table;
31673@@ -779,7 +779,7 @@ struct pmcraid_instance {
31674 atomic_t outstanding_cmds;
31675
31676 /* should add/delete resources to mid-layer now ?*/
31677- atomic_t expose_resources;
31678+ atomic_unchecked_t expose_resources;
31679
31680
31681
31682@@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31683 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31684 };
31685 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31686- atomic_t read_failures; /* count of failed READ commands */
31687- atomic_t write_failures; /* count of failed WRITE commands */
31688+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31689+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31690
31691 /* To indicate add/delete/modify during CCN */
31692 u8 change_detected;
31693diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31694--- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31695+++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31696@@ -2236,7 +2236,7 @@ struct isp_operations {
31697 int (*get_flash_version) (struct scsi_qla_host *, void *);
31698 int (*start_scsi) (srb_t *);
31699 int (*abort_isp) (struct scsi_qla_host *);
31700-};
31701+} __no_const;
31702
31703 /* MSI-X Support *************************************************************/
31704
31705diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31706--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31707+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31708@@ -256,7 +256,7 @@ struct ddb_entry {
31709 atomic_t retry_relogin_timer; /* Min Time between relogins
31710 * (4000 only) */
31711 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31712- atomic_t relogin_retry_count; /* Num of times relogin has been
31713+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31714 * retried */
31715
31716 uint16_t port;
31717diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31718--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31719+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31720@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31721 ddb_entry->fw_ddb_index = fw_ddb_index;
31722 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31723 atomic_set(&ddb_entry->relogin_timer, 0);
31724- atomic_set(&ddb_entry->relogin_retry_count, 0);
31725+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31726 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31727 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31728 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31729@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31730 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31731 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31732 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31733- atomic_set(&ddb_entry->relogin_retry_count, 0);
31734+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31735 atomic_set(&ddb_entry->relogin_timer, 0);
31736 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31737 iscsi_unblock_session(ddb_entry->sess);
31738diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31739--- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31740+++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31741@@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31742 ddb_entry->fw_ddb_device_state ==
31743 DDB_DS_SESSION_FAILED) {
31744 /* Reset retry relogin timer */
31745- atomic_inc(&ddb_entry->relogin_retry_count);
31746+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31747 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31748 " timed out-retrying"
31749 " relogin (%d)\n",
31750 ha->host_no,
31751 ddb_entry->fw_ddb_index,
31752- atomic_read(&ddb_entry->
31753+ atomic_read_unchecked(&ddb_entry->
31754 relogin_retry_count))
31755 );
31756 start_dpc++;
31757diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31758--- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31759+++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31760@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31761 unsigned long timeout;
31762 int rtn = 0;
31763
31764- atomic_inc(&cmd->device->iorequest_cnt);
31765+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31766
31767 /* check if the device is still usable */
31768 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31769diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31770--- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31771+++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31772@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31773 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31774 unsigned char *cmd = (unsigned char *)scp->cmnd;
31775
31776+ pax_track_stack();
31777+
31778 if ((errsts = check_readiness(scp, 1, devip)))
31779 return errsts;
31780 memset(arr, 0, sizeof(arr));
31781@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31782 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31783 unsigned char *cmd = (unsigned char *)scp->cmnd;
31784
31785+ pax_track_stack();
31786+
31787 if ((errsts = check_readiness(scp, 1, devip)))
31788 return errsts;
31789 memset(arr, 0, sizeof(arr));
31790diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31791--- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31792+++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31793@@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31794 shost = sdev->host;
31795 scsi_init_cmd_errh(cmd);
31796 cmd->result = DID_NO_CONNECT << 16;
31797- atomic_inc(&cmd->device->iorequest_cnt);
31798+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31799
31800 /*
31801 * SCSI request completion path will do scsi_device_unbusy(),
31802@@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31803
31804 INIT_LIST_HEAD(&cmd->eh_entry);
31805
31806- atomic_inc(&cmd->device->iodone_cnt);
31807+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31808 if (cmd->result)
31809- atomic_inc(&cmd->device->ioerr_cnt);
31810+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31811
31812 disposition = scsi_decide_disposition(cmd);
31813 if (disposition != SUCCESS &&
31814diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31815--- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31816+++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31817@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31818 char *buf) \
31819 { \
31820 struct scsi_device *sdev = to_scsi_device(dev); \
31821- unsigned long long count = atomic_read(&sdev->field); \
31822+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31823 return snprintf(buf, 20, "0x%llx\n", count); \
31824 } \
31825 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31826diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31827--- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31828+++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31829@@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31830 * Netlink Infrastructure
31831 */
31832
31833-static atomic_t fc_event_seq;
31834+static atomic_unchecked_t fc_event_seq;
31835
31836 /**
31837 * fc_get_event_number - Obtain the next sequential FC event number
31838@@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31839 u32
31840 fc_get_event_number(void)
31841 {
31842- return atomic_add_return(1, &fc_event_seq);
31843+ return atomic_add_return_unchecked(1, &fc_event_seq);
31844 }
31845 EXPORT_SYMBOL(fc_get_event_number);
31846
31847@@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31848 {
31849 int error;
31850
31851- atomic_set(&fc_event_seq, 0);
31852+ atomic_set_unchecked(&fc_event_seq, 0);
31853
31854 error = transport_class_register(&fc_host_class);
31855 if (error)
31856@@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31857 char *cp;
31858
31859 *val = simple_strtoul(buf, &cp, 0);
31860- if ((*cp && (*cp != '\n')) || (*val < 0))
31861+ if (*cp && (*cp != '\n'))
31862 return -EINVAL;
31863 /*
31864 * Check for overflow; dev_loss_tmo is u32
31865diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31866--- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31867+++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31868@@ -83,7 +83,7 @@ struct iscsi_internal {
31869 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31870 };
31871
31872-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31873+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31874 static struct workqueue_struct *iscsi_eh_timer_workq;
31875
31876 /*
31877@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31878 int err;
31879
31880 ihost = shost->shost_data;
31881- session->sid = atomic_add_return(1, &iscsi_session_nr);
31882+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31883
31884 if (id == ISCSI_MAX_TARGET) {
31885 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31886@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31887 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31888 ISCSI_TRANSPORT_VERSION);
31889
31890- atomic_set(&iscsi_session_nr, 0);
31891+ atomic_set_unchecked(&iscsi_session_nr, 0);
31892
31893 err = class_register(&iscsi_transport_class);
31894 if (err)
31895diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31896--- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31897+++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31898@@ -33,7 +33,7 @@
31899 #include "scsi_transport_srp_internal.h"
31900
31901 struct srp_host_attrs {
31902- atomic_t next_port_id;
31903+ atomic_unchecked_t next_port_id;
31904 };
31905 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31906
31907@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31908 struct Scsi_Host *shost = dev_to_shost(dev);
31909 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31910
31911- atomic_set(&srp_host->next_port_id, 0);
31912+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31913 return 0;
31914 }
31915
31916@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31917 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31918 rport->roles = ids->roles;
31919
31920- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31921+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31922 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31923
31924 transport_setup_device(&rport->dev);
31925diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31926--- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31927+++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31928@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31929 const struct file_operations * fops;
31930 };
31931
31932-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31933+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31934 {"allow_dio", &adio_fops},
31935 {"debug", &debug_fops},
31936 {"def_reserved_size", &dressz_fops},
31937@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31938 {
31939 int k, mask;
31940 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31941- struct sg_proc_leaf * leaf;
31942+ const struct sg_proc_leaf * leaf;
31943
31944 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31945 if (!sg_proc_sgp)
31946diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31947--- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31948+++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31949@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31950 int do_iounmap = 0;
31951 int do_disable_device = 1;
31952
31953+ pax_track_stack();
31954+
31955 memset(&sym_dev, 0, sizeof(sym_dev));
31956 memset(&nvram, 0, sizeof(nvram));
31957 sym_dev.pdev = pdev;
31958diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31959--- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
31960+++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
31961@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31962 dma_addr_t base;
31963 unsigned i;
31964
31965+ pax_track_stack();
31966+
31967 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31968 cmd.reqRingNumPages = adapter->req_pages;
31969 cmd.cmpRingNumPages = adapter->cmp_pages;
31970diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
31971--- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
31972+++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
31973@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31974 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31975
31976 /* portable code must never pass more than 32 bytes */
31977-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31978+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
31979
31980 static u8 *buf;
31981
31982diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31983--- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
31984+++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
31985@@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31986 free_netdev(ifp->net);
31987 }
31988 /* Allocate etherdev, including space for private structure */
31989- ifp->net = alloc_etherdev(sizeof(dhd));
31990+ ifp->net = alloc_etherdev(sizeof(*dhd));
31991 if (!ifp->net) {
31992 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31993 ret = -ENOMEM;
31994 }
31995 if (ret == 0) {
31996 strcpy(ifp->net->name, ifp->name);
31997- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31998+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31999 err = dhd_net_attach(&dhd->pub, ifp->idx);
32000 if (err != 0) {
32001 DHD_ERROR(("%s: dhd_net_attach failed, "
32002@@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32003 strcpy(nv_path, nvram_path);
32004
32005 /* Allocate etherdev, including space for private structure */
32006- net = alloc_etherdev(sizeof(dhd));
32007+ net = alloc_etherdev(sizeof(*dhd));
32008 if (!net) {
32009 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32010 goto fail;
32011@@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32012 /*
32013 * Save the dhd_info into the priv
32014 */
32015- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32016+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32017
32018 /* Set network interface name if it was provided as module parameter */
32019 if (iface_name[0]) {
32020@@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32021 /*
32022 * Save the dhd_info into the priv
32023 */
32024- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32025+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32026
32027 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32028 g_bus = bus;
32029diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32030--- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32031+++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32032@@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32033 list = (wl_u32_list_t *) channels;
32034
32035 dwrq->length = sizeof(struct iw_range);
32036- memset(range, 0, sizeof(range));
32037+ memset(range, 0, sizeof(*range));
32038
32039 range->min_nwid = range->max_nwid = 0;
32040
32041diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32042--- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32043+++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32044@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32045 struct net_device_stats *stats = &etdev->net_stats;
32046
32047 if (tcb->flags & fMP_DEST_BROAD)
32048- atomic_inc(&etdev->Stats.brdcstxmt);
32049+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32050 else if (tcb->flags & fMP_DEST_MULTI)
32051- atomic_inc(&etdev->Stats.multixmt);
32052+ atomic_inc_unchecked(&etdev->Stats.multixmt);
32053 else
32054- atomic_inc(&etdev->Stats.unixmt);
32055+ atomic_inc_unchecked(&etdev->Stats.unixmt);
32056
32057 if (tcb->skb) {
32058 stats->tx_bytes += tcb->skb->len;
32059diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32060--- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32061+++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32062@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32063 * operations
32064 */
32065 u32 unircv; /* # multicast packets received */
32066- atomic_t unixmt; /* # multicast packets for Tx */
32067+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32068 u32 multircv; /* # multicast packets received */
32069- atomic_t multixmt; /* # multicast packets for Tx */
32070+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32071 u32 brdcstrcv; /* # broadcast packets received */
32072- atomic_t brdcstxmt; /* # broadcast packets for Tx */
32073+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32074 u32 norcvbuf; /* # Rx packets discarded */
32075 u32 noxmtbuf; /* # Tx packets discarded */
32076
32077diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32078--- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32079+++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32080@@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32081 unsigned long flags;
32082 int ret = 0;
32083
32084- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32085- atomic_inc(&vmbus_connection.next_gpadl_handle);
32086+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32087+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32088
32089 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32090 if (ret)
32091diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32092--- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32093+++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32094@@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32095 u64 output_address = (output) ? virt_to_phys(output) : 0;
32096 u32 output_address_hi = output_address >> 32;
32097 u32 output_address_lo = output_address & 0xFFFFFFFF;
32098- volatile void *hypercall_page = hv_context.hypercall_page;
32099+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32100
32101 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32102 control, input, output);
32103diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32104--- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32105+++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32106@@ -49,7 +49,7 @@ struct rndis_device {
32107
32108 enum rndis_device_state state;
32109 u32 link_stat;
32110- atomic_t new_req_id;
32111+ atomic_unchecked_t new_req_id;
32112
32113 spinlock_t request_lock;
32114 struct list_head req_list;
32115@@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32116 * template
32117 */
32118 set = &rndis_msg->msg.set_req;
32119- set->req_id = atomic_inc_return(&dev->new_req_id);
32120+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32121
32122 /* Add to the request list */
32123 spin_lock_irqsave(&dev->request_lock, flags);
32124@@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32125
32126 /* Setup the rndis set */
32127 halt = &request->request_msg.msg.halt_req;
32128- halt->req_id = atomic_inc_return(&dev->new_req_id);
32129+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32130
32131 /* Ignore return since this msg is optional. */
32132 rndis_filter_send_request(dev, request);
32133diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32134--- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32135+++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32136@@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32137 {
32138 int ret = 0;
32139
32140- static atomic_t device_num = ATOMIC_INIT(0);
32141+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32142
32143 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32144 child_device_obj);
32145
32146 /* Set the device name. Otherwise, device_register() will fail. */
32147 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32148- atomic_inc_return(&device_num));
32149+ atomic_inc_return_unchecked(&device_num));
32150
32151 /* The new device belongs to this bus */
32152 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32153diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32154--- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32155+++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32156@@ -58,7 +58,7 @@ enum vmbus_connect_state {
32157 struct vmbus_connection {
32158 enum vmbus_connect_state conn_state;
32159
32160- atomic_t next_gpadl_handle;
32161+ atomic_unchecked_t next_gpadl_handle;
32162
32163 /*
32164 * Represents channel interrupts. Each bit position represents a
32165diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32166--- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32167+++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-05 19:44:37.000000000 -0400
32168@@ -134,7 +134,7 @@ struct iio_ring_buffer {
32169 struct iio_handler access_handler;
32170 struct iio_event_interface ev_int;
32171 struct iio_shared_ev_pointer shared_ev_pointer;
32172- struct iio_ring_access_funcs access;
32173+ struct iio_ring_access_funcs access;
32174 int (*preenable)(struct iio_dev *);
32175 int (*postenable)(struct iio_dev *);
32176 int (*predisable)(struct iio_dev *);
32177diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32178--- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32179+++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32180@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32181 * since the RX tasklet also increments it.
32182 */
32183 #ifdef CONFIG_64BIT
32184- atomic64_add(rx_status.dropped_packets,
32185- (atomic64_t *)&priv->stats.rx_dropped);
32186+ atomic64_add_unchecked(rx_status.dropped_packets,
32187+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32188 #else
32189- atomic_add(rx_status.dropped_packets,
32190- (atomic_t *)&priv->stats.rx_dropped);
32191+ atomic_add_unchecked(rx_status.dropped_packets,
32192+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32193 #endif
32194 }
32195
32196diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32197--- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32198+++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32199@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32200 /* Increment RX stats for virtual ports */
32201 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32202 #ifdef CONFIG_64BIT
32203- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32204- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32205+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32206+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32207 #else
32208- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32209- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32210+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32211+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32212 #endif
32213 }
32214 netif_receive_skb(skb);
32215@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32216 dev->name);
32217 */
32218 #ifdef CONFIG_64BIT
32219- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32220+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32221 #else
32222- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32223+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32224 #endif
32225 dev_kfree_skb_irq(skb);
32226 }
32227diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32228--- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32229+++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32230@@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32231 mutex_init(&psb->mcache_lock);
32232 psb->mcache_root = RB_ROOT;
32233 psb->mcache_timeout = msecs_to_jiffies(5000);
32234- atomic_long_set(&psb->mcache_gen, 0);
32235+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32236
32237 psb->trans_max_pages = 100;
32238
32239@@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32240 INIT_LIST_HEAD(&psb->crypto_ready_list);
32241 INIT_LIST_HEAD(&psb->crypto_active_list);
32242
32243- atomic_set(&psb->trans_gen, 1);
32244+ atomic_set_unchecked(&psb->trans_gen, 1);
32245 atomic_long_set(&psb->total_inodes, 0);
32246
32247 mutex_init(&psb->state_lock);
32248diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32249--- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32250+++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32251@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32252 m->data = data;
32253 m->start = start;
32254 m->size = size;
32255- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32256+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32257
32258 mutex_lock(&psb->mcache_lock);
32259 err = pohmelfs_mcache_insert(psb, m);
32260diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32261--- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32262+++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32263@@ -571,14 +571,14 @@ struct pohmelfs_config;
32264 struct pohmelfs_sb {
32265 struct rb_root mcache_root;
32266 struct mutex mcache_lock;
32267- atomic_long_t mcache_gen;
32268+ atomic_long_unchecked_t mcache_gen;
32269 unsigned long mcache_timeout;
32270
32271 unsigned int idx;
32272
32273 unsigned int trans_retries;
32274
32275- atomic_t trans_gen;
32276+ atomic_unchecked_t trans_gen;
32277
32278 unsigned int crypto_attached_size;
32279 unsigned int crypto_align_size;
32280diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32281--- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32282+++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32283@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32284 int err;
32285 struct netfs_cmd *cmd = t->iovec.iov_base;
32286
32287- t->gen = atomic_inc_return(&psb->trans_gen);
32288+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32289
32290 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32291 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32292diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32293--- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32294+++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32295@@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32296 * re-used for each stats call.
32297 */
32298 static comstats_t stli_comstats;
32299-static combrd_t stli_brdstats;
32300 static struct asystats stli_cdkstats;
32301
32302 /*****************************************************************************/
32303@@ -4003,6 +4002,7 @@ out:
32304
32305 static int stli_getbrdstats(combrd_t __user *bp)
32306 {
32307+ combrd_t stli_brdstats;
32308 struct stlibrd *brdp;
32309 unsigned int i;
32310
32311@@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32312 struct stliport stli_dummyport;
32313 struct stliport *portp;
32314
32315+ pax_track_stack();
32316+
32317 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32318 return -EFAULT;
32319 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32320@@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32321 struct stlibrd stli_dummybrd;
32322 struct stlibrd *brdp;
32323
32324+ pax_track_stack();
32325+
32326 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32327 return -EFAULT;
32328 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32329diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32330--- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32331+++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32332@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32333 struct stlport stl_dummyport;
32334 struct stlport *portp;
32335
32336+ pax_track_stack();
32337+
32338 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32339 return -EFAULT;
32340 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32341diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32342--- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32343+++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32344@@ -92,7 +92,7 @@ struct vhci_hcd {
32345 unsigned resuming:1;
32346 unsigned long re_timeout;
32347
32348- atomic_t seqnum;
32349+ atomic_unchecked_t seqnum;
32350
32351 /*
32352 * NOTE:
32353diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32354--- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32355+++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-05 19:44:37.000000000 -0400
32356@@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32357 return;
32358 }
32359
32360- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32361+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32362 if (priv->seqnum == 0xffff)
32363 usbip_uinfo("seqnum max\n");
32364
32365@@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32366 return -ENOMEM;
32367 }
32368
32369- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32370+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32371 if (unlink->seqnum == 0xffff)
32372 usbip_uinfo("seqnum max\n");
32373
32374@@ -992,7 +992,7 @@ static int vhci_start(struct usb_hcd *hc
32375 vdev->rhport = rhport;
32376 }
32377
32378- atomic_set(&vhci->seqnum, 0);
32379+ atomic_set_unchecked(&vhci->seqnum, 0);
32380 spin_lock_init(&vhci->lock);
32381
32382
32383diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32384--- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32385+++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32386@@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32387 usbip_uerr("cannot find a urb of seqnum %u\n",
32388 pdu->base.seqnum);
32389 usbip_uinfo("max seqnum %d\n",
32390- atomic_read(&the_controller->seqnum));
32391+ atomic_read_unchecked(&the_controller->seqnum));
32392 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32393 return;
32394 }
32395diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32396--- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32397+++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32398@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32399 char path[ALUA_METADATA_PATH_LEN];
32400 int len;
32401
32402+ pax_track_stack();
32403+
32404 memset(path, 0, ALUA_METADATA_PATH_LEN);
32405
32406 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32407@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32408 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32409 int len;
32410
32411+ pax_track_stack();
32412+
32413 memset(path, 0, ALUA_METADATA_PATH_LEN);
32414 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32415
32416diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32417--- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32418+++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32419@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32420 int length = 0;
32421 unsigned char buf[SE_MODE_PAGE_BUF];
32422
32423+ pax_track_stack();
32424+
32425 memset(buf, 0, SE_MODE_PAGE_BUF);
32426
32427 switch (cdb[2] & 0x3f) {
32428diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32429--- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32430+++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32431@@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32432 ssize_t len = 0;
32433 int reg_count = 0, prf_isid;
32434
32435+ pax_track_stack();
32436+
32437 if (!(su_dev->se_dev_ptr))
32438 return -ENODEV;
32439
32440diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32441--- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32442+++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32443@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32444 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32445 u16 tpgt;
32446
32447+ pax_track_stack();
32448+
32449 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32450 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32451 /*
32452@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32453 ssize_t len = 0;
32454 int reg_count = 0;
32455
32456+ pax_track_stack();
32457+
32458 memset(buf, 0, pr_aptpl_buf_len);
32459 /*
32460 * Called to clear metadata once APTPL has been deactivated.
32461@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32462 char path[512];
32463 int ret;
32464
32465+ pax_track_stack();
32466+
32467 memset(iov, 0, sizeof(struct iovec));
32468 memset(path, 0, 512);
32469
32470diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32471--- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32472+++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32473@@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32474 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32475 T_TASK(cmd)->t_task_cdbs,
32476 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32477- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32478+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32479 atomic_read(&T_TASK(cmd)->t_transport_active),
32480 atomic_read(&T_TASK(cmd)->t_transport_stop),
32481 atomic_read(&T_TASK(cmd)->t_transport_sent));
32482@@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32483 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32484 " task: %p, t_fe_count: %d dev: %p\n", task,
32485 fe_count, dev);
32486- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32487+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32488 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32489 flags);
32490 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32491@@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32492 }
32493 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32494 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32495- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32496+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32497 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32498 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32499
32500diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32501--- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32502+++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32503@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32504
32505 dev->queue_depth = dev_limits->queue_depth;
32506 atomic_set(&dev->depth_left, dev->queue_depth);
32507- atomic_set(&dev->dev_ordered_id, 0);
32508+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32509
32510 se_dev_set_default_attribs(dev, dev_limits);
32511
32512@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32513 * Used to determine when ORDERED commands should go from
32514 * Dormant to Active status.
32515 */
32516- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32517+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32518 smp_mb__after_atomic_inc();
32519 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32520 cmd->se_ordered_id, cmd->sam_task_attr,
32521@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32522 " t_transport_active: %d t_transport_stop: %d"
32523 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32524 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32525- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32526+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32527 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32528 atomic_read(&T_TASK(cmd)->t_transport_active),
32529 atomic_read(&T_TASK(cmd)->t_transport_stop),
32530@@ -2673,9 +2673,9 @@ check_depth:
32531 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32532 atomic_set(&task->task_active, 1);
32533 atomic_set(&task->task_sent, 1);
32534- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32535+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32536
32537- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32538+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32539 T_TASK(cmd)->t_task_cdbs)
32540 atomic_set(&cmd->transport_sent, 1);
32541
32542@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32543 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32544 }
32545 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32546- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32547+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32548 goto remove;
32549
32550 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32551@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32552 {
32553 int ret = 0;
32554
32555- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32556+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32557 if (!(send_status) ||
32558 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32559 return 1;
32560@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32561 */
32562 if (cmd->data_direction == DMA_TO_DEVICE) {
32563 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32564- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32565+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32566 smp_mb__after_atomic_inc();
32567 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32568 transport_new_cmd_failure(cmd);
32569@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32570 CMD_TFO(cmd)->get_task_tag(cmd),
32571 T_TASK(cmd)->t_task_cdbs,
32572 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32573- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32574+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32575 atomic_read(&T_TASK(cmd)->t_transport_active),
32576 atomic_read(&T_TASK(cmd)->t_transport_stop),
32577 atomic_read(&T_TASK(cmd)->t_transport_sent));
32578diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32579--- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32580+++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32581@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32582 bool mContinue;
32583 char *pIn, *pOut;
32584
32585+ pax_track_stack();
32586+
32587 if (!SCI_Prepare(j))
32588 return 0;
32589
32590diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32591--- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32592+++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32593@@ -83,6 +83,7 @@
32594 #include <asm/hvcserver.h>
32595 #include <asm/uaccess.h>
32596 #include <asm/vio.h>
32597+#include <asm/local.h>
32598
32599 /*
32600 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32601@@ -270,7 +271,7 @@ struct hvcs_struct {
32602 unsigned int index;
32603
32604 struct tty_struct *tty;
32605- int open_count;
32606+ local_t open_count;
32607
32608 /*
32609 * Used to tell the driver kernel_thread what operations need to take
32610@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32611
32612 spin_lock_irqsave(&hvcsd->lock, flags);
32613
32614- if (hvcsd->open_count > 0) {
32615+ if (local_read(&hvcsd->open_count) > 0) {
32616 spin_unlock_irqrestore(&hvcsd->lock, flags);
32617 printk(KERN_INFO "HVCS: vterm state unchanged. "
32618 "The hvcs device node is still in use.\n");
32619@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32620 if ((retval = hvcs_partner_connect(hvcsd)))
32621 goto error_release;
32622
32623- hvcsd->open_count = 1;
32624+ local_set(&hvcsd->open_count, 1);
32625 hvcsd->tty = tty;
32626 tty->driver_data = hvcsd;
32627
32628@@ -1179,7 +1180,7 @@ fast_open:
32629
32630 spin_lock_irqsave(&hvcsd->lock, flags);
32631 kref_get(&hvcsd->kref);
32632- hvcsd->open_count++;
32633+ local_inc(&hvcsd->open_count);
32634 hvcsd->todo_mask |= HVCS_SCHED_READ;
32635 spin_unlock_irqrestore(&hvcsd->lock, flags);
32636
32637@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32638 hvcsd = tty->driver_data;
32639
32640 spin_lock_irqsave(&hvcsd->lock, flags);
32641- if (--hvcsd->open_count == 0) {
32642+ if (local_dec_and_test(&hvcsd->open_count)) {
32643
32644 vio_disable_interrupts(hvcsd->vdev);
32645
32646@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32647 free_irq(irq, hvcsd);
32648 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32649 return;
32650- } else if (hvcsd->open_count < 0) {
32651+ } else if (local_read(&hvcsd->open_count) < 0) {
32652 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32653 " is missmanaged.\n",
32654- hvcsd->vdev->unit_address, hvcsd->open_count);
32655+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32656 }
32657
32658 spin_unlock_irqrestore(&hvcsd->lock, flags);
32659@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32660
32661 spin_lock_irqsave(&hvcsd->lock, flags);
32662 /* Preserve this so that we know how many kref refs to put */
32663- temp_open_count = hvcsd->open_count;
32664+ temp_open_count = local_read(&hvcsd->open_count);
32665
32666 /*
32667 * Don't kref put inside the spinlock because the destruction
32668@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32669 hvcsd->tty->driver_data = NULL;
32670 hvcsd->tty = NULL;
32671
32672- hvcsd->open_count = 0;
32673+ local_set(&hvcsd->open_count, 0);
32674
32675 /* This will drop any buffered data on the floor which is OK in a hangup
32676 * scenario. */
32677@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32678 * the middle of a write operation? This is a crummy place to do this
32679 * but we want to keep it all in the spinlock.
32680 */
32681- if (hvcsd->open_count <= 0) {
32682+ if (local_read(&hvcsd->open_count) <= 0) {
32683 spin_unlock_irqrestore(&hvcsd->lock, flags);
32684 return -ENODEV;
32685 }
32686@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32687 {
32688 struct hvcs_struct *hvcsd = tty->driver_data;
32689
32690- if (!hvcsd || hvcsd->open_count <= 0)
32691+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32692 return 0;
32693
32694 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32695diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32696--- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32697+++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32698@@ -29,6 +29,7 @@
32699 #include <linux/tty_driver.h>
32700 #include <linux/tty_flip.h>
32701 #include <linux/uaccess.h>
32702+#include <asm/local.h>
32703
32704 #include "tty.h"
32705 #include "network.h"
32706@@ -51,7 +52,7 @@ struct ipw_tty {
32707 int tty_type;
32708 struct ipw_network *network;
32709 struct tty_struct *linux_tty;
32710- int open_count;
32711+ local_t open_count;
32712 unsigned int control_lines;
32713 struct mutex ipw_tty_mutex;
32714 int tx_bytes_queued;
32715@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32716 mutex_unlock(&tty->ipw_tty_mutex);
32717 return -ENODEV;
32718 }
32719- if (tty->open_count == 0)
32720+ if (local_read(&tty->open_count) == 0)
32721 tty->tx_bytes_queued = 0;
32722
32723- tty->open_count++;
32724+ local_inc(&tty->open_count);
32725
32726 tty->linux_tty = linux_tty;
32727 linux_tty->driver_data = tty;
32728@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32729
32730 static void do_ipw_close(struct ipw_tty *tty)
32731 {
32732- tty->open_count--;
32733-
32734- if (tty->open_count == 0) {
32735+ if (local_dec_return(&tty->open_count) == 0) {
32736 struct tty_struct *linux_tty = tty->linux_tty;
32737
32738 if (linux_tty != NULL) {
32739@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32740 return;
32741
32742 mutex_lock(&tty->ipw_tty_mutex);
32743- if (tty->open_count == 0) {
32744+ if (local_read(&tty->open_count) == 0) {
32745 mutex_unlock(&tty->ipw_tty_mutex);
32746 return;
32747 }
32748@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32749 return;
32750 }
32751
32752- if (!tty->open_count) {
32753+ if (!local_read(&tty->open_count)) {
32754 mutex_unlock(&tty->ipw_tty_mutex);
32755 return;
32756 }
32757@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32758 return -ENODEV;
32759
32760 mutex_lock(&tty->ipw_tty_mutex);
32761- if (!tty->open_count) {
32762+ if (!local_read(&tty->open_count)) {
32763 mutex_unlock(&tty->ipw_tty_mutex);
32764 return -EINVAL;
32765 }
32766@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32767 if (!tty)
32768 return -ENODEV;
32769
32770- if (!tty->open_count)
32771+ if (!local_read(&tty->open_count))
32772 return -EINVAL;
32773
32774 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32775@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32776 if (!tty)
32777 return 0;
32778
32779- if (!tty->open_count)
32780+ if (!local_read(&tty->open_count))
32781 return 0;
32782
32783 return tty->tx_bytes_queued;
32784@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32785 if (!tty)
32786 return -ENODEV;
32787
32788- if (!tty->open_count)
32789+ if (!local_read(&tty->open_count))
32790 return -EINVAL;
32791
32792 return get_control_lines(tty);
32793@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32794 if (!tty)
32795 return -ENODEV;
32796
32797- if (!tty->open_count)
32798+ if (!local_read(&tty->open_count))
32799 return -EINVAL;
32800
32801 return set_control_lines(tty, set, clear);
32802@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32803 if (!tty)
32804 return -ENODEV;
32805
32806- if (!tty->open_count)
32807+ if (!local_read(&tty->open_count))
32808 return -EINVAL;
32809
32810 /* FIXME: Exactly how is the tty object locked here .. */
32811@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32812 against a parallel ioctl etc */
32813 mutex_lock(&ttyj->ipw_tty_mutex);
32814 }
32815- while (ttyj->open_count)
32816+ while (local_read(&ttyj->open_count))
32817 do_ipw_close(ttyj);
32818 ipwireless_disassociate_network_ttys(network,
32819 ttyj->channel_idx);
32820diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32821--- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32822+++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32823@@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32824 return NULL;
32825 spin_lock_init(&dlci->lock);
32826 dlci->fifo = &dlci->_fifo;
32827- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32828+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32829 kfree(dlci);
32830 return NULL;
32831 }
32832diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32833--- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32834+++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32835@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32836 {
32837 *ops = tty_ldisc_N_TTY;
32838 ops->owner = NULL;
32839- ops->refcount = ops->flags = 0;
32840+ atomic_set(&ops->refcount, 0);
32841+ ops->flags = 0;
32842 }
32843 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32844diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32845--- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
32846+++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
32847@@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
32848 register_sysctl_table(pty_root_table);
32849
32850 /* Now create the /dev/ptmx special device */
32851+ pax_open_kernel();
32852 tty_default_fops(&ptmx_fops);
32853- ptmx_fops.open = ptmx_open;
32854+ *(void **)&ptmx_fops.open = ptmx_open;
32855+ pax_close_kernel();
32856
32857 cdev_init(&ptmx_cdev, &ptmx_fops);
32858 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32859diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
32860--- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
32861+++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
32862@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32863 struct rocket_ports tmp;
32864 int board;
32865
32866+ pax_track_stack();
32867+
32868 if (!retports)
32869 return -EFAULT;
32870 memset(&tmp, 0, sizeof (tmp));
32871diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
32872--- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
32873+++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
32874@@ -23,8 +23,9 @@
32875 #define MAX_CONFIG_LEN 40
32876
32877 static struct kgdb_io kgdboc_io_ops;
32878+static struct kgdb_io kgdboc_io_ops_console;
32879
32880-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32881+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32882 static int configured = -1;
32883
32884 static char config[MAX_CONFIG_LEN];
32885@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32886 kgdboc_unregister_kbd();
32887 if (configured == 1)
32888 kgdb_unregister_io_module(&kgdboc_io_ops);
32889+ else if (configured == 2)
32890+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
32891 }
32892
32893 static int configure_kgdboc(void)
32894@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32895 int err;
32896 char *cptr = config;
32897 struct console *cons;
32898+ int is_console = 0;
32899
32900 err = kgdboc_option_setup(config);
32901 if (err || !strlen(config) || isspace(config[0]))
32902 goto noconfig;
32903
32904 err = -ENODEV;
32905- kgdboc_io_ops.is_console = 0;
32906 kgdb_tty_driver = NULL;
32907
32908 kgdboc_use_kms = 0;
32909@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32910 int idx;
32911 if (cons->device && cons->device(cons, &idx) == p &&
32912 idx == tty_line) {
32913- kgdboc_io_ops.is_console = 1;
32914+ is_console = 1;
32915 break;
32916 }
32917 cons = cons->next;
32918@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32919 kgdb_tty_line = tty_line;
32920
32921 do_register:
32922- err = kgdb_register_io_module(&kgdboc_io_ops);
32923+ if (is_console) {
32924+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
32925+ configured = 2;
32926+ } else {
32927+ err = kgdb_register_io_module(&kgdboc_io_ops);
32928+ configured = 1;
32929+ }
32930 if (err)
32931 goto noconfig;
32932
32933- configured = 1;
32934-
32935 return 0;
32936
32937 noconfig:
32938@@ -212,7 +219,7 @@ noconfig:
32939 static int __init init_kgdboc(void)
32940 {
32941 /* Already configured? */
32942- if (configured == 1)
32943+ if (configured >= 1)
32944 return 0;
32945
32946 return configure_kgdboc();
32947@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32948 if (config[len - 1] == '\n')
32949 config[len - 1] = '\0';
32950
32951- if (configured == 1)
32952+ if (configured >= 1)
32953 cleanup_kgdboc();
32954
32955 /* Go and configure with the new params. */
32956@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32957 .post_exception = kgdboc_post_exp_handler,
32958 };
32959
32960+static struct kgdb_io kgdboc_io_ops_console = {
32961+ .name = "kgdboc",
32962+ .read_char = kgdboc_get_char,
32963+ .write_char = kgdboc_put_char,
32964+ .pre_exception = kgdboc_pre_exp_handler,
32965+ .post_exception = kgdboc_post_exp_handler,
32966+ .is_console = 1
32967+};
32968+
32969 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32970 /* This is only available if kgdboc is a built in for early debugging */
32971 static int __init kgdboc_early_init(char *opt)
32972diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
32973--- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
32974+++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
32975@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
32976 int loop = 1, num, total = 0;
32977 u8 recv_buf[512], *pbuf;
32978
32979+ pax_track_stack();
32980+
32981 pbuf = recv_buf;
32982 do {
32983 num = max3110_read_multi(max, pbuf);
32984diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
32985--- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
32986+++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
32987@@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32988
32989 void tty_default_fops(struct file_operations *fops)
32990 {
32991- *fops = tty_fops;
32992+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32993 }
32994
32995 /*
32996diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
32997--- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
32998+++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
32999@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33000 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33001 struct tty_ldisc_ops *ldo = ld->ops;
33002
33003- ldo->refcount--;
33004+ atomic_dec(&ldo->refcount);
33005 module_put(ldo->owner);
33006 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33007
33008@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33009 spin_lock_irqsave(&tty_ldisc_lock, flags);
33010 tty_ldiscs[disc] = new_ldisc;
33011 new_ldisc->num = disc;
33012- new_ldisc->refcount = 0;
33013+ atomic_set(&new_ldisc->refcount, 0);
33014 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33015
33016 return ret;
33017@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33018 return -EINVAL;
33019
33020 spin_lock_irqsave(&tty_ldisc_lock, flags);
33021- if (tty_ldiscs[disc]->refcount)
33022+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33023 ret = -EBUSY;
33024 else
33025 tty_ldiscs[disc] = NULL;
33026@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33027 if (ldops) {
33028 ret = ERR_PTR(-EAGAIN);
33029 if (try_module_get(ldops->owner)) {
33030- ldops->refcount++;
33031+ atomic_inc(&ldops->refcount);
33032 ret = ldops;
33033 }
33034 }
33035@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33036 unsigned long flags;
33037
33038 spin_lock_irqsave(&tty_ldisc_lock, flags);
33039- ldops->refcount--;
33040+ atomic_dec(&ldops->refcount);
33041 module_put(ldops->owner);
33042 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33043 }
33044diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33045--- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33046+++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33047@@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33048 kbd->kbdmode == VC_OFF) &&
33049 value != KVAL(K_SAK))
33050 return; /* SAK is allowed even in raw mode */
33051+
33052+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33053+ {
33054+ void *func = fn_handler[value];
33055+ if (func == fn_show_state || func == fn_show_ptregs ||
33056+ func == fn_show_mem)
33057+ return;
33058+ }
33059+#endif
33060+
33061 fn_handler[value](vc);
33062 }
33063
33064diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33065--- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33066+++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33067@@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33068
33069 static void notify_write(struct vc_data *vc, unsigned int unicode)
33070 {
33071- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33072+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33073 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33074 }
33075
33076diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33077--- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33078+++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33079@@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33080 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33081 return -EFAULT;
33082
33083- if (!capable(CAP_SYS_TTY_CONFIG))
33084- perm = 0;
33085-
33086 switch (cmd) {
33087 case KDGKBENT:
33088 key_map = key_maps[s];
33089@@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33090 val = (i ? K_HOLE : K_NOSUCHMAP);
33091 return put_user(val, &user_kbe->kb_value);
33092 case KDSKBENT:
33093+ if (!capable(CAP_SYS_TTY_CONFIG))
33094+ perm = 0;
33095+
33096 if (!perm)
33097 return -EPERM;
33098 if (!i && v == K_NOSUCHMAP) {
33099@@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33100 int i, j, k;
33101 int ret;
33102
33103- if (!capable(CAP_SYS_TTY_CONFIG))
33104- perm = 0;
33105-
33106 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33107 if (!kbs) {
33108 ret = -ENOMEM;
33109@@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33110 kfree(kbs);
33111 return ((p && *p) ? -EOVERFLOW : 0);
33112 case KDSKBSENT:
33113+ if (!capable(CAP_SYS_TTY_CONFIG))
33114+ perm = 0;
33115+
33116 if (!perm) {
33117 ret = -EPERM;
33118 goto reterr;
33119diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33120--- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33121+++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33122@@ -25,6 +25,7 @@
33123 #include <linux/kobject.h>
33124 #include <linux/cdev.h>
33125 #include <linux/uio_driver.h>
33126+#include <asm/local.h>
33127
33128 #define UIO_MAX_DEVICES (1U << MINORBITS)
33129
33130@@ -32,10 +33,10 @@ struct uio_device {
33131 struct module *owner;
33132 struct device *dev;
33133 int minor;
33134- atomic_t event;
33135+ atomic_unchecked_t event;
33136 struct fasync_struct *async_queue;
33137 wait_queue_head_t wait;
33138- int vma_count;
33139+ local_t vma_count;
33140 struct uio_info *info;
33141 struct kobject *map_dir;
33142 struct kobject *portio_dir;
33143@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33144 struct device_attribute *attr, char *buf)
33145 {
33146 struct uio_device *idev = dev_get_drvdata(dev);
33147- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33148+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33149 }
33150
33151 static struct device_attribute uio_class_attributes[] = {
33152@@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33153 {
33154 struct uio_device *idev = info->uio_dev;
33155
33156- atomic_inc(&idev->event);
33157+ atomic_inc_unchecked(&idev->event);
33158 wake_up_interruptible(&idev->wait);
33159 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33160 }
33161@@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33162 }
33163
33164 listener->dev = idev;
33165- listener->event_count = atomic_read(&idev->event);
33166+ listener->event_count = atomic_read_unchecked(&idev->event);
33167 filep->private_data = listener;
33168
33169 if (idev->info->open) {
33170@@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33171 return -EIO;
33172
33173 poll_wait(filep, &idev->wait, wait);
33174- if (listener->event_count != atomic_read(&idev->event))
33175+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33176 return POLLIN | POLLRDNORM;
33177 return 0;
33178 }
33179@@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33180 do {
33181 set_current_state(TASK_INTERRUPTIBLE);
33182
33183- event_count = atomic_read(&idev->event);
33184+ event_count = atomic_read_unchecked(&idev->event);
33185 if (event_count != listener->event_count) {
33186 if (copy_to_user(buf, &event_count, count))
33187 retval = -EFAULT;
33188@@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33189 static void uio_vma_open(struct vm_area_struct *vma)
33190 {
33191 struct uio_device *idev = vma->vm_private_data;
33192- idev->vma_count++;
33193+ local_inc(&idev->vma_count);
33194 }
33195
33196 static void uio_vma_close(struct vm_area_struct *vma)
33197 {
33198 struct uio_device *idev = vma->vm_private_data;
33199- idev->vma_count--;
33200+ local_dec(&idev->vma_count);
33201 }
33202
33203 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33204@@ -819,7 +820,7 @@ int __uio_register_device(struct module
33205 idev->owner = owner;
33206 idev->info = info;
33207 init_waitqueue_head(&idev->wait);
33208- atomic_set(&idev->event, 0);
33209+ atomic_set_unchecked(&idev->event, 0);
33210
33211 ret = uio_get_minor(idev);
33212 if (ret)
33213diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33214--- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33215+++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33216@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33217 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33218 if (ret < 2)
33219 return -EINVAL;
33220- if (index < 0 || index > 0x7f)
33221+ if (index > 0x7f)
33222 return -EINVAL;
33223 pos += tmp;
33224
33225diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33226--- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33227+++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33228@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33229 if (printk_ratelimit())
33230 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33231 __func__, vpi, vci);
33232- atomic_inc(&vcc->stats->rx_err);
33233+ atomic_inc_unchecked(&vcc->stats->rx_err);
33234 return;
33235 }
33236
33237@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33238 if (length > ATM_MAX_AAL5_PDU) {
33239 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33240 __func__, length, vcc);
33241- atomic_inc(&vcc->stats->rx_err);
33242+ atomic_inc_unchecked(&vcc->stats->rx_err);
33243 goto out;
33244 }
33245
33246@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33247 if (sarb->len < pdu_length) {
33248 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33249 __func__, pdu_length, sarb->len, vcc);
33250- atomic_inc(&vcc->stats->rx_err);
33251+ atomic_inc_unchecked(&vcc->stats->rx_err);
33252 goto out;
33253 }
33254
33255 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33256 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33257 __func__, vcc);
33258- atomic_inc(&vcc->stats->rx_err);
33259+ atomic_inc_unchecked(&vcc->stats->rx_err);
33260 goto out;
33261 }
33262
33263@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33264 if (printk_ratelimit())
33265 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33266 __func__, length);
33267- atomic_inc(&vcc->stats->rx_drop);
33268+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33269 goto out;
33270 }
33271
33272@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33273
33274 vcc->push(vcc, skb);
33275
33276- atomic_inc(&vcc->stats->rx);
33277+ atomic_inc_unchecked(&vcc->stats->rx);
33278 out:
33279 skb_trim(sarb, 0);
33280 }
33281@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33282 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33283
33284 usbatm_pop(vcc, skb);
33285- atomic_inc(&vcc->stats->tx);
33286+ atomic_inc_unchecked(&vcc->stats->tx);
33287
33288 skb = skb_dequeue(&instance->sndqueue);
33289 }
33290@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33291 if (!left--)
33292 return sprintf(page,
33293 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33294- atomic_read(&atm_dev->stats.aal5.tx),
33295- atomic_read(&atm_dev->stats.aal5.tx_err),
33296- atomic_read(&atm_dev->stats.aal5.rx),
33297- atomic_read(&atm_dev->stats.aal5.rx_err),
33298- atomic_read(&atm_dev->stats.aal5.rx_drop));
33299+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33300+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33301+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33302+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33303+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33304
33305 if (!left--) {
33306 if (instance->disconnected)
33307diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33308--- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33309+++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33310@@ -126,7 +126,7 @@ static const char *format_endpt =
33311 * time it gets called.
33312 */
33313 static struct device_connect_event {
33314- atomic_t count;
33315+ atomic_unchecked_t count;
33316 wait_queue_head_t wait;
33317 } device_event = {
33318 .count = ATOMIC_INIT(1),
33319@@ -164,7 +164,7 @@ static const struct class_info clas_info
33320
33321 void usbfs_conn_disc_event(void)
33322 {
33323- atomic_add(2, &device_event.count);
33324+ atomic_add_unchecked(2, &device_event.count);
33325 wake_up(&device_event.wait);
33326 }
33327
33328@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33329
33330 poll_wait(file, &device_event.wait, wait);
33331
33332- event_count = atomic_read(&device_event.count);
33333+ event_count = atomic_read_unchecked(&device_event.count);
33334 if (file->f_version != event_count) {
33335 file->f_version = event_count;
33336 return POLLIN | POLLRDNORM;
33337diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33338--- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33339+++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33340@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33341 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33342 if (buf) {
33343 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33344- if (len > 0) {
33345- smallbuf = kmalloc(++len, GFP_NOIO);
33346+ if (len++ > 0) {
33347+ smallbuf = kmalloc(len, GFP_NOIO);
33348 if (!smallbuf)
33349 return buf;
33350 memcpy(smallbuf, buf, len);
33351diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33352--- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33353+++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33354@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33355
33356 #ifdef CONFIG_KGDB
33357 static struct kgdb_io kgdbdbgp_io_ops;
33358-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33359+static struct kgdb_io kgdbdbgp_io_ops_console;
33360+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33361 #else
33362 #define dbgp_kgdb_mode (0)
33363 #endif
33364@@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33365 .write_char = kgdbdbgp_write_char,
33366 };
33367
33368+static struct kgdb_io kgdbdbgp_io_ops_console = {
33369+ .name = "kgdbdbgp",
33370+ .read_char = kgdbdbgp_read_char,
33371+ .write_char = kgdbdbgp_write_char,
33372+ .is_console = 1
33373+};
33374+
33375 static int kgdbdbgp_wait_time;
33376
33377 static int __init kgdbdbgp_parse_config(char *str)
33378@@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33379 ptr++;
33380 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33381 }
33382- kgdb_register_io_module(&kgdbdbgp_io_ops);
33383- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33384+ if (early_dbgp_console.index != -1)
33385+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33386+ else
33387+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33388
33389 return 0;
33390 }
33391diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33392--- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33393+++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33394@@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33395 unsigned int num_tests;
33396 int i, ret;
33397
33398+ pax_track_stack();
33399+
33400 num_tests = ARRAY_SIZE(simple_test_vector);
33401 for (i = 0; i < num_tests; i++) {
33402 ret = xhci_test_trb_in_td(xhci,
33403diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33404--- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33405+++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33406@@ -192,7 +192,7 @@ struct wahc {
33407 struct list_head xfer_delayed_list;
33408 spinlock_t xfer_list_lock;
33409 struct work_struct xfer_work;
33410- atomic_t xfer_id_count;
33411+ atomic_unchecked_t xfer_id_count;
33412 };
33413
33414
33415@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33416 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33417 spin_lock_init(&wa->xfer_list_lock);
33418 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33419- atomic_set(&wa->xfer_id_count, 1);
33420+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33421 }
33422
33423 /**
33424diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33425--- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33426+++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33427@@ -294,7 +294,7 @@ out:
33428 */
33429 static void wa_xfer_id_init(struct wa_xfer *xfer)
33430 {
33431- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33432+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33433 }
33434
33435 /*
33436diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33437--- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33438+++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33439@@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33440 return get_user(vq->last_used_idx, &used->idx);
33441 }
33442
33443-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33444+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33445 {
33446 struct file *eventfp, *filep = NULL,
33447 *pollstart = NULL, *pollstop = NULL;
33448diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33449--- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33450+++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33451@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33452 rc = -ENODEV;
33453 goto out;
33454 }
33455- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33456- !info->fbops->fb_setcmap)) {
33457+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33458 rc = -EINVAL;
33459 goto out1;
33460 }
33461diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33462--- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33463+++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33464@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33465 image->dx += image->width + 8;
33466 }
33467 } else if (rotate == FB_ROTATE_UD) {
33468- for (x = 0; x < num && image->dx >= 0; x++) {
33469+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33470 info->fbops->fb_imageblit(info, image);
33471 image->dx -= image->width + 8;
33472 }
33473@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33474 image->dy += image->height + 8;
33475 }
33476 } else if (rotate == FB_ROTATE_CCW) {
33477- for (x = 0; x < num && image->dy >= 0; x++) {
33478+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33479 info->fbops->fb_imageblit(info, image);
33480 image->dy -= image->height + 8;
33481 }
33482@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33483 int flags = info->flags;
33484 int ret = 0;
33485
33486+ pax_track_stack();
33487+
33488 if (var->activate & FB_ACTIVATE_INV_MODE) {
33489 struct fb_videomode mode1, mode2;
33490
33491@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33492 void __user *argp = (void __user *)arg;
33493 long ret = 0;
33494
33495+ pax_track_stack();
33496+
33497 switch (cmd) {
33498 case FBIOGET_VSCREENINFO:
33499 if (!lock_fb_info(info))
33500@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33501 return -EFAULT;
33502 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33503 return -EINVAL;
33504- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33505+ if (con2fb.framebuffer >= FB_MAX)
33506 return -EINVAL;
33507 if (!registered_fb[con2fb.framebuffer])
33508 request_module("fb%d", con2fb.framebuffer);
33509diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33510--- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33511+++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33512@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33513 }
33514 }
33515 printk("ringbuffer lockup!!!\n");
33516+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33517 i810_report_error(mmio);
33518 par->dev_flags |= LOCKUP;
33519 info->pixmap.scan_align = 1;
33520diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33521--- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33522+++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33523@@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33524 dlfb_urb_completion(urb);
33525
33526 error:
33527- atomic_add(bytes_sent, &dev->bytes_sent);
33528- atomic_add(bytes_identical, &dev->bytes_identical);
33529- atomic_add(width*height*2, &dev->bytes_rendered);
33530+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33531+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33532+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33533 end_cycles = get_cycles();
33534- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33535+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33536 >> 10)), /* Kcycles */
33537 &dev->cpu_kcycles_used);
33538
33539@@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33540 dlfb_urb_completion(urb);
33541
33542 error:
33543- atomic_add(bytes_sent, &dev->bytes_sent);
33544- atomic_add(bytes_identical, &dev->bytes_identical);
33545- atomic_add(bytes_rendered, &dev->bytes_rendered);
33546+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33547+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33548+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33549 end_cycles = get_cycles();
33550- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33551+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33552 >> 10)), /* Kcycles */
33553 &dev->cpu_kcycles_used);
33554 }
33555@@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33556 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33557 struct dlfb_data *dev = fb_info->par;
33558 return snprintf(buf, PAGE_SIZE, "%u\n",
33559- atomic_read(&dev->bytes_rendered));
33560+ atomic_read_unchecked(&dev->bytes_rendered));
33561 }
33562
33563 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33564@@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33565 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33566 struct dlfb_data *dev = fb_info->par;
33567 return snprintf(buf, PAGE_SIZE, "%u\n",
33568- atomic_read(&dev->bytes_identical));
33569+ atomic_read_unchecked(&dev->bytes_identical));
33570 }
33571
33572 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33573@@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33574 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33575 struct dlfb_data *dev = fb_info->par;
33576 return snprintf(buf, PAGE_SIZE, "%u\n",
33577- atomic_read(&dev->bytes_sent));
33578+ atomic_read_unchecked(&dev->bytes_sent));
33579 }
33580
33581 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33582@@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33583 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33584 struct dlfb_data *dev = fb_info->par;
33585 return snprintf(buf, PAGE_SIZE, "%u\n",
33586- atomic_read(&dev->cpu_kcycles_used));
33587+ atomic_read_unchecked(&dev->cpu_kcycles_used));
33588 }
33589
33590 static ssize_t edid_show(
33591@@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33592 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33593 struct dlfb_data *dev = fb_info->par;
33594
33595- atomic_set(&dev->bytes_rendered, 0);
33596- atomic_set(&dev->bytes_identical, 0);
33597- atomic_set(&dev->bytes_sent, 0);
33598- atomic_set(&dev->cpu_kcycles_used, 0);
33599+ atomic_set_unchecked(&dev->bytes_rendered, 0);
33600+ atomic_set_unchecked(&dev->bytes_identical, 0);
33601+ atomic_set_unchecked(&dev->bytes_sent, 0);
33602+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33603
33604 return count;
33605 }
33606diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33607--- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33608+++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33609@@ -19,6 +19,7 @@
33610 #include <linux/io.h>
33611 #include <linux/mutex.h>
33612 #include <linux/slab.h>
33613+#include <linux/moduleloader.h>
33614 #include <video/edid.h>
33615 #include <video/uvesafb.h>
33616 #ifdef CONFIG_X86
33617@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33618 NULL,
33619 };
33620
33621- return call_usermodehelper(v86d_path, argv, envp, 1);
33622+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33623 }
33624
33625 /*
33626@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33627 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33628 par->pmi_setpal = par->ypan = 0;
33629 } else {
33630+
33631+#ifdef CONFIG_PAX_KERNEXEC
33632+#ifdef CONFIG_MODULES
33633+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33634+#endif
33635+ if (!par->pmi_code) {
33636+ par->pmi_setpal = par->ypan = 0;
33637+ return 0;
33638+ }
33639+#endif
33640+
33641 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33642 + task->t.regs.edi);
33643+
33644+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33645+ pax_open_kernel();
33646+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33647+ pax_close_kernel();
33648+
33649+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33650+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33651+#else
33652 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33653 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33654+#endif
33655+
33656 printk(KERN_INFO "uvesafb: protected mode interface info at "
33657 "%04x:%04x\n",
33658 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33659@@ -1821,6 +1844,11 @@ out:
33660 if (par->vbe_modes)
33661 kfree(par->vbe_modes);
33662
33663+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33664+ if (par->pmi_code)
33665+ module_free_exec(NULL, par->pmi_code);
33666+#endif
33667+
33668 framebuffer_release(info);
33669 return err;
33670 }
33671@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33672 kfree(par->vbe_state_orig);
33673 if (par->vbe_state_saved)
33674 kfree(par->vbe_state_saved);
33675+
33676+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33677+ if (par->pmi_code)
33678+ module_free_exec(NULL, par->pmi_code);
33679+#endif
33680+
33681 }
33682
33683 framebuffer_release(info);
33684diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33685--- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33686+++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33687@@ -9,6 +9,7 @@
33688 */
33689
33690 #include <linux/module.h>
33691+#include <linux/moduleloader.h>
33692 #include <linux/kernel.h>
33693 #include <linux/errno.h>
33694 #include <linux/string.h>
33695@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33696 static int vram_total __initdata; /* Set total amount of memory */
33697 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33698 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33699-static void (*pmi_start)(void) __read_mostly;
33700-static void (*pmi_pal) (void) __read_mostly;
33701+static void (*pmi_start)(void) __read_only;
33702+static void (*pmi_pal) (void) __read_only;
33703 static int depth __read_mostly;
33704 static int vga_compat __read_mostly;
33705 /* --------------------------------------------------------------------- */
33706@@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33707 unsigned int size_vmode;
33708 unsigned int size_remap;
33709 unsigned int size_total;
33710+ void *pmi_code = NULL;
33711
33712 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33713 return -ENODEV;
33714@@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33715 size_remap = size_total;
33716 vesafb_fix.smem_len = size_remap;
33717
33718-#ifndef __i386__
33719- screen_info.vesapm_seg = 0;
33720-#endif
33721-
33722 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33723 printk(KERN_WARNING
33724 "vesafb: cannot reserve video memory at 0x%lx\n",
33725@@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33726 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33727 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33728
33729+#ifdef __i386__
33730+
33731+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33732+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
33733+ if (!pmi_code)
33734+#elif !defined(CONFIG_PAX_KERNEXEC)
33735+ if (0)
33736+#endif
33737+
33738+#endif
33739+ screen_info.vesapm_seg = 0;
33740+
33741 if (screen_info.vesapm_seg) {
33742- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33743- screen_info.vesapm_seg,screen_info.vesapm_off);
33744+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33745+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33746 }
33747
33748 if (screen_info.vesapm_seg < 0xc000)
33749@@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33750
33751 if (ypan || pmi_setpal) {
33752 unsigned short *pmi_base;
33753+
33754 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33755- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33756- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33757+
33758+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33759+ pax_open_kernel();
33760+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33761+#else
33762+ pmi_code = pmi_base;
33763+#endif
33764+
33765+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33766+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33767+
33768+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33769+ pmi_start = ktva_ktla(pmi_start);
33770+ pmi_pal = ktva_ktla(pmi_pal);
33771+ pax_close_kernel();
33772+#endif
33773+
33774 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33775 if (pmi_base[3]) {
33776 printk(KERN_INFO "vesafb: pmi: ports = ");
33777@@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33778 info->node, info->fix.id);
33779 return 0;
33780 err:
33781+
33782+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33783+ module_free_exec(NULL, pmi_code);
33784+#endif
33785+
33786 if (info->screen_base)
33787 iounmap(info->screen_base);
33788 framebuffer_release(info);
33789diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33790--- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33791+++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33792@@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33793 struct sysinfo i;
33794 int idx = 0;
33795
33796+ pax_track_stack();
33797+
33798 all_vm_events(events);
33799 si_meminfo(&i);
33800
33801diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33802--- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33803+++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33804@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33805 void
33806 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33807 {
33808- char *s = nd_get_link(nd);
33809+ const char *s = nd_get_link(nd);
33810
33811 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33812 IS_ERR(s) ? "<error>" : s);
33813diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33814--- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33815+++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33816@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33817 size += sizeof(struct io_event) * nr_events;
33818 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33819
33820- if (nr_pages < 0)
33821+ if (nr_pages <= 0)
33822 return -EINVAL;
33823
33824 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33825@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33826 struct aio_timeout to;
33827 int retry = 0;
33828
33829+ pax_track_stack();
33830+
33831 /* needed to zero any padding within an entry (there shouldn't be
33832 * any, but C is fun!
33833 */
33834@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33835 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33836 {
33837 ssize_t ret;
33838+ struct iovec iovstack;
33839
33840 #ifdef CONFIG_COMPAT
33841 if (compat)
33842 ret = compat_rw_copy_check_uvector(type,
33843 (struct compat_iovec __user *)kiocb->ki_buf,
33844- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33845+ kiocb->ki_nbytes, 1, &iovstack,
33846 &kiocb->ki_iovec);
33847 else
33848 #endif
33849 ret = rw_copy_check_uvector(type,
33850 (struct iovec __user *)kiocb->ki_buf,
33851- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33852+ kiocb->ki_nbytes, 1, &iovstack,
33853 &kiocb->ki_iovec);
33854 if (ret < 0)
33855 goto out;
33856
33857+ if (kiocb->ki_iovec == &iovstack) {
33858+ kiocb->ki_inline_vec = iovstack;
33859+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
33860+ }
33861 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33862 kiocb->ki_cur_seg = 0;
33863 /* ki_nbytes/left now reflect bytes instead of segs */
33864diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
33865--- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
33866+++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
33867@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33868 unsigned long limit;
33869
33870 limit = rlimit(RLIMIT_FSIZE);
33871+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33872 if (limit != RLIM_INFINITY && offset > limit)
33873 goto out_sig;
33874 if (offset > inode->i_sb->s_maxbytes)
33875diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
33876--- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
33877+++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
33878@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33879 {
33880 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33881 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33882- char *link = nd_get_link(nd);
33883+ const char *link = nd_get_link(nd);
33884 if (!IS_ERR(link))
33885 kfree(link);
33886 }
33887diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
33888--- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
33889+++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
33890@@ -16,6 +16,7 @@
33891 #include <linux/string.h>
33892 #include <linux/fs.h>
33893 #include <linux/file.h>
33894+#include <linux/security.h>
33895 #include <linux/stat.h>
33896 #include <linux/fcntl.h>
33897 #include <linux/ptrace.h>
33898@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33899 #endif
33900 # define START_STACK(u) ((void __user *)u.start_stack)
33901
33902+ memset(&dump, 0, sizeof(dump));
33903+
33904 fs = get_fs();
33905 set_fs(KERNEL_DS);
33906 has_dumped = 1;
33907@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33908
33909 /* If the size of the dump file exceeds the rlimit, then see what would happen
33910 if we wrote the stack, but not the data area. */
33911+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33912 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33913 dump.u_dsize = 0;
33914
33915 /* Make sure we have enough room to write the stack and data areas. */
33916+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33917 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33918 dump.u_ssize = 0;
33919
33920@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33921 rlim = rlimit(RLIMIT_DATA);
33922 if (rlim >= RLIM_INFINITY)
33923 rlim = ~0;
33924+
33925+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33926 if (ex.a_data + ex.a_bss > rlim)
33927 return -ENOMEM;
33928
33929@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33930 install_exec_creds(bprm);
33931 current->flags &= ~PF_FORKNOEXEC;
33932
33933+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33934+ current->mm->pax_flags = 0UL;
33935+#endif
33936+
33937+#ifdef CONFIG_PAX_PAGEEXEC
33938+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33939+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33940+
33941+#ifdef CONFIG_PAX_EMUTRAMP
33942+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33943+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33944+#endif
33945+
33946+#ifdef CONFIG_PAX_MPROTECT
33947+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33948+ current->mm->pax_flags |= MF_PAX_MPROTECT;
33949+#endif
33950+
33951+ }
33952+#endif
33953+
33954 if (N_MAGIC(ex) == OMAGIC) {
33955 unsigned long text_addr, map_size;
33956 loff_t pos;
33957@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
33958
33959 down_write(&current->mm->mmap_sem);
33960 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
33961- PROT_READ | PROT_WRITE | PROT_EXEC,
33962+ PROT_READ | PROT_WRITE,
33963 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
33964 fd_offset + ex.a_text);
33965 up_write(&current->mm->mmap_sem);
33966diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
33967--- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
33968+++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
33969@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
33970 #define elf_core_dump NULL
33971 #endif
33972
33973+#ifdef CONFIG_PAX_MPROTECT
33974+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
33975+#endif
33976+
33977 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
33978 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
33979 #else
33980@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
33981 .load_binary = load_elf_binary,
33982 .load_shlib = load_elf_library,
33983 .core_dump = elf_core_dump,
33984+
33985+#ifdef CONFIG_PAX_MPROTECT
33986+ .handle_mprotect= elf_handle_mprotect,
33987+#endif
33988+
33989 .min_coredump = ELF_EXEC_PAGESIZE,
33990 };
33991
33992@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
33993
33994 static int set_brk(unsigned long start, unsigned long end)
33995 {
33996+ unsigned long e = end;
33997+
33998 start = ELF_PAGEALIGN(start);
33999 end = ELF_PAGEALIGN(end);
34000 if (end > start) {
34001@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34002 if (BAD_ADDR(addr))
34003 return addr;
34004 }
34005- current->mm->start_brk = current->mm->brk = end;
34006+ current->mm->start_brk = current->mm->brk = e;
34007 return 0;
34008 }
34009
34010@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34011 elf_addr_t __user *u_rand_bytes;
34012 const char *k_platform = ELF_PLATFORM;
34013 const char *k_base_platform = ELF_BASE_PLATFORM;
34014- unsigned char k_rand_bytes[16];
34015+ u32 k_rand_bytes[4];
34016 int items;
34017 elf_addr_t *elf_info;
34018 int ei_index = 0;
34019 const struct cred *cred = current_cred();
34020 struct vm_area_struct *vma;
34021+ unsigned long saved_auxv[AT_VECTOR_SIZE];
34022+
34023+ pax_track_stack();
34024
34025 /*
34026 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34027@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34028 * Generate 16 random bytes for userspace PRNG seeding.
34029 */
34030 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34031- u_rand_bytes = (elf_addr_t __user *)
34032- STACK_ALLOC(p, sizeof(k_rand_bytes));
34033+ srandom32(k_rand_bytes[0] ^ random32());
34034+ srandom32(k_rand_bytes[1] ^ random32());
34035+ srandom32(k_rand_bytes[2] ^ random32());
34036+ srandom32(k_rand_bytes[3] ^ random32());
34037+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
34038+ u_rand_bytes = (elf_addr_t __user *) p;
34039 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34040 return -EFAULT;
34041
34042@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34043 return -EFAULT;
34044 current->mm->env_end = p;
34045
34046+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34047+
34048 /* Put the elf_info on the stack in the right place. */
34049 sp = (elf_addr_t __user *)envp + 1;
34050- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34051+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34052 return -EFAULT;
34053 return 0;
34054 }
34055@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34056 {
34057 struct elf_phdr *elf_phdata;
34058 struct elf_phdr *eppnt;
34059- unsigned long load_addr = 0;
34060+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34061 int load_addr_set = 0;
34062 unsigned long last_bss = 0, elf_bss = 0;
34063- unsigned long error = ~0UL;
34064+ unsigned long error = -EINVAL;
34065 unsigned long total_size;
34066 int retval, i, size;
34067
34068@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34069 goto out_close;
34070 }
34071
34072+#ifdef CONFIG_PAX_SEGMEXEC
34073+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34074+ pax_task_size = SEGMEXEC_TASK_SIZE;
34075+#endif
34076+
34077 eppnt = elf_phdata;
34078 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34079 if (eppnt->p_type == PT_LOAD) {
34080@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34081 k = load_addr + eppnt->p_vaddr;
34082 if (BAD_ADDR(k) ||
34083 eppnt->p_filesz > eppnt->p_memsz ||
34084- eppnt->p_memsz > TASK_SIZE ||
34085- TASK_SIZE - eppnt->p_memsz < k) {
34086+ eppnt->p_memsz > pax_task_size ||
34087+ pax_task_size - eppnt->p_memsz < k) {
34088 error = -ENOMEM;
34089 goto out_close;
34090 }
34091@@ -528,6 +553,193 @@ out:
34092 return error;
34093 }
34094
34095+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34096+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34097+{
34098+ unsigned long pax_flags = 0UL;
34099+
34100+#ifdef CONFIG_PAX_PAGEEXEC
34101+ if (elf_phdata->p_flags & PF_PAGEEXEC)
34102+ pax_flags |= MF_PAX_PAGEEXEC;
34103+#endif
34104+
34105+#ifdef CONFIG_PAX_SEGMEXEC
34106+ if (elf_phdata->p_flags & PF_SEGMEXEC)
34107+ pax_flags |= MF_PAX_SEGMEXEC;
34108+#endif
34109+
34110+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34111+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34112+ if ((__supported_pte_mask & _PAGE_NX))
34113+ pax_flags &= ~MF_PAX_SEGMEXEC;
34114+ else
34115+ pax_flags &= ~MF_PAX_PAGEEXEC;
34116+ }
34117+#endif
34118+
34119+#ifdef CONFIG_PAX_EMUTRAMP
34120+ if (elf_phdata->p_flags & PF_EMUTRAMP)
34121+ pax_flags |= MF_PAX_EMUTRAMP;
34122+#endif
34123+
34124+#ifdef CONFIG_PAX_MPROTECT
34125+ if (elf_phdata->p_flags & PF_MPROTECT)
34126+ pax_flags |= MF_PAX_MPROTECT;
34127+#endif
34128+
34129+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34130+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34131+ pax_flags |= MF_PAX_RANDMMAP;
34132+#endif
34133+
34134+ return pax_flags;
34135+}
34136+#endif
34137+
34138+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34139+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34140+{
34141+ unsigned long pax_flags = 0UL;
34142+
34143+#ifdef CONFIG_PAX_PAGEEXEC
34144+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34145+ pax_flags |= MF_PAX_PAGEEXEC;
34146+#endif
34147+
34148+#ifdef CONFIG_PAX_SEGMEXEC
34149+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34150+ pax_flags |= MF_PAX_SEGMEXEC;
34151+#endif
34152+
34153+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34154+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34155+ if ((__supported_pte_mask & _PAGE_NX))
34156+ pax_flags &= ~MF_PAX_SEGMEXEC;
34157+ else
34158+ pax_flags &= ~MF_PAX_PAGEEXEC;
34159+ }
34160+#endif
34161+
34162+#ifdef CONFIG_PAX_EMUTRAMP
34163+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34164+ pax_flags |= MF_PAX_EMUTRAMP;
34165+#endif
34166+
34167+#ifdef CONFIG_PAX_MPROTECT
34168+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34169+ pax_flags |= MF_PAX_MPROTECT;
34170+#endif
34171+
34172+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34173+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34174+ pax_flags |= MF_PAX_RANDMMAP;
34175+#endif
34176+
34177+ return pax_flags;
34178+}
34179+#endif
34180+
34181+#ifdef CONFIG_PAX_EI_PAX
34182+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34183+{
34184+ unsigned long pax_flags = 0UL;
34185+
34186+#ifdef CONFIG_PAX_PAGEEXEC
34187+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34188+ pax_flags |= MF_PAX_PAGEEXEC;
34189+#endif
34190+
34191+#ifdef CONFIG_PAX_SEGMEXEC
34192+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34193+ pax_flags |= MF_PAX_SEGMEXEC;
34194+#endif
34195+
34196+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34197+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34198+ if ((__supported_pte_mask & _PAGE_NX))
34199+ pax_flags &= ~MF_PAX_SEGMEXEC;
34200+ else
34201+ pax_flags &= ~MF_PAX_PAGEEXEC;
34202+ }
34203+#endif
34204+
34205+#ifdef CONFIG_PAX_EMUTRAMP
34206+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34207+ pax_flags |= MF_PAX_EMUTRAMP;
34208+#endif
34209+
34210+#ifdef CONFIG_PAX_MPROTECT
34211+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34212+ pax_flags |= MF_PAX_MPROTECT;
34213+#endif
34214+
34215+#ifdef CONFIG_PAX_ASLR
34216+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34217+ pax_flags |= MF_PAX_RANDMMAP;
34218+#endif
34219+
34220+ return pax_flags;
34221+}
34222+#endif
34223+
34224+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34225+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34226+{
34227+ unsigned long pax_flags = 0UL;
34228+
34229+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34230+ unsigned long i;
34231+ int found_flags = 0;
34232+#endif
34233+
34234+#ifdef CONFIG_PAX_EI_PAX
34235+ pax_flags = pax_parse_ei_pax(elf_ex);
34236+#endif
34237+
34238+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34239+ for (i = 0UL; i < elf_ex->e_phnum; i++)
34240+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34241+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34242+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34243+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34244+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34245+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34246+ return -EINVAL;
34247+
34248+#ifdef CONFIG_PAX_SOFTMODE
34249+ if (pax_softmode)
34250+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
34251+ else
34252+#endif
34253+
34254+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34255+ found_flags = 1;
34256+ break;
34257+ }
34258+#endif
34259+
34260+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34261+ if (found_flags == 0) {
34262+ struct elf_phdr phdr;
34263+ memset(&phdr, 0, sizeof(phdr));
34264+ phdr.p_flags = PF_NOEMUTRAMP;
34265+#ifdef CONFIG_PAX_SOFTMODE
34266+ if (pax_softmode)
34267+ pax_flags = pax_parse_softmode(&phdr);
34268+ else
34269+#endif
34270+ pax_flags = pax_parse_hardmode(&phdr);
34271+ }
34272+#endif
34273+
34274+ if (0 > pax_check_flags(&pax_flags))
34275+ return -EINVAL;
34276+
34277+ current->mm->pax_flags = pax_flags;
34278+ return 0;
34279+}
34280+#endif
34281+
34282 /*
34283 * These are the functions used to load ELF style executables and shared
34284 * libraries. There is no binary dependent code anywhere else.
34285@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34286 {
34287 unsigned int random_variable = 0;
34288
34289+#ifdef CONFIG_PAX_RANDUSTACK
34290+ if (randomize_va_space)
34291+ return stack_top - current->mm->delta_stack;
34292+#endif
34293+
34294 if ((current->flags & PF_RANDOMIZE) &&
34295 !(current->personality & ADDR_NO_RANDOMIZE)) {
34296 random_variable = get_random_int() & STACK_RND_MASK;
34297@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34298 unsigned long load_addr = 0, load_bias = 0;
34299 int load_addr_set = 0;
34300 char * elf_interpreter = NULL;
34301- unsigned long error;
34302+ unsigned long error = 0;
34303 struct elf_phdr *elf_ppnt, *elf_phdata;
34304 unsigned long elf_bss, elf_brk;
34305 int retval, i;
34306@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34307 unsigned long start_code, end_code, start_data, end_data;
34308 unsigned long reloc_func_desc __maybe_unused = 0;
34309 int executable_stack = EXSTACK_DEFAULT;
34310- unsigned long def_flags = 0;
34311 struct {
34312 struct elfhdr elf_ex;
34313 struct elfhdr interp_elf_ex;
34314 } *loc;
34315+ unsigned long pax_task_size = TASK_SIZE;
34316
34317 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34318 if (!loc) {
34319@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34320
34321 /* OK, This is the point of no return */
34322 current->flags &= ~PF_FORKNOEXEC;
34323- current->mm->def_flags = def_flags;
34324+
34325+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34326+ current->mm->pax_flags = 0UL;
34327+#endif
34328+
34329+#ifdef CONFIG_PAX_DLRESOLVE
34330+ current->mm->call_dl_resolve = 0UL;
34331+#endif
34332+
34333+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34334+ current->mm->call_syscall = 0UL;
34335+#endif
34336+
34337+#ifdef CONFIG_PAX_ASLR
34338+ current->mm->delta_mmap = 0UL;
34339+ current->mm->delta_stack = 0UL;
34340+#endif
34341+
34342+ current->mm->def_flags = 0;
34343+
34344+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34345+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34346+ send_sig(SIGKILL, current, 0);
34347+ goto out_free_dentry;
34348+ }
34349+#endif
34350+
34351+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34352+ pax_set_initial_flags(bprm);
34353+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34354+ if (pax_set_initial_flags_func)
34355+ (pax_set_initial_flags_func)(bprm);
34356+#endif
34357+
34358+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34359+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34360+ current->mm->context.user_cs_limit = PAGE_SIZE;
34361+ current->mm->def_flags |= VM_PAGEEXEC;
34362+ }
34363+#endif
34364+
34365+#ifdef CONFIG_PAX_SEGMEXEC
34366+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34367+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34368+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34369+ pax_task_size = SEGMEXEC_TASK_SIZE;
34370+ current->mm->def_flags |= VM_NOHUGEPAGE;
34371+ }
34372+#endif
34373+
34374+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34375+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34376+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34377+ put_cpu();
34378+ }
34379+#endif
34380
34381 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34382 may depend on the personality. */
34383 SET_PERSONALITY(loc->elf_ex);
34384+
34385+#ifdef CONFIG_PAX_ASLR
34386+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34387+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34388+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34389+ }
34390+#endif
34391+
34392+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34393+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34394+ executable_stack = EXSTACK_DISABLE_X;
34395+ current->personality &= ~READ_IMPLIES_EXEC;
34396+ } else
34397+#endif
34398+
34399 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34400 current->personality |= READ_IMPLIES_EXEC;
34401
34402@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34403 #else
34404 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34405 #endif
34406+
34407+#ifdef CONFIG_PAX_RANDMMAP
34408+ /* PaX: randomize base address at the default exe base if requested */
34409+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34410+#ifdef CONFIG_SPARC64
34411+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34412+#else
34413+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34414+#endif
34415+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34416+ elf_flags |= MAP_FIXED;
34417+ }
34418+#endif
34419+
34420 }
34421
34422 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34423@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34424 * allowed task size. Note that p_filesz must always be
34425 * <= p_memsz so it is only necessary to check p_memsz.
34426 */
34427- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34428- elf_ppnt->p_memsz > TASK_SIZE ||
34429- TASK_SIZE - elf_ppnt->p_memsz < k) {
34430+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34431+ elf_ppnt->p_memsz > pax_task_size ||
34432+ pax_task_size - elf_ppnt->p_memsz < k) {
34433 /* set_brk can never work. Avoid overflows. */
34434 send_sig(SIGKILL, current, 0);
34435 retval = -EINVAL;
34436@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34437 start_data += load_bias;
34438 end_data += load_bias;
34439
34440+#ifdef CONFIG_PAX_RANDMMAP
34441+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34442+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34443+#endif
34444+
34445 /* Calling set_brk effectively mmaps the pages that we need
34446 * for the bss and break sections. We must do this before
34447 * mapping in the interpreter, to make sure it doesn't wind
34448@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34449 goto out_free_dentry;
34450 }
34451 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34452- send_sig(SIGSEGV, current, 0);
34453- retval = -EFAULT; /* Nobody gets to see this, but.. */
34454- goto out_free_dentry;
34455+ /*
34456+ * This bss-zeroing can fail if the ELF
34457+ * file specifies odd protections. So
34458+ * we don't check the return value
34459+ */
34460 }
34461
34462 if (elf_interpreter) {
34463@@ -1090,7 +1398,7 @@ out:
34464 * Decide what to dump of a segment, part, all or none.
34465 */
34466 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34467- unsigned long mm_flags)
34468+ unsigned long mm_flags, long signr)
34469 {
34470 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34471
34472@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34473 if (vma->vm_file == NULL)
34474 return 0;
34475
34476- if (FILTER(MAPPED_PRIVATE))
34477+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34478 goto whole;
34479
34480 /*
34481@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34482 {
34483 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34484 int i = 0;
34485- do
34486+ do {
34487 i += 2;
34488- while (auxv[i - 2] != AT_NULL);
34489+ } while (auxv[i - 2] != AT_NULL);
34490 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34491 }
34492
34493@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34494 }
34495
34496 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34497- unsigned long mm_flags)
34498+ struct coredump_params *cprm)
34499 {
34500 struct vm_area_struct *vma;
34501 size_t size = 0;
34502
34503 for (vma = first_vma(current, gate_vma); vma != NULL;
34504 vma = next_vma(vma, gate_vma))
34505- size += vma_dump_size(vma, mm_flags);
34506+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34507 return size;
34508 }
34509
34510@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34511
34512 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34513
34514- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34515+ offset += elf_core_vma_data_size(gate_vma, cprm);
34516 offset += elf_core_extra_data_size();
34517 e_shoff = offset;
34518
34519@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34520 offset = dataoff;
34521
34522 size += sizeof(*elf);
34523+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34524 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34525 goto end_coredump;
34526
34527 size += sizeof(*phdr4note);
34528+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34529 if (size > cprm->limit
34530 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34531 goto end_coredump;
34532@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34533 phdr.p_offset = offset;
34534 phdr.p_vaddr = vma->vm_start;
34535 phdr.p_paddr = 0;
34536- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34537+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34538 phdr.p_memsz = vma->vm_end - vma->vm_start;
34539 offset += phdr.p_filesz;
34540 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34541@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34542 phdr.p_align = ELF_EXEC_PAGESIZE;
34543
34544 size += sizeof(phdr);
34545+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34546 if (size > cprm->limit
34547 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34548 goto end_coredump;
34549@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34550 unsigned long addr;
34551 unsigned long end;
34552
34553- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34554+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34555
34556 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34557 struct page *page;
34558@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34559 page = get_dump_page(addr);
34560 if (page) {
34561 void *kaddr = kmap(page);
34562+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34563 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34564 !dump_write(cprm->file, kaddr,
34565 PAGE_SIZE);
34566@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34567
34568 if (e_phnum == PN_XNUM) {
34569 size += sizeof(*shdr4extnum);
34570+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34571 if (size > cprm->limit
34572 || !dump_write(cprm->file, shdr4extnum,
34573 sizeof(*shdr4extnum)))
34574@@ -2067,6 +2380,97 @@ out:
34575
34576 #endif /* CONFIG_ELF_CORE */
34577
34578+#ifdef CONFIG_PAX_MPROTECT
34579+/* PaX: non-PIC ELF libraries need relocations on their executable segments
34580+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34581+ * we'll remove VM_MAYWRITE for good on RELRO segments.
34582+ *
34583+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34584+ * basis because we want to allow the common case and not the special ones.
34585+ */
34586+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34587+{
34588+ struct elfhdr elf_h;
34589+ struct elf_phdr elf_p;
34590+ unsigned long i;
34591+ unsigned long oldflags;
34592+ bool is_textrel_rw, is_textrel_rx, is_relro;
34593+
34594+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34595+ return;
34596+
34597+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34598+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34599+
34600+#ifdef CONFIG_PAX_ELFRELOCS
34601+ /* possible TEXTREL */
34602+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34603+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34604+#else
34605+ is_textrel_rw = false;
34606+ is_textrel_rx = false;
34607+#endif
34608+
34609+ /* possible RELRO */
34610+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34611+
34612+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34613+ return;
34614+
34615+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34616+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34617+
34618+#ifdef CONFIG_PAX_ETEXECRELOCS
34619+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34620+#else
34621+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34622+#endif
34623+
34624+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34625+ !elf_check_arch(&elf_h) ||
34626+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34627+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34628+ return;
34629+
34630+ for (i = 0UL; i < elf_h.e_phnum; i++) {
34631+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34632+ return;
34633+ switch (elf_p.p_type) {
34634+ case PT_DYNAMIC:
34635+ if (!is_textrel_rw && !is_textrel_rx)
34636+ continue;
34637+ i = 0UL;
34638+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34639+ elf_dyn dyn;
34640+
34641+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34642+ return;
34643+ if (dyn.d_tag == DT_NULL)
34644+ return;
34645+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34646+ gr_log_textrel(vma);
34647+ if (is_textrel_rw)
34648+ vma->vm_flags |= VM_MAYWRITE;
34649+ else
34650+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34651+ vma->vm_flags &= ~VM_MAYWRITE;
34652+ return;
34653+ }
34654+ i++;
34655+ }
34656+ return;
34657+
34658+ case PT_GNU_RELRO:
34659+ if (!is_relro)
34660+ continue;
34661+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34662+ vma->vm_flags &= ~VM_MAYWRITE;
34663+ return;
34664+ }
34665+ }
34666+}
34667+#endif
34668+
34669 static int __init init_elf_binfmt(void)
34670 {
34671 return register_binfmt(&elf_format);
34672diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34673--- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34674+++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34675@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34676 realdatastart = (unsigned long) -ENOMEM;
34677 printk("Unable to allocate RAM for process data, errno %d\n",
34678 (int)-realdatastart);
34679+ down_write(&current->mm->mmap_sem);
34680 do_munmap(current->mm, textpos, text_len);
34681+ up_write(&current->mm->mmap_sem);
34682 ret = realdatastart;
34683 goto err;
34684 }
34685@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34686 }
34687 if (IS_ERR_VALUE(result)) {
34688 printk("Unable to read data+bss, errno %d\n", (int)-result);
34689+ down_write(&current->mm->mmap_sem);
34690 do_munmap(current->mm, textpos, text_len);
34691 do_munmap(current->mm, realdatastart, len);
34692+ up_write(&current->mm->mmap_sem);
34693 ret = result;
34694 goto err;
34695 }
34696@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34697 }
34698 if (IS_ERR_VALUE(result)) {
34699 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34700+ down_write(&current->mm->mmap_sem);
34701 do_munmap(current->mm, textpos, text_len + data_len + extra +
34702 MAX_SHARED_LIBS * sizeof(unsigned long));
34703+ up_write(&current->mm->mmap_sem);
34704 ret = result;
34705 goto err;
34706 }
34707diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34708--- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34709+++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34710@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34711 const int read = bio_data_dir(bio) == READ;
34712 struct bio_map_data *bmd = bio->bi_private;
34713 int i;
34714- char *p = bmd->sgvecs[0].iov_base;
34715+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
34716
34717 __bio_for_each_segment(bvec, bio, i, 0) {
34718 char *addr = page_address(bvec->bv_page);
34719diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34720--- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34721+++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34722@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34723 else if (bdev->bd_contains == bdev)
34724 return true; /* is a whole device which isn't held */
34725
34726- else if (whole->bd_holder == bd_may_claim)
34727+ else if (whole->bd_holder == (void *)bd_may_claim)
34728 return true; /* is a partition of a device that is being partitioned */
34729 else if (whole->bd_holder != NULL)
34730 return false; /* is a partition of a held device */
34731diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34732--- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34733+++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34734@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34735 free_extent_buffer(buf);
34736 add_root_to_dirty_list(root);
34737 } else {
34738- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34739- parent_start = parent->start;
34740- else
34741+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34742+ if (parent)
34743+ parent_start = parent->start;
34744+ else
34745+ parent_start = 0;
34746+ } else
34747 parent_start = 0;
34748
34749 WARN_ON(trans->transid != btrfs_header_generation(parent));
34750@@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34751
34752 ret = 0;
34753 if (slot == 0) {
34754- struct btrfs_disk_key disk_key;
34755 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34756 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34757 }
34758diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34759--- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34760+++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34761@@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34762 while(1) {
34763 if (entry->bytes < bytes ||
34764 (!entry->bitmap && entry->offset < min_start)) {
34765- struct rb_node *node;
34766-
34767 node = rb_next(&entry->offset_index);
34768 if (!node)
34769 break;
34770@@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34771 cluster, entry, bytes,
34772 min_start);
34773 if (ret == 0) {
34774- struct rb_node *node;
34775 node = rb_next(&entry->offset_index);
34776 if (!node)
34777 break;
34778diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34779--- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34780+++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34781@@ -6947,7 +6947,7 @@ fail:
34782 return -ENOMEM;
34783 }
34784
34785-static int btrfs_getattr(struct vfsmount *mnt,
34786+int btrfs_getattr(struct vfsmount *mnt,
34787 struct dentry *dentry, struct kstat *stat)
34788 {
34789 struct inode *inode = dentry->d_inode;
34790@@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34791 return 0;
34792 }
34793
34794+EXPORT_SYMBOL(btrfs_getattr);
34795+
34796+dev_t get_btrfs_dev_from_inode(struct inode *inode)
34797+{
34798+ return BTRFS_I(inode)->root->anon_super.s_dev;
34799+}
34800+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34801+
34802 /*
34803 * If a file is moved, it will inherit the cow and compression flags of the new
34804 * directory.
34805diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34806--- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34807+++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34808@@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34809 for (i = 0; i < num_types; i++) {
34810 struct btrfs_space_info *tmp;
34811
34812+ /* Don't copy in more than we allocated */
34813 if (!slot_count)
34814 break;
34815
34816+ slot_count--;
34817+
34818 info = NULL;
34819 rcu_read_lock();
34820 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34821@@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34822 memcpy(dest, &space, sizeof(space));
34823 dest++;
34824 space_args.total_spaces++;
34825- slot_count--;
34826 }
34827- if (!slot_count)
34828- break;
34829 }
34830 up_read(&info->groups_sem);
34831 }
34832diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34833--- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34834+++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34835@@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34836 }
34837 spin_unlock(&rc->reloc_root_tree.lock);
34838
34839- BUG_ON((struct btrfs_root *)node->data != root);
34840+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
34841
34842 if (!del) {
34843 spin_lock(&rc->reloc_root_tree.lock);
34844diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34845--- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
34846+++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
34847@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34848 args);
34849
34850 /* start by checking things over */
34851- ASSERT(cache->fstop_percent >= 0 &&
34852- cache->fstop_percent < cache->fcull_percent &&
34853+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
34854 cache->fcull_percent < cache->frun_percent &&
34855 cache->frun_percent < 100);
34856
34857- ASSERT(cache->bstop_percent >= 0 &&
34858- cache->bstop_percent < cache->bcull_percent &&
34859+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
34860 cache->bcull_percent < cache->brun_percent &&
34861 cache->brun_percent < 100);
34862
34863diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
34864--- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
34865+++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
34866@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34867 if (n > buflen)
34868 return -EMSGSIZE;
34869
34870- if (copy_to_user(_buffer, buffer, n) != 0)
34871+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34872 return -EFAULT;
34873
34874 return n;
34875@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34876 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34877 return -EIO;
34878
34879- if (datalen < 0 || datalen > PAGE_SIZE - 1)
34880+ if (datalen > PAGE_SIZE - 1)
34881 return -EOPNOTSUPP;
34882
34883 /* drag the command string into the kernel so we can parse it */
34884@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34885 if (args[0] != '%' || args[1] != '\0')
34886 return -EINVAL;
34887
34888- if (fstop < 0 || fstop >= cache->fcull_percent)
34889+ if (fstop >= cache->fcull_percent)
34890 return cachefiles_daemon_range_error(cache, args);
34891
34892 cache->fstop_percent = fstop;
34893@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34894 if (args[0] != '%' || args[1] != '\0')
34895 return -EINVAL;
34896
34897- if (bstop < 0 || bstop >= cache->bcull_percent)
34898+ if (bstop >= cache->bcull_percent)
34899 return cachefiles_daemon_range_error(cache, args);
34900
34901 cache->bstop_percent = bstop;
34902diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
34903--- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
34904+++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
34905@@ -57,7 +57,7 @@ struct cachefiles_cache {
34906 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34907 struct rb_root active_nodes; /* active nodes (can't be culled) */
34908 rwlock_t active_lock; /* lock for active_nodes */
34909- atomic_t gravecounter; /* graveyard uniquifier */
34910+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34911 unsigned frun_percent; /* when to stop culling (% files) */
34912 unsigned fcull_percent; /* when to start culling (% files) */
34913 unsigned fstop_percent; /* when to stop allocating (% files) */
34914@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34915 * proc.c
34916 */
34917 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34918-extern atomic_t cachefiles_lookup_histogram[HZ];
34919-extern atomic_t cachefiles_mkdir_histogram[HZ];
34920-extern atomic_t cachefiles_create_histogram[HZ];
34921+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34922+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34923+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34924
34925 extern int __init cachefiles_proc_init(void);
34926 extern void cachefiles_proc_cleanup(void);
34927 static inline
34928-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34929+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34930 {
34931 unsigned long jif = jiffies - start_jif;
34932 if (jif >= HZ)
34933 jif = HZ - 1;
34934- atomic_inc(&histogram[jif]);
34935+ atomic_inc_unchecked(&histogram[jif]);
34936 }
34937
34938 #else
34939diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
34940--- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
34941+++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
34942@@ -318,7 +318,7 @@ try_again:
34943 /* first step is to make up a grave dentry in the graveyard */
34944 sprintf(nbuffer, "%08x%08x",
34945 (uint32_t) get_seconds(),
34946- (uint32_t) atomic_inc_return(&cache->gravecounter));
34947+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34948
34949 /* do the multiway lock magic */
34950 trap = lock_rename(cache->graveyard, dir);
34951diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
34952--- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
34953+++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
34954@@ -14,9 +14,9 @@
34955 #include <linux/seq_file.h>
34956 #include "internal.h"
34957
34958-atomic_t cachefiles_lookup_histogram[HZ];
34959-atomic_t cachefiles_mkdir_histogram[HZ];
34960-atomic_t cachefiles_create_histogram[HZ];
34961+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34962+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34963+atomic_unchecked_t cachefiles_create_histogram[HZ];
34964
34965 /*
34966 * display the latency histogram
34967@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34968 return 0;
34969 default:
34970 index = (unsigned long) v - 3;
34971- x = atomic_read(&cachefiles_lookup_histogram[index]);
34972- y = atomic_read(&cachefiles_mkdir_histogram[index]);
34973- z = atomic_read(&cachefiles_create_histogram[index]);
34974+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34975+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34976+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34977 if (x == 0 && y == 0 && z == 0)
34978 return 0;
34979
34980diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
34981--- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
34982+++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
34983@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
34984 old_fs = get_fs();
34985 set_fs(KERNEL_DS);
34986 ret = file->f_op->write(
34987- file, (const void __user *) data, len, &pos);
34988+ file, (__force const void __user *) data, len, &pos);
34989 set_fs(old_fs);
34990 kunmap(page);
34991 if (ret != len)
34992diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
34993--- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
34994+++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
34995@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
34996 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
34997 struct ceph_mds_client *mdsc = fsc->mdsc;
34998 unsigned frag = fpos_frag(filp->f_pos);
34999- int off = fpos_off(filp->f_pos);
35000+ unsigned int off = fpos_off(filp->f_pos);
35001 int err;
35002 u32 ftype;
35003 struct ceph_mds_reply_info_parsed *rinfo;
35004@@ -360,7 +360,7 @@ more:
35005 rinfo = &fi->last_readdir->r_reply_info;
35006 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35007 rinfo->dir_nr, off, fi->offset);
35008- while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35009+ while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35010 u64 pos = ceph_make_fpos(frag, off);
35011 struct ceph_mds_reply_inode *in =
35012 rinfo->dir_in[off - fi->offset].in;
35013diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35014--- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35015+++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35016@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35017 tcon = list_entry(tmp3,
35018 struct cifsTconInfo,
35019 tcon_list);
35020- atomic_set(&tcon->num_smbs_sent, 0);
35021- atomic_set(&tcon->num_writes, 0);
35022- atomic_set(&tcon->num_reads, 0);
35023- atomic_set(&tcon->num_oplock_brks, 0);
35024- atomic_set(&tcon->num_opens, 0);
35025- atomic_set(&tcon->num_posixopens, 0);
35026- atomic_set(&tcon->num_posixmkdirs, 0);
35027- atomic_set(&tcon->num_closes, 0);
35028- atomic_set(&tcon->num_deletes, 0);
35029- atomic_set(&tcon->num_mkdirs, 0);
35030- atomic_set(&tcon->num_rmdirs, 0);
35031- atomic_set(&tcon->num_renames, 0);
35032- atomic_set(&tcon->num_t2renames, 0);
35033- atomic_set(&tcon->num_ffirst, 0);
35034- atomic_set(&tcon->num_fnext, 0);
35035- atomic_set(&tcon->num_fclose, 0);
35036- atomic_set(&tcon->num_hardlinks, 0);
35037- atomic_set(&tcon->num_symlinks, 0);
35038- atomic_set(&tcon->num_locks, 0);
35039+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35040+ atomic_set_unchecked(&tcon->num_writes, 0);
35041+ atomic_set_unchecked(&tcon->num_reads, 0);
35042+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35043+ atomic_set_unchecked(&tcon->num_opens, 0);
35044+ atomic_set_unchecked(&tcon->num_posixopens, 0);
35045+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35046+ atomic_set_unchecked(&tcon->num_closes, 0);
35047+ atomic_set_unchecked(&tcon->num_deletes, 0);
35048+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
35049+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
35050+ atomic_set_unchecked(&tcon->num_renames, 0);
35051+ atomic_set_unchecked(&tcon->num_t2renames, 0);
35052+ atomic_set_unchecked(&tcon->num_ffirst, 0);
35053+ atomic_set_unchecked(&tcon->num_fnext, 0);
35054+ atomic_set_unchecked(&tcon->num_fclose, 0);
35055+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
35056+ atomic_set_unchecked(&tcon->num_symlinks, 0);
35057+ atomic_set_unchecked(&tcon->num_locks, 0);
35058 }
35059 }
35060 }
35061@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35062 if (tcon->need_reconnect)
35063 seq_puts(m, "\tDISCONNECTED ");
35064 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35065- atomic_read(&tcon->num_smbs_sent),
35066- atomic_read(&tcon->num_oplock_brks));
35067+ atomic_read_unchecked(&tcon->num_smbs_sent),
35068+ atomic_read_unchecked(&tcon->num_oplock_brks));
35069 seq_printf(m, "\nReads: %d Bytes: %lld",
35070- atomic_read(&tcon->num_reads),
35071+ atomic_read_unchecked(&tcon->num_reads),
35072 (long long)(tcon->bytes_read));
35073 seq_printf(m, "\nWrites: %d Bytes: %lld",
35074- atomic_read(&tcon->num_writes),
35075+ atomic_read_unchecked(&tcon->num_writes),
35076 (long long)(tcon->bytes_written));
35077 seq_printf(m, "\nFlushes: %d",
35078- atomic_read(&tcon->num_flushes));
35079+ atomic_read_unchecked(&tcon->num_flushes));
35080 seq_printf(m, "\nLocks: %d HardLinks: %d "
35081 "Symlinks: %d",
35082- atomic_read(&tcon->num_locks),
35083- atomic_read(&tcon->num_hardlinks),
35084- atomic_read(&tcon->num_symlinks));
35085+ atomic_read_unchecked(&tcon->num_locks),
35086+ atomic_read_unchecked(&tcon->num_hardlinks),
35087+ atomic_read_unchecked(&tcon->num_symlinks));
35088 seq_printf(m, "\nOpens: %d Closes: %d "
35089 "Deletes: %d",
35090- atomic_read(&tcon->num_opens),
35091- atomic_read(&tcon->num_closes),
35092- atomic_read(&tcon->num_deletes));
35093+ atomic_read_unchecked(&tcon->num_opens),
35094+ atomic_read_unchecked(&tcon->num_closes),
35095+ atomic_read_unchecked(&tcon->num_deletes));
35096 seq_printf(m, "\nPosix Opens: %d "
35097 "Posix Mkdirs: %d",
35098- atomic_read(&tcon->num_posixopens),
35099- atomic_read(&tcon->num_posixmkdirs));
35100+ atomic_read_unchecked(&tcon->num_posixopens),
35101+ atomic_read_unchecked(&tcon->num_posixmkdirs));
35102 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35103- atomic_read(&tcon->num_mkdirs),
35104- atomic_read(&tcon->num_rmdirs));
35105+ atomic_read_unchecked(&tcon->num_mkdirs),
35106+ atomic_read_unchecked(&tcon->num_rmdirs));
35107 seq_printf(m, "\nRenames: %d T2 Renames %d",
35108- atomic_read(&tcon->num_renames),
35109- atomic_read(&tcon->num_t2renames));
35110+ atomic_read_unchecked(&tcon->num_renames),
35111+ atomic_read_unchecked(&tcon->num_t2renames));
35112 seq_printf(m, "\nFindFirst: %d FNext %d "
35113 "FClose %d",
35114- atomic_read(&tcon->num_ffirst),
35115- atomic_read(&tcon->num_fnext),
35116- atomic_read(&tcon->num_fclose));
35117+ atomic_read_unchecked(&tcon->num_ffirst),
35118+ atomic_read_unchecked(&tcon->num_fnext),
35119+ atomic_read_unchecked(&tcon->num_fclose));
35120 }
35121 }
35122 }
35123diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35124--- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35125+++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35126@@ -305,28 +305,28 @@ struct cifsTconInfo {
35127 __u16 Flags; /* optional support bits */
35128 enum statusEnum tidStatus;
35129 #ifdef CONFIG_CIFS_STATS
35130- atomic_t num_smbs_sent;
35131- atomic_t num_writes;
35132- atomic_t num_reads;
35133- atomic_t num_flushes;
35134- atomic_t num_oplock_brks;
35135- atomic_t num_opens;
35136- atomic_t num_closes;
35137- atomic_t num_deletes;
35138- atomic_t num_mkdirs;
35139- atomic_t num_posixopens;
35140- atomic_t num_posixmkdirs;
35141- atomic_t num_rmdirs;
35142- atomic_t num_renames;
35143- atomic_t num_t2renames;
35144- atomic_t num_ffirst;
35145- atomic_t num_fnext;
35146- atomic_t num_fclose;
35147- atomic_t num_hardlinks;
35148- atomic_t num_symlinks;
35149- atomic_t num_locks;
35150- atomic_t num_acl_get;
35151- atomic_t num_acl_set;
35152+ atomic_unchecked_t num_smbs_sent;
35153+ atomic_unchecked_t num_writes;
35154+ atomic_unchecked_t num_reads;
35155+ atomic_unchecked_t num_flushes;
35156+ atomic_unchecked_t num_oplock_brks;
35157+ atomic_unchecked_t num_opens;
35158+ atomic_unchecked_t num_closes;
35159+ atomic_unchecked_t num_deletes;
35160+ atomic_unchecked_t num_mkdirs;
35161+ atomic_unchecked_t num_posixopens;
35162+ atomic_unchecked_t num_posixmkdirs;
35163+ atomic_unchecked_t num_rmdirs;
35164+ atomic_unchecked_t num_renames;
35165+ atomic_unchecked_t num_t2renames;
35166+ atomic_unchecked_t num_ffirst;
35167+ atomic_unchecked_t num_fnext;
35168+ atomic_unchecked_t num_fclose;
35169+ atomic_unchecked_t num_hardlinks;
35170+ atomic_unchecked_t num_symlinks;
35171+ atomic_unchecked_t num_locks;
35172+ atomic_unchecked_t num_acl_get;
35173+ atomic_unchecked_t num_acl_set;
35174 #ifdef CONFIG_CIFS_STATS2
35175 unsigned long long time_writes;
35176 unsigned long long time_reads;
35177@@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35178 }
35179
35180 #ifdef CONFIG_CIFS_STATS
35181-#define cifs_stats_inc atomic_inc
35182+#define cifs_stats_inc atomic_inc_unchecked
35183
35184 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35185 unsigned int bytes)
35186diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35187--- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35188+++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35189@@ -577,7 +577,7 @@ symlink_exit:
35190
35191 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35192 {
35193- char *p = nd_get_link(nd);
35194+ const char *p = nd_get_link(nd);
35195 if (!IS_ERR(p))
35196 kfree(p);
35197 }
35198diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35199--- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35200+++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35201@@ -24,7 +24,7 @@
35202 #include "coda_linux.h"
35203 #include "coda_cache.h"
35204
35205-static atomic_t permission_epoch = ATOMIC_INIT(0);
35206+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35207
35208 /* replace or extend an acl cache hit */
35209 void coda_cache_enter(struct inode *inode, int mask)
35210@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35211 struct coda_inode_info *cii = ITOC(inode);
35212
35213 spin_lock(&cii->c_lock);
35214- cii->c_cached_epoch = atomic_read(&permission_epoch);
35215+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35216 if (cii->c_uid != current_fsuid()) {
35217 cii->c_uid = current_fsuid();
35218 cii->c_cached_perm = mask;
35219@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35220 {
35221 struct coda_inode_info *cii = ITOC(inode);
35222 spin_lock(&cii->c_lock);
35223- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35224+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35225 spin_unlock(&cii->c_lock);
35226 }
35227
35228 /* remove all acl caches */
35229 void coda_cache_clear_all(struct super_block *sb)
35230 {
35231- atomic_inc(&permission_epoch);
35232+ atomic_inc_unchecked(&permission_epoch);
35233 }
35234
35235
35236@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35237 spin_lock(&cii->c_lock);
35238 hit = (mask & cii->c_cached_perm) == mask &&
35239 cii->c_uid == current_fsuid() &&
35240- cii->c_cached_epoch == atomic_read(&permission_epoch);
35241+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35242 spin_unlock(&cii->c_lock);
35243
35244 return hit;
35245diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35246--- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35247+++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35248@@ -30,11 +30,13 @@
35249 #undef elf_phdr
35250 #undef elf_shdr
35251 #undef elf_note
35252+#undef elf_dyn
35253 #undef elf_addr_t
35254 #define elfhdr elf32_hdr
35255 #define elf_phdr elf32_phdr
35256 #define elf_shdr elf32_shdr
35257 #define elf_note elf32_note
35258+#define elf_dyn Elf32_Dyn
35259 #define elf_addr_t Elf32_Addr
35260
35261 /*
35262diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35263--- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35264+++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35265@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35266 goto out;
35267
35268 ret = -EINVAL;
35269- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35270+ if (nr_segs > UIO_MAXIOV)
35271 goto out;
35272 if (nr_segs > fast_segs) {
35273 ret = -ENOMEM;
35274@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35275
35276 struct compat_readdir_callback {
35277 struct compat_old_linux_dirent __user *dirent;
35278+ struct file * file;
35279 int result;
35280 };
35281
35282@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35283 buf->result = -EOVERFLOW;
35284 return -EOVERFLOW;
35285 }
35286+
35287+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35288+ return 0;
35289+
35290 buf->result++;
35291 dirent = buf->dirent;
35292 if (!access_ok(VERIFY_WRITE, dirent,
35293@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35294
35295 buf.result = 0;
35296 buf.dirent = dirent;
35297+ buf.file = file;
35298
35299 error = vfs_readdir(file, compat_fillonedir, &buf);
35300 if (buf.result)
35301@@ -917,6 +923,7 @@ struct compat_linux_dirent {
35302 struct compat_getdents_callback {
35303 struct compat_linux_dirent __user *current_dir;
35304 struct compat_linux_dirent __user *previous;
35305+ struct file * file;
35306 int count;
35307 int error;
35308 };
35309@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35310 buf->error = -EOVERFLOW;
35311 return -EOVERFLOW;
35312 }
35313+
35314+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35315+ return 0;
35316+
35317 dirent = buf->previous;
35318 if (dirent) {
35319 if (__put_user(offset, &dirent->d_off))
35320@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35321 buf.previous = NULL;
35322 buf.count = count;
35323 buf.error = 0;
35324+ buf.file = file;
35325
35326 error = vfs_readdir(file, compat_filldir, &buf);
35327 if (error >= 0)
35328@@ -1006,6 +1018,7 @@ out:
35329 struct compat_getdents_callback64 {
35330 struct linux_dirent64 __user *current_dir;
35331 struct linux_dirent64 __user *previous;
35332+ struct file * file;
35333 int count;
35334 int error;
35335 };
35336@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35337 buf->error = -EINVAL; /* only used if we fail.. */
35338 if (reclen > buf->count)
35339 return -EINVAL;
35340+
35341+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35342+ return 0;
35343+
35344 dirent = buf->previous;
35345
35346 if (dirent) {
35347@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35348 buf.previous = NULL;
35349 buf.count = count;
35350 buf.error = 0;
35351+ buf.file = file;
35352
35353 error = vfs_readdir(file, compat_filldir64, &buf);
35354 if (error >= 0)
35355@@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35356 compat_uptr_t __user *envp,
35357 struct pt_regs * regs)
35358 {
35359+#ifdef CONFIG_GRKERNSEC
35360+ struct file *old_exec_file;
35361+ struct acl_subject_label *old_acl;
35362+ struct rlimit old_rlim[RLIM_NLIMITS];
35363+#endif
35364 struct linux_binprm *bprm;
35365 struct file *file;
35366 struct files_struct *displaced;
35367@@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35368 bprm->filename = filename;
35369 bprm->interp = filename;
35370
35371+ if (gr_process_user_ban()) {
35372+ retval = -EPERM;
35373+ goto out_file;
35374+ }
35375+
35376+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35377+ retval = -EAGAIN;
35378+ if (gr_handle_nproc())
35379+ goto out_file;
35380+ retval = -EACCES;
35381+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35382+ goto out_file;
35383+
35384 retval = bprm_mm_init(bprm);
35385 if (retval)
35386 goto out_file;
35387@@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35388 if (retval < 0)
35389 goto out;
35390
35391+ if (!gr_tpe_allow(file)) {
35392+ retval = -EACCES;
35393+ goto out;
35394+ }
35395+
35396+ if (gr_check_crash_exec(file)) {
35397+ retval = -EACCES;
35398+ goto out;
35399+ }
35400+
35401+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35402+
35403+ gr_handle_exec_args_compat(bprm, argv);
35404+
35405+#ifdef CONFIG_GRKERNSEC
35406+ old_acl = current->acl;
35407+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35408+ old_exec_file = current->exec_file;
35409+ get_file(file);
35410+ current->exec_file = file;
35411+#endif
35412+
35413+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35414+ bprm->unsafe & LSM_UNSAFE_SHARE);
35415+ if (retval < 0)
35416+ goto out_fail;
35417+
35418 retval = search_binary_handler(bprm, regs);
35419 if (retval < 0)
35420- goto out;
35421+ goto out_fail;
35422+#ifdef CONFIG_GRKERNSEC
35423+ if (old_exec_file)
35424+ fput(old_exec_file);
35425+#endif
35426
35427 /* execve succeeded */
35428 current->fs->in_exec = 0;
35429@@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35430 put_files_struct(displaced);
35431 return retval;
35432
35433+out_fail:
35434+#ifdef CONFIG_GRKERNSEC
35435+ current->acl = old_acl;
35436+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35437+ fput(current->exec_file);
35438+ current->exec_file = old_exec_file;
35439+#endif
35440+
35441 out:
35442 if (bprm->mm) {
35443 acct_arg_size(bprm, 0);
35444@@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35445 struct fdtable *fdt;
35446 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35447
35448+ pax_track_stack();
35449+
35450 if (n < 0)
35451 goto out_nofds;
35452
35453diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35454--- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35455+++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35456@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35457
35458 err = get_user(palp, &up->palette);
35459 err |= get_user(length, &up->length);
35460+ if (err)
35461+ return -EFAULT;
35462
35463 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35464 err = put_user(compat_ptr(palp), &up_native->palette);
35465@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35466 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35467 {
35468 unsigned int a, b;
35469- a = *(unsigned int *)p;
35470- b = *(unsigned int *)q;
35471+ a = *(const unsigned int *)p;
35472+ b = *(const unsigned int *)q;
35473 if (a > b)
35474 return 1;
35475 if (a < b)
35476diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35477--- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35478+++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35479@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35480 }
35481 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35482 struct configfs_dirent *next;
35483- const char * name;
35484+ const unsigned char * name;
35485+ char d_name[sizeof(next->s_dentry->d_iname)];
35486 int len;
35487 struct inode *inode = NULL;
35488
35489@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35490 continue;
35491
35492 name = configfs_get_name(next);
35493- len = strlen(name);
35494+ if (next->s_dentry && name == next->s_dentry->d_iname) {
35495+ len = next->s_dentry->d_name.len;
35496+ memcpy(d_name, name, len);
35497+ name = d_name;
35498+ } else
35499+ len = strlen(name);
35500
35501 /*
35502 * We'll have a dentry and an inode for
35503diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35504--- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35505+++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35506@@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35507 mempages -= reserve;
35508
35509 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35510- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35511+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35512
35513 dcache_init();
35514 inode_init();
35515diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35516--- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35517+++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35518@@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35519 old_fs = get_fs();
35520 set_fs(get_ds());
35521 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35522- (char __user *)lower_buf,
35523+ (__force char __user *)lower_buf,
35524 lower_bufsiz);
35525 set_fs(old_fs);
35526 if (rc < 0)
35527@@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35528 }
35529 old_fs = get_fs();
35530 set_fs(get_ds());
35531- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35532+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35533 set_fs(old_fs);
35534 if (rc < 0) {
35535 kfree(buf);
35536@@ -684,7 +684,7 @@ out:
35537 static void
35538 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35539 {
35540- char *buf = nd_get_link(nd);
35541+ const char *buf = nd_get_link(nd);
35542 if (!IS_ERR(buf)) {
35543 /* Free the char* */
35544 kfree(buf);
35545diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35546--- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35547+++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35548@@ -328,7 +328,7 @@ check_list:
35549 goto out_unlock_msg_ctx;
35550 i = 5;
35551 if (msg_ctx->msg) {
35552- if (copy_to_user(&buf[i], packet_length, packet_length_size))
35553+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35554 goto out_unlock_msg_ctx;
35555 i += packet_length_size;
35556 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35557diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35558--- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35559+++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35560@@ -55,12 +55,24 @@
35561 #include <linux/fs_struct.h>
35562 #include <linux/pipe_fs_i.h>
35563 #include <linux/oom.h>
35564+#include <linux/random.h>
35565+#include <linux/seq_file.h>
35566+
35567+#ifdef CONFIG_PAX_REFCOUNT
35568+#include <linux/kallsyms.h>
35569+#include <linux/kdebug.h>
35570+#endif
35571
35572 #include <asm/uaccess.h>
35573 #include <asm/mmu_context.h>
35574 #include <asm/tlb.h>
35575 #include "internal.h"
35576
35577+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35578+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35579+EXPORT_SYMBOL(pax_set_initial_flags_func);
35580+#endif
35581+
35582 int core_uses_pid;
35583 char core_pattern[CORENAME_MAX_SIZE] = "core";
35584 unsigned int core_pipe_limit;
35585@@ -70,7 +82,7 @@ struct core_name {
35586 char *corename;
35587 int used, size;
35588 };
35589-static atomic_t call_count = ATOMIC_INIT(1);
35590+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35591
35592 /* The maximal length of core_pattern is also specified in sysctl.c */
35593
35594@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35595 char *tmp = getname(library);
35596 int error = PTR_ERR(tmp);
35597 static const struct open_flags uselib_flags = {
35598- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35599+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35600 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35601 .intent = LOOKUP_OPEN
35602 };
35603@@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35604 int write)
35605 {
35606 struct page *page;
35607- int ret;
35608
35609-#ifdef CONFIG_STACK_GROWSUP
35610- if (write) {
35611- ret = expand_stack_downwards(bprm->vma, pos);
35612- if (ret < 0)
35613- return NULL;
35614- }
35615-#endif
35616- ret = get_user_pages(current, bprm->mm, pos,
35617- 1, write, 1, &page, NULL);
35618- if (ret <= 0)
35619+ if (0 > expand_stack_downwards(bprm->vma, pos))
35620+ return NULL;
35621+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35622 return NULL;
35623
35624 if (write) {
35625@@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35626 vma->vm_end = STACK_TOP_MAX;
35627 vma->vm_start = vma->vm_end - PAGE_SIZE;
35628 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35629+
35630+#ifdef CONFIG_PAX_SEGMEXEC
35631+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35632+#endif
35633+
35634 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35635 INIT_LIST_HEAD(&vma->anon_vma_chain);
35636
35637@@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35638 mm->stack_vm = mm->total_vm = 1;
35639 up_write(&mm->mmap_sem);
35640 bprm->p = vma->vm_end - sizeof(void *);
35641+
35642+#ifdef CONFIG_PAX_RANDUSTACK
35643+ if (randomize_va_space)
35644+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35645+#endif
35646+
35647 return 0;
35648 err:
35649 up_write(&mm->mmap_sem);
35650@@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35651 int r;
35652 mm_segment_t oldfs = get_fs();
35653 set_fs(KERNEL_DS);
35654- r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35655+ r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35656 set_fs(oldfs);
35657 return r;
35658 }
35659@@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35660 unsigned long new_end = old_end - shift;
35661 struct mmu_gather *tlb;
35662
35663- BUG_ON(new_start > new_end);
35664+ if (new_start >= new_end || new_start < mmap_min_addr)
35665+ return -ENOMEM;
35666
35667 /*
35668 * ensure there are no vmas between where we want to go
35669@@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35670 if (vma != find_vma(mm, new_start))
35671 return -EFAULT;
35672
35673+#ifdef CONFIG_PAX_SEGMEXEC
35674+ BUG_ON(pax_find_mirror_vma(vma));
35675+#endif
35676+
35677 /*
35678 * cover the whole range: [new_start, old_end)
35679 */
35680@@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35681 stack_top = arch_align_stack(stack_top);
35682 stack_top = PAGE_ALIGN(stack_top);
35683
35684- if (unlikely(stack_top < mmap_min_addr) ||
35685- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35686- return -ENOMEM;
35687-
35688 stack_shift = vma->vm_end - stack_top;
35689
35690 bprm->p -= stack_shift;
35691@@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35692 bprm->exec -= stack_shift;
35693
35694 down_write(&mm->mmap_sem);
35695+
35696+ /* Move stack pages down in memory. */
35697+ if (stack_shift) {
35698+ ret = shift_arg_pages(vma, stack_shift);
35699+ if (ret)
35700+ goto out_unlock;
35701+ }
35702+
35703 vm_flags = VM_STACK_FLAGS;
35704
35705+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35706+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35707+ vm_flags &= ~VM_EXEC;
35708+
35709+#ifdef CONFIG_PAX_MPROTECT
35710+ if (mm->pax_flags & MF_PAX_MPROTECT)
35711+ vm_flags &= ~VM_MAYEXEC;
35712+#endif
35713+
35714+ }
35715+#endif
35716+
35717 /*
35718 * Adjust stack execute permissions; explicitly enable for
35719 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35720@@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35721 goto out_unlock;
35722 BUG_ON(prev != vma);
35723
35724- /* Move stack pages down in memory. */
35725- if (stack_shift) {
35726- ret = shift_arg_pages(vma, stack_shift);
35727- if (ret)
35728- goto out_unlock;
35729- }
35730-
35731 /* mprotect_fixup is overkill to remove the temporary stack flags */
35732 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35733
35734@@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35735 struct file *file;
35736 int err;
35737 static const struct open_flags open_exec_flags = {
35738- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35739+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35740 .acc_mode = MAY_EXEC | MAY_OPEN,
35741 .intent = LOOKUP_OPEN
35742 };
35743@@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35744 old_fs = get_fs();
35745 set_fs(get_ds());
35746 /* The cast to a user pointer is valid due to the set_fs() */
35747- result = vfs_read(file, (void __user *)addr, count, &pos);
35748+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
35749 set_fs(old_fs);
35750 return result;
35751 }
35752@@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35753 }
35754 rcu_read_unlock();
35755
35756- if (p->fs->users > n_fs) {
35757+ if (atomic_read(&p->fs->users) > n_fs) {
35758 bprm->unsafe |= LSM_UNSAFE_SHARE;
35759 } else {
35760 res = -EAGAIN;
35761@@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35762 const char __user *const __user *envp,
35763 struct pt_regs * regs)
35764 {
35765+#ifdef CONFIG_GRKERNSEC
35766+ struct file *old_exec_file;
35767+ struct acl_subject_label *old_acl;
35768+ struct rlimit old_rlim[RLIM_NLIMITS];
35769+#endif
35770 struct linux_binprm *bprm;
35771 struct file *file;
35772 struct files_struct *displaced;
35773@@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35774 bprm->filename = filename;
35775 bprm->interp = filename;
35776
35777+ if (gr_process_user_ban()) {
35778+ retval = -EPERM;
35779+ goto out_file;
35780+ }
35781+
35782+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35783+
35784+ if (gr_handle_nproc()) {
35785+ retval = -EAGAIN;
35786+ goto out_file;
35787+ }
35788+
35789+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35790+ retval = -EACCES;
35791+ goto out_file;
35792+ }
35793+
35794 retval = bprm_mm_init(bprm);
35795 if (retval)
35796 goto out_file;
35797@@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35798 if (retval < 0)
35799 goto out;
35800
35801+ if (!gr_tpe_allow(file)) {
35802+ retval = -EACCES;
35803+ goto out;
35804+ }
35805+
35806+ if (gr_check_crash_exec(file)) {
35807+ retval = -EACCES;
35808+ goto out;
35809+ }
35810+
35811+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35812+
35813+ gr_handle_exec_args(bprm, argv);
35814+
35815+#ifdef CONFIG_GRKERNSEC
35816+ old_acl = current->acl;
35817+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35818+ old_exec_file = current->exec_file;
35819+ get_file(file);
35820+ current->exec_file = file;
35821+#endif
35822+
35823+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35824+ bprm->unsafe & LSM_UNSAFE_SHARE);
35825+ if (retval < 0)
35826+ goto out_fail;
35827+
35828 retval = search_binary_handler(bprm,regs);
35829 if (retval < 0)
35830- goto out;
35831+ goto out_fail;
35832+#ifdef CONFIG_GRKERNSEC
35833+ if (old_exec_file)
35834+ fput(old_exec_file);
35835+#endif
35836
35837 /* execve succeeded */
35838 current->fs->in_exec = 0;
35839@@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35840 put_files_struct(displaced);
35841 return retval;
35842
35843+out_fail:
35844+#ifdef CONFIG_GRKERNSEC
35845+ current->acl = old_acl;
35846+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35847+ fput(current->exec_file);
35848+ current->exec_file = old_exec_file;
35849+#endif
35850+
35851 out:
35852 if (bprm->mm) {
35853 acct_arg_size(bprm, 0);
35854@@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
35855 {
35856 char *old_corename = cn->corename;
35857
35858- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35859+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35860 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35861
35862 if (!cn->corename) {
35863@@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
35864 int pid_in_pattern = 0;
35865 int err = 0;
35866
35867- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35868+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35869 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35870 cn->used = 0;
35871
35872@@ -1645,6 +1735,219 @@ out:
35873 return ispipe;
35874 }
35875
35876+int pax_check_flags(unsigned long *flags)
35877+{
35878+ int retval = 0;
35879+
35880+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35881+ if (*flags & MF_PAX_SEGMEXEC)
35882+ {
35883+ *flags &= ~MF_PAX_SEGMEXEC;
35884+ retval = -EINVAL;
35885+ }
35886+#endif
35887+
35888+ if ((*flags & MF_PAX_PAGEEXEC)
35889+
35890+#ifdef CONFIG_PAX_PAGEEXEC
35891+ && (*flags & MF_PAX_SEGMEXEC)
35892+#endif
35893+
35894+ )
35895+ {
35896+ *flags &= ~MF_PAX_PAGEEXEC;
35897+ retval = -EINVAL;
35898+ }
35899+
35900+ if ((*flags & MF_PAX_MPROTECT)
35901+
35902+#ifdef CONFIG_PAX_MPROTECT
35903+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35904+#endif
35905+
35906+ )
35907+ {
35908+ *flags &= ~MF_PAX_MPROTECT;
35909+ retval = -EINVAL;
35910+ }
35911+
35912+ if ((*flags & MF_PAX_EMUTRAMP)
35913+
35914+#ifdef CONFIG_PAX_EMUTRAMP
35915+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35916+#endif
35917+
35918+ )
35919+ {
35920+ *flags &= ~MF_PAX_EMUTRAMP;
35921+ retval = -EINVAL;
35922+ }
35923+
35924+ return retval;
35925+}
35926+
35927+EXPORT_SYMBOL(pax_check_flags);
35928+
35929+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35930+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35931+{
35932+ struct task_struct *tsk = current;
35933+ struct mm_struct *mm = current->mm;
35934+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35935+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35936+ char *path_exec = NULL;
35937+ char *path_fault = NULL;
35938+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
35939+
35940+ if (buffer_exec && buffer_fault) {
35941+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35942+
35943+ down_read(&mm->mmap_sem);
35944+ vma = mm->mmap;
35945+ while (vma && (!vma_exec || !vma_fault)) {
35946+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35947+ vma_exec = vma;
35948+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35949+ vma_fault = vma;
35950+ vma = vma->vm_next;
35951+ }
35952+ if (vma_exec) {
35953+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
35954+ if (IS_ERR(path_exec))
35955+ path_exec = "<path too long>";
35956+ else {
35957+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
35958+ if (path_exec) {
35959+ *path_exec = 0;
35960+ path_exec = buffer_exec;
35961+ } else
35962+ path_exec = "<path too long>";
35963+ }
35964+ }
35965+ if (vma_fault) {
35966+ start = vma_fault->vm_start;
35967+ end = vma_fault->vm_end;
35968+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
35969+ if (vma_fault->vm_file) {
35970+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
35971+ if (IS_ERR(path_fault))
35972+ path_fault = "<path too long>";
35973+ else {
35974+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
35975+ if (path_fault) {
35976+ *path_fault = 0;
35977+ path_fault = buffer_fault;
35978+ } else
35979+ path_fault = "<path too long>";
35980+ }
35981+ } else
35982+ path_fault = "<anonymous mapping>";
35983+ }
35984+ up_read(&mm->mmap_sem);
35985+ }
35986+ if (tsk->signal->curr_ip)
35987+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
35988+ else
35989+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
35990+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
35991+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
35992+ task_uid(tsk), task_euid(tsk), pc, sp);
35993+ free_page((unsigned long)buffer_exec);
35994+ free_page((unsigned long)buffer_fault);
35995+ pax_report_insns(pc, sp);
35996+ do_coredump(SIGKILL, SIGKILL, regs);
35997+}
35998+#endif
35999+
36000+#ifdef CONFIG_PAX_REFCOUNT
36001+void pax_report_refcount_overflow(struct pt_regs *regs)
36002+{
36003+ if (current->signal->curr_ip)
36004+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36005+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36006+ else
36007+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36008+ current->comm, task_pid_nr(current), current_uid(), current_euid());
36009+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36010+ show_regs(regs);
36011+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36012+}
36013+#endif
36014+
36015+#ifdef CONFIG_PAX_USERCOPY
36016+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36017+int object_is_on_stack(const void *obj, unsigned long len)
36018+{
36019+ const void * const stack = task_stack_page(current);
36020+ const void * const stackend = stack + THREAD_SIZE;
36021+
36022+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36023+ const void *frame = NULL;
36024+ const void *oldframe;
36025+#endif
36026+
36027+ if (obj + len < obj)
36028+ return -1;
36029+
36030+ if (obj + len <= stack || stackend <= obj)
36031+ return 0;
36032+
36033+ if (obj < stack || stackend < obj + len)
36034+ return -1;
36035+
36036+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36037+ oldframe = __builtin_frame_address(1);
36038+ if (oldframe)
36039+ frame = __builtin_frame_address(2);
36040+ /*
36041+ low ----------------------------------------------> high
36042+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
36043+ ^----------------^
36044+ allow copies only within here
36045+ */
36046+ while (stack <= frame && frame < stackend) {
36047+ /* if obj + len extends past the last frame, this
36048+ check won't pass and the next frame will be 0,
36049+ causing us to bail out and correctly report
36050+ the copy as invalid
36051+ */
36052+ if (obj + len <= frame)
36053+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36054+ oldframe = frame;
36055+ frame = *(const void * const *)frame;
36056+ }
36057+ return -1;
36058+#else
36059+ return 1;
36060+#endif
36061+}
36062+
36063+
36064+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36065+{
36066+ if (current->signal->curr_ip)
36067+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36068+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36069+ else
36070+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36071+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36072+ dump_stack();
36073+ gr_handle_kernel_exploit();
36074+ do_group_exit(SIGKILL);
36075+}
36076+#endif
36077+
36078+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36079+void pax_track_stack(void)
36080+{
36081+ unsigned long sp = (unsigned long)&sp;
36082+ if (sp < current_thread_info()->lowest_stack &&
36083+ sp > (unsigned long)task_stack_page(current))
36084+ current_thread_info()->lowest_stack = sp;
36085+}
36086+EXPORT_SYMBOL(pax_track_stack);
36087+#endif
36088+
36089 static int zap_process(struct task_struct *start, int exit_code)
36090 {
36091 struct task_struct *t;
36092@@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36093 pipe = file->f_path.dentry->d_inode->i_pipe;
36094
36095 pipe_lock(pipe);
36096- pipe->readers++;
36097- pipe->writers--;
36098+ atomic_inc(&pipe->readers);
36099+ atomic_dec(&pipe->writers);
36100
36101- while ((pipe->readers > 1) && (!signal_pending(current))) {
36102+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36103 wake_up_interruptible_sync(&pipe->wait);
36104 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36105 pipe_wait(pipe);
36106 }
36107
36108- pipe->readers--;
36109- pipe->writers++;
36110+ atomic_dec(&pipe->readers);
36111+ atomic_inc(&pipe->writers);
36112 pipe_unlock(pipe);
36113
36114 }
36115@@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36116 int retval = 0;
36117 int flag = 0;
36118 int ispipe;
36119- static atomic_t core_dump_count = ATOMIC_INIT(0);
36120+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36121 struct coredump_params cprm = {
36122 .signr = signr,
36123 .regs = regs,
36124@@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36125
36126 audit_core_dumps(signr);
36127
36128+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36129+ gr_handle_brute_attach(current, cprm.mm_flags);
36130+
36131 binfmt = mm->binfmt;
36132 if (!binfmt || !binfmt->core_dump)
36133 goto fail;
36134@@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36135 goto fail_corename;
36136 }
36137
36138+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36139+
36140 if (ispipe) {
36141 int dump_count;
36142 char **helper_argv;
36143@@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36144 }
36145 cprm.limit = RLIM_INFINITY;
36146
36147- dump_count = atomic_inc_return(&core_dump_count);
36148+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
36149 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36150 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36151 task_tgid_vnr(current), current->comm);
36152@@ -2078,7 +2386,7 @@ close_fail:
36153 filp_close(cprm.file, NULL);
36154 fail_dropcount:
36155 if (ispipe)
36156- atomic_dec(&core_dump_count);
36157+ atomic_dec_unchecked(&core_dump_count);
36158 fail_unlock:
36159 kfree(cn.corename);
36160 fail_corename:
36161diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36162--- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36163+++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36164@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36165
36166 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36167 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36168- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36169+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36170 sbi->s_resuid != current_fsuid() &&
36171 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36172 return 0;
36173diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36174--- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36175+++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36176@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36177
36178 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36179 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36180- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36181+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36182 sbi->s_resuid != current_fsuid() &&
36183 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36184 return 0;
36185diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36186--- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36187+++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36188@@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36189 /* Hm, nope. Are (enough) root reserved blocks available? */
36190 if (sbi->s_resuid == current_fsuid() ||
36191 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36192- capable(CAP_SYS_RESOURCE)) {
36193+ capable_nolog(CAP_SYS_RESOURCE)) {
36194 if (free_blocks >= (nblocks + dirty_blocks))
36195 return 1;
36196 }
36197diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36198--- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36199+++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36200@@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36201 unsigned long s_mb_last_start;
36202
36203 /* stats for buddy allocator */
36204- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36205- atomic_t s_bal_success; /* we found long enough chunks */
36206- atomic_t s_bal_allocated; /* in blocks */
36207- atomic_t s_bal_ex_scanned; /* total extents scanned */
36208- atomic_t s_bal_goals; /* goal hits */
36209- atomic_t s_bal_breaks; /* too long searches */
36210- atomic_t s_bal_2orders; /* 2^order hits */
36211+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36212+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36213+ atomic_unchecked_t s_bal_allocated; /* in blocks */
36214+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36215+ atomic_unchecked_t s_bal_goals; /* goal hits */
36216+ atomic_unchecked_t s_bal_breaks; /* too long searches */
36217+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36218 spinlock_t s_bal_lock;
36219 unsigned long s_mb_buddies_generated;
36220 unsigned long long s_mb_generation_time;
36221- atomic_t s_mb_lost_chunks;
36222- atomic_t s_mb_preallocated;
36223- atomic_t s_mb_discarded;
36224+ atomic_unchecked_t s_mb_lost_chunks;
36225+ atomic_unchecked_t s_mb_preallocated;
36226+ atomic_unchecked_t s_mb_discarded;
36227 atomic_t s_lock_busy;
36228
36229 /* locality groups */
36230diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36231--- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36232+++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36233@@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36234 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36235
36236 if (EXT4_SB(sb)->s_mb_stats)
36237- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36238+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36239
36240 break;
36241 }
36242@@ -2147,7 +2147,7 @@ repeat:
36243 ac->ac_status = AC_STATUS_CONTINUE;
36244 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36245 cr = 3;
36246- atomic_inc(&sbi->s_mb_lost_chunks);
36247+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36248 goto repeat;
36249 }
36250 }
36251@@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36252 ext4_grpblk_t counters[16];
36253 } sg;
36254
36255+ pax_track_stack();
36256+
36257 group--;
36258 if (group == 0)
36259 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36260@@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36261 if (sbi->s_mb_stats) {
36262 printk(KERN_INFO
36263 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36264- atomic_read(&sbi->s_bal_allocated),
36265- atomic_read(&sbi->s_bal_reqs),
36266- atomic_read(&sbi->s_bal_success));
36267+ atomic_read_unchecked(&sbi->s_bal_allocated),
36268+ atomic_read_unchecked(&sbi->s_bal_reqs),
36269+ atomic_read_unchecked(&sbi->s_bal_success));
36270 printk(KERN_INFO
36271 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36272 "%u 2^N hits, %u breaks, %u lost\n",
36273- atomic_read(&sbi->s_bal_ex_scanned),
36274- atomic_read(&sbi->s_bal_goals),
36275- atomic_read(&sbi->s_bal_2orders),
36276- atomic_read(&sbi->s_bal_breaks),
36277- atomic_read(&sbi->s_mb_lost_chunks));
36278+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36279+ atomic_read_unchecked(&sbi->s_bal_goals),
36280+ atomic_read_unchecked(&sbi->s_bal_2orders),
36281+ atomic_read_unchecked(&sbi->s_bal_breaks),
36282+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36283 printk(KERN_INFO
36284 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36285 sbi->s_mb_buddies_generated++,
36286 sbi->s_mb_generation_time);
36287 printk(KERN_INFO
36288 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36289- atomic_read(&sbi->s_mb_preallocated),
36290- atomic_read(&sbi->s_mb_discarded));
36291+ atomic_read_unchecked(&sbi->s_mb_preallocated),
36292+ atomic_read_unchecked(&sbi->s_mb_discarded));
36293 }
36294
36295 free_percpu(sbi->s_locality_groups);
36296@@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36297 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36298
36299 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36300- atomic_inc(&sbi->s_bal_reqs);
36301- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36302+ atomic_inc_unchecked(&sbi->s_bal_reqs);
36303+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36304 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36305- atomic_inc(&sbi->s_bal_success);
36306- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36307+ atomic_inc_unchecked(&sbi->s_bal_success);
36308+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36309 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36310 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36311- atomic_inc(&sbi->s_bal_goals);
36312+ atomic_inc_unchecked(&sbi->s_bal_goals);
36313 if (ac->ac_found > sbi->s_mb_max_to_scan)
36314- atomic_inc(&sbi->s_bal_breaks);
36315+ atomic_inc_unchecked(&sbi->s_bal_breaks);
36316 }
36317
36318 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36319@@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36320 trace_ext4_mb_new_inode_pa(ac, pa);
36321
36322 ext4_mb_use_inode_pa(ac, pa);
36323- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36324+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36325
36326 ei = EXT4_I(ac->ac_inode);
36327 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36328@@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36329 trace_ext4_mb_new_group_pa(ac, pa);
36330
36331 ext4_mb_use_group_pa(ac, pa);
36332- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36333+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36334
36335 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36336 lg = ac->ac_lg;
36337@@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36338 * from the bitmap and continue.
36339 */
36340 }
36341- atomic_add(free, &sbi->s_mb_discarded);
36342+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
36343
36344 return err;
36345 }
36346@@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36347 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36348 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36349 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36350- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36351+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36352 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36353
36354 return 0;
36355diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36356--- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36357+++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36358@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36359 if (err)
36360 return err;
36361
36362+ if (gr_handle_chroot_fowner(pid, type))
36363+ return -ENOENT;
36364+ if (gr_check_protected_task_fowner(pid, type))
36365+ return -EACCES;
36366+
36367 f_modown(filp, pid, type, force);
36368 return 0;
36369 }
36370@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36371 switch (cmd) {
36372 case F_DUPFD:
36373 case F_DUPFD_CLOEXEC:
36374+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36375 if (arg >= rlimit(RLIMIT_NOFILE))
36376 break;
36377 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36378@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36379 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36380 * is defined as O_NONBLOCK on some platforms and not on others.
36381 */
36382- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36383+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36384 O_RDONLY | O_WRONLY | O_RDWR |
36385 O_CREAT | O_EXCL | O_NOCTTY |
36386 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36387 __O_SYNC | O_DSYNC | FASYNC |
36388 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36389 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36390- __FMODE_EXEC | O_PATH
36391+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
36392 ));
36393
36394 fasync_cache = kmem_cache_create("fasync_cache",
36395diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36396--- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36397+++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36398@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36399 */
36400 filp->f_op = &read_pipefifo_fops;
36401 pipe->r_counter++;
36402- if (pipe->readers++ == 0)
36403+ if (atomic_inc_return(&pipe->readers) == 1)
36404 wake_up_partner(inode);
36405
36406- if (!pipe->writers) {
36407+ if (!atomic_read(&pipe->writers)) {
36408 if ((filp->f_flags & O_NONBLOCK)) {
36409 /* suppress POLLHUP until we have
36410 * seen a writer */
36411@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36412 * errno=ENXIO when there is no process reading the FIFO.
36413 */
36414 ret = -ENXIO;
36415- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36416+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36417 goto err;
36418
36419 filp->f_op = &write_pipefifo_fops;
36420 pipe->w_counter++;
36421- if (!pipe->writers++)
36422+ if (atomic_inc_return(&pipe->writers) == 1)
36423 wake_up_partner(inode);
36424
36425- if (!pipe->readers) {
36426+ if (!atomic_read(&pipe->readers)) {
36427 wait_for_partner(inode, &pipe->r_counter);
36428 if (signal_pending(current))
36429 goto err_wr;
36430@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36431 */
36432 filp->f_op = &rdwr_pipefifo_fops;
36433
36434- pipe->readers++;
36435- pipe->writers++;
36436+ atomic_inc(&pipe->readers);
36437+ atomic_inc(&pipe->writers);
36438 pipe->r_counter++;
36439 pipe->w_counter++;
36440- if (pipe->readers == 1 || pipe->writers == 1)
36441+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36442 wake_up_partner(inode);
36443 break;
36444
36445@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36446 return 0;
36447
36448 err_rd:
36449- if (!--pipe->readers)
36450+ if (atomic_dec_and_test(&pipe->readers))
36451 wake_up_interruptible(&pipe->wait);
36452 ret = -ERESTARTSYS;
36453 goto err;
36454
36455 err_wr:
36456- if (!--pipe->writers)
36457+ if (atomic_dec_and_test(&pipe->writers))
36458 wake_up_interruptible(&pipe->wait);
36459 ret = -ERESTARTSYS;
36460 goto err;
36461
36462 err:
36463- if (!pipe->readers && !pipe->writers)
36464+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36465 free_pipe_info(inode);
36466
36467 err_nocleanup:
36468diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36469--- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36470+++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36471@@ -15,6 +15,7 @@
36472 #include <linux/slab.h>
36473 #include <linux/vmalloc.h>
36474 #include <linux/file.h>
36475+#include <linux/security.h>
36476 #include <linux/fdtable.h>
36477 #include <linux/bitops.h>
36478 #include <linux/interrupt.h>
36479@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36480 * N.B. For clone tasks sharing a files structure, this test
36481 * will limit the total number of files that can be opened.
36482 */
36483+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36484 if (nr >= rlimit(RLIMIT_NOFILE))
36485 return -EMFILE;
36486
36487diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36488--- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36489+++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36490@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36491 int len = dot ? dot - name : strlen(name);
36492
36493 fs = __get_fs_type(name, len);
36494+
36495+#ifdef CONFIG_GRKERNSEC_MODHARDEN
36496+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36497+#else
36498 if (!fs && (request_module("%.*s", len, name) == 0))
36499+#endif
36500 fs = __get_fs_type(name, len);
36501
36502 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36503diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36504--- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36505+++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36506@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36507 parent ? (char *) parent->def->name : "<no-parent>",
36508 def->name, netfs_data);
36509
36510- fscache_stat(&fscache_n_acquires);
36511+ fscache_stat_unchecked(&fscache_n_acquires);
36512
36513 /* if there's no parent cookie, then we don't create one here either */
36514 if (!parent) {
36515- fscache_stat(&fscache_n_acquires_null);
36516+ fscache_stat_unchecked(&fscache_n_acquires_null);
36517 _leave(" [no parent]");
36518 return NULL;
36519 }
36520@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36521 /* allocate and initialise a cookie */
36522 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36523 if (!cookie) {
36524- fscache_stat(&fscache_n_acquires_oom);
36525+ fscache_stat_unchecked(&fscache_n_acquires_oom);
36526 _leave(" [ENOMEM]");
36527 return NULL;
36528 }
36529@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36530
36531 switch (cookie->def->type) {
36532 case FSCACHE_COOKIE_TYPE_INDEX:
36533- fscache_stat(&fscache_n_cookie_index);
36534+ fscache_stat_unchecked(&fscache_n_cookie_index);
36535 break;
36536 case FSCACHE_COOKIE_TYPE_DATAFILE:
36537- fscache_stat(&fscache_n_cookie_data);
36538+ fscache_stat_unchecked(&fscache_n_cookie_data);
36539 break;
36540 default:
36541- fscache_stat(&fscache_n_cookie_special);
36542+ fscache_stat_unchecked(&fscache_n_cookie_special);
36543 break;
36544 }
36545
36546@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36547 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36548 atomic_dec(&parent->n_children);
36549 __fscache_cookie_put(cookie);
36550- fscache_stat(&fscache_n_acquires_nobufs);
36551+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36552 _leave(" = NULL");
36553 return NULL;
36554 }
36555 }
36556
36557- fscache_stat(&fscache_n_acquires_ok);
36558+ fscache_stat_unchecked(&fscache_n_acquires_ok);
36559 _leave(" = %p", cookie);
36560 return cookie;
36561 }
36562@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36563 cache = fscache_select_cache_for_object(cookie->parent);
36564 if (!cache) {
36565 up_read(&fscache_addremove_sem);
36566- fscache_stat(&fscache_n_acquires_no_cache);
36567+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36568 _leave(" = -ENOMEDIUM [no cache]");
36569 return -ENOMEDIUM;
36570 }
36571@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36572 object = cache->ops->alloc_object(cache, cookie);
36573 fscache_stat_d(&fscache_n_cop_alloc_object);
36574 if (IS_ERR(object)) {
36575- fscache_stat(&fscache_n_object_no_alloc);
36576+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
36577 ret = PTR_ERR(object);
36578 goto error;
36579 }
36580
36581- fscache_stat(&fscache_n_object_alloc);
36582+ fscache_stat_unchecked(&fscache_n_object_alloc);
36583
36584 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36585
36586@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36587 struct fscache_object *object;
36588 struct hlist_node *_p;
36589
36590- fscache_stat(&fscache_n_updates);
36591+ fscache_stat_unchecked(&fscache_n_updates);
36592
36593 if (!cookie) {
36594- fscache_stat(&fscache_n_updates_null);
36595+ fscache_stat_unchecked(&fscache_n_updates_null);
36596 _leave(" [no cookie]");
36597 return;
36598 }
36599@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36600 struct fscache_object *object;
36601 unsigned long event;
36602
36603- fscache_stat(&fscache_n_relinquishes);
36604+ fscache_stat_unchecked(&fscache_n_relinquishes);
36605 if (retire)
36606- fscache_stat(&fscache_n_relinquishes_retire);
36607+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36608
36609 if (!cookie) {
36610- fscache_stat(&fscache_n_relinquishes_null);
36611+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
36612 _leave(" [no cookie]");
36613 return;
36614 }
36615@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36616
36617 /* wait for the cookie to finish being instantiated (or to fail) */
36618 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36619- fscache_stat(&fscache_n_relinquishes_waitcrt);
36620+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36621 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36622 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36623 }
36624diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36625--- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36626+++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36627@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36628 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36629 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36630
36631-extern atomic_t fscache_n_op_pend;
36632-extern atomic_t fscache_n_op_run;
36633-extern atomic_t fscache_n_op_enqueue;
36634-extern atomic_t fscache_n_op_deferred_release;
36635-extern atomic_t fscache_n_op_release;
36636-extern atomic_t fscache_n_op_gc;
36637-extern atomic_t fscache_n_op_cancelled;
36638-extern atomic_t fscache_n_op_rejected;
36639-
36640-extern atomic_t fscache_n_attr_changed;
36641-extern atomic_t fscache_n_attr_changed_ok;
36642-extern atomic_t fscache_n_attr_changed_nobufs;
36643-extern atomic_t fscache_n_attr_changed_nomem;
36644-extern atomic_t fscache_n_attr_changed_calls;
36645-
36646-extern atomic_t fscache_n_allocs;
36647-extern atomic_t fscache_n_allocs_ok;
36648-extern atomic_t fscache_n_allocs_wait;
36649-extern atomic_t fscache_n_allocs_nobufs;
36650-extern atomic_t fscache_n_allocs_intr;
36651-extern atomic_t fscache_n_allocs_object_dead;
36652-extern atomic_t fscache_n_alloc_ops;
36653-extern atomic_t fscache_n_alloc_op_waits;
36654-
36655-extern atomic_t fscache_n_retrievals;
36656-extern atomic_t fscache_n_retrievals_ok;
36657-extern atomic_t fscache_n_retrievals_wait;
36658-extern atomic_t fscache_n_retrievals_nodata;
36659-extern atomic_t fscache_n_retrievals_nobufs;
36660-extern atomic_t fscache_n_retrievals_intr;
36661-extern atomic_t fscache_n_retrievals_nomem;
36662-extern atomic_t fscache_n_retrievals_object_dead;
36663-extern atomic_t fscache_n_retrieval_ops;
36664-extern atomic_t fscache_n_retrieval_op_waits;
36665-
36666-extern atomic_t fscache_n_stores;
36667-extern atomic_t fscache_n_stores_ok;
36668-extern atomic_t fscache_n_stores_again;
36669-extern atomic_t fscache_n_stores_nobufs;
36670-extern atomic_t fscache_n_stores_oom;
36671-extern atomic_t fscache_n_store_ops;
36672-extern atomic_t fscache_n_store_calls;
36673-extern atomic_t fscache_n_store_pages;
36674-extern atomic_t fscache_n_store_radix_deletes;
36675-extern atomic_t fscache_n_store_pages_over_limit;
36676-
36677-extern atomic_t fscache_n_store_vmscan_not_storing;
36678-extern atomic_t fscache_n_store_vmscan_gone;
36679-extern atomic_t fscache_n_store_vmscan_busy;
36680-extern atomic_t fscache_n_store_vmscan_cancelled;
36681-
36682-extern atomic_t fscache_n_marks;
36683-extern atomic_t fscache_n_uncaches;
36684-
36685-extern atomic_t fscache_n_acquires;
36686-extern atomic_t fscache_n_acquires_null;
36687-extern atomic_t fscache_n_acquires_no_cache;
36688-extern atomic_t fscache_n_acquires_ok;
36689-extern atomic_t fscache_n_acquires_nobufs;
36690-extern atomic_t fscache_n_acquires_oom;
36691-
36692-extern atomic_t fscache_n_updates;
36693-extern atomic_t fscache_n_updates_null;
36694-extern atomic_t fscache_n_updates_run;
36695-
36696-extern atomic_t fscache_n_relinquishes;
36697-extern atomic_t fscache_n_relinquishes_null;
36698-extern atomic_t fscache_n_relinquishes_waitcrt;
36699-extern atomic_t fscache_n_relinquishes_retire;
36700-
36701-extern atomic_t fscache_n_cookie_index;
36702-extern atomic_t fscache_n_cookie_data;
36703-extern atomic_t fscache_n_cookie_special;
36704-
36705-extern atomic_t fscache_n_object_alloc;
36706-extern atomic_t fscache_n_object_no_alloc;
36707-extern atomic_t fscache_n_object_lookups;
36708-extern atomic_t fscache_n_object_lookups_negative;
36709-extern atomic_t fscache_n_object_lookups_positive;
36710-extern atomic_t fscache_n_object_lookups_timed_out;
36711-extern atomic_t fscache_n_object_created;
36712-extern atomic_t fscache_n_object_avail;
36713-extern atomic_t fscache_n_object_dead;
36714-
36715-extern atomic_t fscache_n_checkaux_none;
36716-extern atomic_t fscache_n_checkaux_okay;
36717-extern atomic_t fscache_n_checkaux_update;
36718-extern atomic_t fscache_n_checkaux_obsolete;
36719+extern atomic_unchecked_t fscache_n_op_pend;
36720+extern atomic_unchecked_t fscache_n_op_run;
36721+extern atomic_unchecked_t fscache_n_op_enqueue;
36722+extern atomic_unchecked_t fscache_n_op_deferred_release;
36723+extern atomic_unchecked_t fscache_n_op_release;
36724+extern atomic_unchecked_t fscache_n_op_gc;
36725+extern atomic_unchecked_t fscache_n_op_cancelled;
36726+extern atomic_unchecked_t fscache_n_op_rejected;
36727+
36728+extern atomic_unchecked_t fscache_n_attr_changed;
36729+extern atomic_unchecked_t fscache_n_attr_changed_ok;
36730+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36731+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36732+extern atomic_unchecked_t fscache_n_attr_changed_calls;
36733+
36734+extern atomic_unchecked_t fscache_n_allocs;
36735+extern atomic_unchecked_t fscache_n_allocs_ok;
36736+extern atomic_unchecked_t fscache_n_allocs_wait;
36737+extern atomic_unchecked_t fscache_n_allocs_nobufs;
36738+extern atomic_unchecked_t fscache_n_allocs_intr;
36739+extern atomic_unchecked_t fscache_n_allocs_object_dead;
36740+extern atomic_unchecked_t fscache_n_alloc_ops;
36741+extern atomic_unchecked_t fscache_n_alloc_op_waits;
36742+
36743+extern atomic_unchecked_t fscache_n_retrievals;
36744+extern atomic_unchecked_t fscache_n_retrievals_ok;
36745+extern atomic_unchecked_t fscache_n_retrievals_wait;
36746+extern atomic_unchecked_t fscache_n_retrievals_nodata;
36747+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36748+extern atomic_unchecked_t fscache_n_retrievals_intr;
36749+extern atomic_unchecked_t fscache_n_retrievals_nomem;
36750+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36751+extern atomic_unchecked_t fscache_n_retrieval_ops;
36752+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36753+
36754+extern atomic_unchecked_t fscache_n_stores;
36755+extern atomic_unchecked_t fscache_n_stores_ok;
36756+extern atomic_unchecked_t fscache_n_stores_again;
36757+extern atomic_unchecked_t fscache_n_stores_nobufs;
36758+extern atomic_unchecked_t fscache_n_stores_oom;
36759+extern atomic_unchecked_t fscache_n_store_ops;
36760+extern atomic_unchecked_t fscache_n_store_calls;
36761+extern atomic_unchecked_t fscache_n_store_pages;
36762+extern atomic_unchecked_t fscache_n_store_radix_deletes;
36763+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36764+
36765+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36766+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36767+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36768+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36769+
36770+extern atomic_unchecked_t fscache_n_marks;
36771+extern atomic_unchecked_t fscache_n_uncaches;
36772+
36773+extern atomic_unchecked_t fscache_n_acquires;
36774+extern atomic_unchecked_t fscache_n_acquires_null;
36775+extern atomic_unchecked_t fscache_n_acquires_no_cache;
36776+extern atomic_unchecked_t fscache_n_acquires_ok;
36777+extern atomic_unchecked_t fscache_n_acquires_nobufs;
36778+extern atomic_unchecked_t fscache_n_acquires_oom;
36779+
36780+extern atomic_unchecked_t fscache_n_updates;
36781+extern atomic_unchecked_t fscache_n_updates_null;
36782+extern atomic_unchecked_t fscache_n_updates_run;
36783+
36784+extern atomic_unchecked_t fscache_n_relinquishes;
36785+extern atomic_unchecked_t fscache_n_relinquishes_null;
36786+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36787+extern atomic_unchecked_t fscache_n_relinquishes_retire;
36788+
36789+extern atomic_unchecked_t fscache_n_cookie_index;
36790+extern atomic_unchecked_t fscache_n_cookie_data;
36791+extern atomic_unchecked_t fscache_n_cookie_special;
36792+
36793+extern atomic_unchecked_t fscache_n_object_alloc;
36794+extern atomic_unchecked_t fscache_n_object_no_alloc;
36795+extern atomic_unchecked_t fscache_n_object_lookups;
36796+extern atomic_unchecked_t fscache_n_object_lookups_negative;
36797+extern atomic_unchecked_t fscache_n_object_lookups_positive;
36798+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36799+extern atomic_unchecked_t fscache_n_object_created;
36800+extern atomic_unchecked_t fscache_n_object_avail;
36801+extern atomic_unchecked_t fscache_n_object_dead;
36802+
36803+extern atomic_unchecked_t fscache_n_checkaux_none;
36804+extern atomic_unchecked_t fscache_n_checkaux_okay;
36805+extern atomic_unchecked_t fscache_n_checkaux_update;
36806+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36807
36808 extern atomic_t fscache_n_cop_alloc_object;
36809 extern atomic_t fscache_n_cop_lookup_object;
36810@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36811 atomic_inc(stat);
36812 }
36813
36814+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36815+{
36816+ atomic_inc_unchecked(stat);
36817+}
36818+
36819 static inline void fscache_stat_d(atomic_t *stat)
36820 {
36821 atomic_dec(stat);
36822@@ -267,6 +272,7 @@ extern const struct file_operations fsca
36823
36824 #define __fscache_stat(stat) (NULL)
36825 #define fscache_stat(stat) do {} while (0)
36826+#define fscache_stat_unchecked(stat) do {} while (0)
36827 #define fscache_stat_d(stat) do {} while (0)
36828 #endif
36829
36830diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36831--- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36832+++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36833@@ -128,7 +128,7 @@ static void fscache_object_state_machine
36834 /* update the object metadata on disk */
36835 case FSCACHE_OBJECT_UPDATING:
36836 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36837- fscache_stat(&fscache_n_updates_run);
36838+ fscache_stat_unchecked(&fscache_n_updates_run);
36839 fscache_stat(&fscache_n_cop_update_object);
36840 object->cache->ops->update_object(object);
36841 fscache_stat_d(&fscache_n_cop_update_object);
36842@@ -217,7 +217,7 @@ static void fscache_object_state_machine
36843 spin_lock(&object->lock);
36844 object->state = FSCACHE_OBJECT_DEAD;
36845 spin_unlock(&object->lock);
36846- fscache_stat(&fscache_n_object_dead);
36847+ fscache_stat_unchecked(&fscache_n_object_dead);
36848 goto terminal_transit;
36849
36850 /* handle the parent cache of this object being withdrawn from
36851@@ -232,7 +232,7 @@ static void fscache_object_state_machine
36852 spin_lock(&object->lock);
36853 object->state = FSCACHE_OBJECT_DEAD;
36854 spin_unlock(&object->lock);
36855- fscache_stat(&fscache_n_object_dead);
36856+ fscache_stat_unchecked(&fscache_n_object_dead);
36857 goto terminal_transit;
36858
36859 /* complain about the object being woken up once it is
36860@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36861 parent->cookie->def->name, cookie->def->name,
36862 object->cache->tag->name);
36863
36864- fscache_stat(&fscache_n_object_lookups);
36865+ fscache_stat_unchecked(&fscache_n_object_lookups);
36866 fscache_stat(&fscache_n_cop_lookup_object);
36867 ret = object->cache->ops->lookup_object(object);
36868 fscache_stat_d(&fscache_n_cop_lookup_object);
36869@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36870 if (ret == -ETIMEDOUT) {
36871 /* probably stuck behind another object, so move this one to
36872 * the back of the queue */
36873- fscache_stat(&fscache_n_object_lookups_timed_out);
36874+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36875 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36876 }
36877
36878@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36879
36880 spin_lock(&object->lock);
36881 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36882- fscache_stat(&fscache_n_object_lookups_negative);
36883+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36884
36885 /* transit here to allow write requests to begin stacking up
36886 * and read requests to begin returning ENODATA */
36887@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36888 * result, in which case there may be data available */
36889 spin_lock(&object->lock);
36890 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36891- fscache_stat(&fscache_n_object_lookups_positive);
36892+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36893
36894 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36895
36896@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36897 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36898 } else {
36899 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36900- fscache_stat(&fscache_n_object_created);
36901+ fscache_stat_unchecked(&fscache_n_object_created);
36902
36903 object->state = FSCACHE_OBJECT_AVAILABLE;
36904 spin_unlock(&object->lock);
36905@@ -602,7 +602,7 @@ static void fscache_object_available(str
36906 fscache_enqueue_dependents(object);
36907
36908 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36909- fscache_stat(&fscache_n_object_avail);
36910+ fscache_stat_unchecked(&fscache_n_object_avail);
36911
36912 _leave("");
36913 }
36914@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36915 enum fscache_checkaux result;
36916
36917 if (!object->cookie->def->check_aux) {
36918- fscache_stat(&fscache_n_checkaux_none);
36919+ fscache_stat_unchecked(&fscache_n_checkaux_none);
36920 return FSCACHE_CHECKAUX_OKAY;
36921 }
36922
36923@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36924 switch (result) {
36925 /* entry okay as is */
36926 case FSCACHE_CHECKAUX_OKAY:
36927- fscache_stat(&fscache_n_checkaux_okay);
36928+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
36929 break;
36930
36931 /* entry requires update */
36932 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36933- fscache_stat(&fscache_n_checkaux_update);
36934+ fscache_stat_unchecked(&fscache_n_checkaux_update);
36935 break;
36936
36937 /* entry requires deletion */
36938 case FSCACHE_CHECKAUX_OBSOLETE:
36939- fscache_stat(&fscache_n_checkaux_obsolete);
36940+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36941 break;
36942
36943 default:
36944diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
36945--- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
36946+++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
36947@@ -17,7 +17,7 @@
36948 #include <linux/slab.h>
36949 #include "internal.h"
36950
36951-atomic_t fscache_op_debug_id;
36952+atomic_unchecked_t fscache_op_debug_id;
36953 EXPORT_SYMBOL(fscache_op_debug_id);
36954
36955 /**
36956@@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
36957 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36958 ASSERTCMP(atomic_read(&op->usage), >, 0);
36959
36960- fscache_stat(&fscache_n_op_enqueue);
36961+ fscache_stat_unchecked(&fscache_n_op_enqueue);
36962 switch (op->flags & FSCACHE_OP_TYPE) {
36963 case FSCACHE_OP_ASYNC:
36964 _debug("queue async");
36965@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
36966 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
36967 if (op->processor)
36968 fscache_enqueue_operation(op);
36969- fscache_stat(&fscache_n_op_run);
36970+ fscache_stat_unchecked(&fscache_n_op_run);
36971 }
36972
36973 /*
36974@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
36975 if (object->n_ops > 1) {
36976 atomic_inc(&op->usage);
36977 list_add_tail(&op->pend_link, &object->pending_ops);
36978- fscache_stat(&fscache_n_op_pend);
36979+ fscache_stat_unchecked(&fscache_n_op_pend);
36980 } else if (!list_empty(&object->pending_ops)) {
36981 atomic_inc(&op->usage);
36982 list_add_tail(&op->pend_link, &object->pending_ops);
36983- fscache_stat(&fscache_n_op_pend);
36984+ fscache_stat_unchecked(&fscache_n_op_pend);
36985 fscache_start_operations(object);
36986 } else {
36987 ASSERTCMP(object->n_in_progress, ==, 0);
36988@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
36989 object->n_exclusive++; /* reads and writes must wait */
36990 atomic_inc(&op->usage);
36991 list_add_tail(&op->pend_link, &object->pending_ops);
36992- fscache_stat(&fscache_n_op_pend);
36993+ fscache_stat_unchecked(&fscache_n_op_pend);
36994 ret = 0;
36995 } else {
36996 /* not allowed to submit ops in any other state */
36997@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
36998 if (object->n_exclusive > 0) {
36999 atomic_inc(&op->usage);
37000 list_add_tail(&op->pend_link, &object->pending_ops);
37001- fscache_stat(&fscache_n_op_pend);
37002+ fscache_stat_unchecked(&fscache_n_op_pend);
37003 } else if (!list_empty(&object->pending_ops)) {
37004 atomic_inc(&op->usage);
37005 list_add_tail(&op->pend_link, &object->pending_ops);
37006- fscache_stat(&fscache_n_op_pend);
37007+ fscache_stat_unchecked(&fscache_n_op_pend);
37008 fscache_start_operations(object);
37009 } else {
37010 ASSERTCMP(object->n_exclusive, ==, 0);
37011@@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37012 object->n_ops++;
37013 atomic_inc(&op->usage);
37014 list_add_tail(&op->pend_link, &object->pending_ops);
37015- fscache_stat(&fscache_n_op_pend);
37016+ fscache_stat_unchecked(&fscache_n_op_pend);
37017 ret = 0;
37018 } else if (object->state == FSCACHE_OBJECT_DYING ||
37019 object->state == FSCACHE_OBJECT_LC_DYING ||
37020 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37021- fscache_stat(&fscache_n_op_rejected);
37022+ fscache_stat_unchecked(&fscache_n_op_rejected);
37023 ret = -ENOBUFS;
37024 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37025 fscache_report_unexpected_submission(object, op, ostate);
37026@@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37027
37028 ret = -EBUSY;
37029 if (!list_empty(&op->pend_link)) {
37030- fscache_stat(&fscache_n_op_cancelled);
37031+ fscache_stat_unchecked(&fscache_n_op_cancelled);
37032 list_del_init(&op->pend_link);
37033 object->n_ops--;
37034 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37035@@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37036 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37037 BUG();
37038
37039- fscache_stat(&fscache_n_op_release);
37040+ fscache_stat_unchecked(&fscache_n_op_release);
37041
37042 if (op->release) {
37043 op->release(op);
37044@@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37045 * lock, and defer it otherwise */
37046 if (!spin_trylock(&object->lock)) {
37047 _debug("defer put");
37048- fscache_stat(&fscache_n_op_deferred_release);
37049+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
37050
37051 cache = object->cache;
37052 spin_lock(&cache->op_gc_list_lock);
37053@@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37054
37055 _debug("GC DEFERRED REL OBJ%x OP%x",
37056 object->debug_id, op->debug_id);
37057- fscache_stat(&fscache_n_op_gc);
37058+ fscache_stat_unchecked(&fscache_n_op_gc);
37059
37060 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37061
37062diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37063--- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37064+++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37065@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37066 val = radix_tree_lookup(&cookie->stores, page->index);
37067 if (!val) {
37068 rcu_read_unlock();
37069- fscache_stat(&fscache_n_store_vmscan_not_storing);
37070+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37071 __fscache_uncache_page(cookie, page);
37072 return true;
37073 }
37074@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37075 spin_unlock(&cookie->stores_lock);
37076
37077 if (xpage) {
37078- fscache_stat(&fscache_n_store_vmscan_cancelled);
37079- fscache_stat(&fscache_n_store_radix_deletes);
37080+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37081+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37082 ASSERTCMP(xpage, ==, page);
37083 } else {
37084- fscache_stat(&fscache_n_store_vmscan_gone);
37085+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37086 }
37087
37088 wake_up_bit(&cookie->flags, 0);
37089@@ -107,7 +107,7 @@ page_busy:
37090 /* we might want to wait here, but that could deadlock the allocator as
37091 * the work threads writing to the cache may all end up sleeping
37092 * on memory allocation */
37093- fscache_stat(&fscache_n_store_vmscan_busy);
37094+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37095 return false;
37096 }
37097 EXPORT_SYMBOL(__fscache_maybe_release_page);
37098@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37099 FSCACHE_COOKIE_STORING_TAG);
37100 if (!radix_tree_tag_get(&cookie->stores, page->index,
37101 FSCACHE_COOKIE_PENDING_TAG)) {
37102- fscache_stat(&fscache_n_store_radix_deletes);
37103+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37104 xpage = radix_tree_delete(&cookie->stores, page->index);
37105 }
37106 spin_unlock(&cookie->stores_lock);
37107@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37108
37109 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37110
37111- fscache_stat(&fscache_n_attr_changed_calls);
37112+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37113
37114 if (fscache_object_is_active(object)) {
37115 fscache_set_op_state(op, "CallFS");
37116@@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37117
37118 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37119
37120- fscache_stat(&fscache_n_attr_changed);
37121+ fscache_stat_unchecked(&fscache_n_attr_changed);
37122
37123 op = kzalloc(sizeof(*op), GFP_KERNEL);
37124 if (!op) {
37125- fscache_stat(&fscache_n_attr_changed_nomem);
37126+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37127 _leave(" = -ENOMEM");
37128 return -ENOMEM;
37129 }
37130@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37131 if (fscache_submit_exclusive_op(object, op) < 0)
37132 goto nobufs;
37133 spin_unlock(&cookie->lock);
37134- fscache_stat(&fscache_n_attr_changed_ok);
37135+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37136 fscache_put_operation(op);
37137 _leave(" = 0");
37138 return 0;
37139@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37140 nobufs:
37141 spin_unlock(&cookie->lock);
37142 kfree(op);
37143- fscache_stat(&fscache_n_attr_changed_nobufs);
37144+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37145 _leave(" = %d", -ENOBUFS);
37146 return -ENOBUFS;
37147 }
37148@@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37149 /* allocate a retrieval operation and attempt to submit it */
37150 op = kzalloc(sizeof(*op), GFP_NOIO);
37151 if (!op) {
37152- fscache_stat(&fscache_n_retrievals_nomem);
37153+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37154 return NULL;
37155 }
37156
37157@@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37158 return 0;
37159 }
37160
37161- fscache_stat(&fscache_n_retrievals_wait);
37162+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
37163
37164 jif = jiffies;
37165 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37166 fscache_wait_bit_interruptible,
37167 TASK_INTERRUPTIBLE) != 0) {
37168- fscache_stat(&fscache_n_retrievals_intr);
37169+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37170 _leave(" = -ERESTARTSYS");
37171 return -ERESTARTSYS;
37172 }
37173@@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37174 */
37175 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37176 struct fscache_retrieval *op,
37177- atomic_t *stat_op_waits,
37178- atomic_t *stat_object_dead)
37179+ atomic_unchecked_t *stat_op_waits,
37180+ atomic_unchecked_t *stat_object_dead)
37181 {
37182 int ret;
37183
37184@@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37185 goto check_if_dead;
37186
37187 _debug(">>> WT");
37188- fscache_stat(stat_op_waits);
37189+ fscache_stat_unchecked(stat_op_waits);
37190 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37191 fscache_wait_bit_interruptible,
37192 TASK_INTERRUPTIBLE) < 0) {
37193@@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37194
37195 check_if_dead:
37196 if (unlikely(fscache_object_is_dead(object))) {
37197- fscache_stat(stat_object_dead);
37198+ fscache_stat_unchecked(stat_object_dead);
37199 return -ENOBUFS;
37200 }
37201 return 0;
37202@@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37203
37204 _enter("%p,%p,,,", cookie, page);
37205
37206- fscache_stat(&fscache_n_retrievals);
37207+ fscache_stat_unchecked(&fscache_n_retrievals);
37208
37209 if (hlist_empty(&cookie->backing_objects))
37210 goto nobufs;
37211@@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37212 goto nobufs_unlock;
37213 spin_unlock(&cookie->lock);
37214
37215- fscache_stat(&fscache_n_retrieval_ops);
37216+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37217
37218 /* pin the netfs read context in case we need to do the actual netfs
37219 * read because we've encountered a cache read failure */
37220@@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37221
37222 error:
37223 if (ret == -ENOMEM)
37224- fscache_stat(&fscache_n_retrievals_nomem);
37225+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37226 else if (ret == -ERESTARTSYS)
37227- fscache_stat(&fscache_n_retrievals_intr);
37228+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37229 else if (ret == -ENODATA)
37230- fscache_stat(&fscache_n_retrievals_nodata);
37231+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37232 else if (ret < 0)
37233- fscache_stat(&fscache_n_retrievals_nobufs);
37234+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37235 else
37236- fscache_stat(&fscache_n_retrievals_ok);
37237+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37238
37239 fscache_put_retrieval(op);
37240 _leave(" = %d", ret);
37241@@ -434,7 +434,7 @@ nobufs_unlock:
37242 spin_unlock(&cookie->lock);
37243 kfree(op);
37244 nobufs:
37245- fscache_stat(&fscache_n_retrievals_nobufs);
37246+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37247 _leave(" = -ENOBUFS");
37248 return -ENOBUFS;
37249 }
37250@@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37251
37252 _enter("%p,,%d,,,", cookie, *nr_pages);
37253
37254- fscache_stat(&fscache_n_retrievals);
37255+ fscache_stat_unchecked(&fscache_n_retrievals);
37256
37257 if (hlist_empty(&cookie->backing_objects))
37258 goto nobufs;
37259@@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37260 goto nobufs_unlock;
37261 spin_unlock(&cookie->lock);
37262
37263- fscache_stat(&fscache_n_retrieval_ops);
37264+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37265
37266 /* pin the netfs read context in case we need to do the actual netfs
37267 * read because we've encountered a cache read failure */
37268@@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37269
37270 error:
37271 if (ret == -ENOMEM)
37272- fscache_stat(&fscache_n_retrievals_nomem);
37273+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37274 else if (ret == -ERESTARTSYS)
37275- fscache_stat(&fscache_n_retrievals_intr);
37276+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37277 else if (ret == -ENODATA)
37278- fscache_stat(&fscache_n_retrievals_nodata);
37279+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37280 else if (ret < 0)
37281- fscache_stat(&fscache_n_retrievals_nobufs);
37282+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37283 else
37284- fscache_stat(&fscache_n_retrievals_ok);
37285+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37286
37287 fscache_put_retrieval(op);
37288 _leave(" = %d", ret);
37289@@ -551,7 +551,7 @@ nobufs_unlock:
37290 spin_unlock(&cookie->lock);
37291 kfree(op);
37292 nobufs:
37293- fscache_stat(&fscache_n_retrievals_nobufs);
37294+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37295 _leave(" = -ENOBUFS");
37296 return -ENOBUFS;
37297 }
37298@@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37299
37300 _enter("%p,%p,,,", cookie, page);
37301
37302- fscache_stat(&fscache_n_allocs);
37303+ fscache_stat_unchecked(&fscache_n_allocs);
37304
37305 if (hlist_empty(&cookie->backing_objects))
37306 goto nobufs;
37307@@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37308 goto nobufs_unlock;
37309 spin_unlock(&cookie->lock);
37310
37311- fscache_stat(&fscache_n_alloc_ops);
37312+ fscache_stat_unchecked(&fscache_n_alloc_ops);
37313
37314 ret = fscache_wait_for_retrieval_activation(
37315 object, op,
37316@@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37317
37318 error:
37319 if (ret == -ERESTARTSYS)
37320- fscache_stat(&fscache_n_allocs_intr);
37321+ fscache_stat_unchecked(&fscache_n_allocs_intr);
37322 else if (ret < 0)
37323- fscache_stat(&fscache_n_allocs_nobufs);
37324+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37325 else
37326- fscache_stat(&fscache_n_allocs_ok);
37327+ fscache_stat_unchecked(&fscache_n_allocs_ok);
37328
37329 fscache_put_retrieval(op);
37330 _leave(" = %d", ret);
37331@@ -632,7 +632,7 @@ nobufs_unlock:
37332 spin_unlock(&cookie->lock);
37333 kfree(op);
37334 nobufs:
37335- fscache_stat(&fscache_n_allocs_nobufs);
37336+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37337 _leave(" = -ENOBUFS");
37338 return -ENOBUFS;
37339 }
37340@@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37341
37342 spin_lock(&cookie->stores_lock);
37343
37344- fscache_stat(&fscache_n_store_calls);
37345+ fscache_stat_unchecked(&fscache_n_store_calls);
37346
37347 /* find a page to store */
37348 page = NULL;
37349@@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37350 page = results[0];
37351 _debug("gang %d [%lx]", n, page->index);
37352 if (page->index > op->store_limit) {
37353- fscache_stat(&fscache_n_store_pages_over_limit);
37354+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37355 goto superseded;
37356 }
37357
37358@@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37359 spin_unlock(&object->lock);
37360
37361 fscache_set_op_state(&op->op, "Store");
37362- fscache_stat(&fscache_n_store_pages);
37363+ fscache_stat_unchecked(&fscache_n_store_pages);
37364 fscache_stat(&fscache_n_cop_write_page);
37365 ret = object->cache->ops->write_page(op, page);
37366 fscache_stat_d(&fscache_n_cop_write_page);
37367@@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37368 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37369 ASSERT(PageFsCache(page));
37370
37371- fscache_stat(&fscache_n_stores);
37372+ fscache_stat_unchecked(&fscache_n_stores);
37373
37374 op = kzalloc(sizeof(*op), GFP_NOIO);
37375 if (!op)
37376@@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37377 spin_unlock(&cookie->stores_lock);
37378 spin_unlock(&object->lock);
37379
37380- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37381+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37382 op->store_limit = object->store_limit;
37383
37384 if (fscache_submit_op(object, &op->op) < 0)
37385@@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37386
37387 spin_unlock(&cookie->lock);
37388 radix_tree_preload_end();
37389- fscache_stat(&fscache_n_store_ops);
37390- fscache_stat(&fscache_n_stores_ok);
37391+ fscache_stat_unchecked(&fscache_n_store_ops);
37392+ fscache_stat_unchecked(&fscache_n_stores_ok);
37393
37394 /* the work queue now carries its own ref on the object */
37395 fscache_put_operation(&op->op);
37396@@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37397 return 0;
37398
37399 already_queued:
37400- fscache_stat(&fscache_n_stores_again);
37401+ fscache_stat_unchecked(&fscache_n_stores_again);
37402 already_pending:
37403 spin_unlock(&cookie->stores_lock);
37404 spin_unlock(&object->lock);
37405 spin_unlock(&cookie->lock);
37406 radix_tree_preload_end();
37407 kfree(op);
37408- fscache_stat(&fscache_n_stores_ok);
37409+ fscache_stat_unchecked(&fscache_n_stores_ok);
37410 _leave(" = 0");
37411 return 0;
37412
37413@@ -864,14 +864,14 @@ nobufs:
37414 spin_unlock(&cookie->lock);
37415 radix_tree_preload_end();
37416 kfree(op);
37417- fscache_stat(&fscache_n_stores_nobufs);
37418+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
37419 _leave(" = -ENOBUFS");
37420 return -ENOBUFS;
37421
37422 nomem_free:
37423 kfree(op);
37424 nomem:
37425- fscache_stat(&fscache_n_stores_oom);
37426+ fscache_stat_unchecked(&fscache_n_stores_oom);
37427 _leave(" = -ENOMEM");
37428 return -ENOMEM;
37429 }
37430@@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37431 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37432 ASSERTCMP(page, !=, NULL);
37433
37434- fscache_stat(&fscache_n_uncaches);
37435+ fscache_stat_unchecked(&fscache_n_uncaches);
37436
37437 /* cache withdrawal may beat us to it */
37438 if (!PageFsCache(page))
37439@@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37440 unsigned long loop;
37441
37442 #ifdef CONFIG_FSCACHE_STATS
37443- atomic_add(pagevec->nr, &fscache_n_marks);
37444+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37445 #endif
37446
37447 for (loop = 0; loop < pagevec->nr; loop++) {
37448diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37449--- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37450+++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37451@@ -18,95 +18,95 @@
37452 /*
37453 * operation counters
37454 */
37455-atomic_t fscache_n_op_pend;
37456-atomic_t fscache_n_op_run;
37457-atomic_t fscache_n_op_enqueue;
37458-atomic_t fscache_n_op_requeue;
37459-atomic_t fscache_n_op_deferred_release;
37460-atomic_t fscache_n_op_release;
37461-atomic_t fscache_n_op_gc;
37462-atomic_t fscache_n_op_cancelled;
37463-atomic_t fscache_n_op_rejected;
37464-
37465-atomic_t fscache_n_attr_changed;
37466-atomic_t fscache_n_attr_changed_ok;
37467-atomic_t fscache_n_attr_changed_nobufs;
37468-atomic_t fscache_n_attr_changed_nomem;
37469-atomic_t fscache_n_attr_changed_calls;
37470-
37471-atomic_t fscache_n_allocs;
37472-atomic_t fscache_n_allocs_ok;
37473-atomic_t fscache_n_allocs_wait;
37474-atomic_t fscache_n_allocs_nobufs;
37475-atomic_t fscache_n_allocs_intr;
37476-atomic_t fscache_n_allocs_object_dead;
37477-atomic_t fscache_n_alloc_ops;
37478-atomic_t fscache_n_alloc_op_waits;
37479-
37480-atomic_t fscache_n_retrievals;
37481-atomic_t fscache_n_retrievals_ok;
37482-atomic_t fscache_n_retrievals_wait;
37483-atomic_t fscache_n_retrievals_nodata;
37484-atomic_t fscache_n_retrievals_nobufs;
37485-atomic_t fscache_n_retrievals_intr;
37486-atomic_t fscache_n_retrievals_nomem;
37487-atomic_t fscache_n_retrievals_object_dead;
37488-atomic_t fscache_n_retrieval_ops;
37489-atomic_t fscache_n_retrieval_op_waits;
37490-
37491-atomic_t fscache_n_stores;
37492-atomic_t fscache_n_stores_ok;
37493-atomic_t fscache_n_stores_again;
37494-atomic_t fscache_n_stores_nobufs;
37495-atomic_t fscache_n_stores_oom;
37496-atomic_t fscache_n_store_ops;
37497-atomic_t fscache_n_store_calls;
37498-atomic_t fscache_n_store_pages;
37499-atomic_t fscache_n_store_radix_deletes;
37500-atomic_t fscache_n_store_pages_over_limit;
37501-
37502-atomic_t fscache_n_store_vmscan_not_storing;
37503-atomic_t fscache_n_store_vmscan_gone;
37504-atomic_t fscache_n_store_vmscan_busy;
37505-atomic_t fscache_n_store_vmscan_cancelled;
37506-
37507-atomic_t fscache_n_marks;
37508-atomic_t fscache_n_uncaches;
37509-
37510-atomic_t fscache_n_acquires;
37511-atomic_t fscache_n_acquires_null;
37512-atomic_t fscache_n_acquires_no_cache;
37513-atomic_t fscache_n_acquires_ok;
37514-atomic_t fscache_n_acquires_nobufs;
37515-atomic_t fscache_n_acquires_oom;
37516-
37517-atomic_t fscache_n_updates;
37518-atomic_t fscache_n_updates_null;
37519-atomic_t fscache_n_updates_run;
37520-
37521-atomic_t fscache_n_relinquishes;
37522-atomic_t fscache_n_relinquishes_null;
37523-atomic_t fscache_n_relinquishes_waitcrt;
37524-atomic_t fscache_n_relinquishes_retire;
37525-
37526-atomic_t fscache_n_cookie_index;
37527-atomic_t fscache_n_cookie_data;
37528-atomic_t fscache_n_cookie_special;
37529-
37530-atomic_t fscache_n_object_alloc;
37531-atomic_t fscache_n_object_no_alloc;
37532-atomic_t fscache_n_object_lookups;
37533-atomic_t fscache_n_object_lookups_negative;
37534-atomic_t fscache_n_object_lookups_positive;
37535-atomic_t fscache_n_object_lookups_timed_out;
37536-atomic_t fscache_n_object_created;
37537-atomic_t fscache_n_object_avail;
37538-atomic_t fscache_n_object_dead;
37539-
37540-atomic_t fscache_n_checkaux_none;
37541-atomic_t fscache_n_checkaux_okay;
37542-atomic_t fscache_n_checkaux_update;
37543-atomic_t fscache_n_checkaux_obsolete;
37544+atomic_unchecked_t fscache_n_op_pend;
37545+atomic_unchecked_t fscache_n_op_run;
37546+atomic_unchecked_t fscache_n_op_enqueue;
37547+atomic_unchecked_t fscache_n_op_requeue;
37548+atomic_unchecked_t fscache_n_op_deferred_release;
37549+atomic_unchecked_t fscache_n_op_release;
37550+atomic_unchecked_t fscache_n_op_gc;
37551+atomic_unchecked_t fscache_n_op_cancelled;
37552+atomic_unchecked_t fscache_n_op_rejected;
37553+
37554+atomic_unchecked_t fscache_n_attr_changed;
37555+atomic_unchecked_t fscache_n_attr_changed_ok;
37556+atomic_unchecked_t fscache_n_attr_changed_nobufs;
37557+atomic_unchecked_t fscache_n_attr_changed_nomem;
37558+atomic_unchecked_t fscache_n_attr_changed_calls;
37559+
37560+atomic_unchecked_t fscache_n_allocs;
37561+atomic_unchecked_t fscache_n_allocs_ok;
37562+atomic_unchecked_t fscache_n_allocs_wait;
37563+atomic_unchecked_t fscache_n_allocs_nobufs;
37564+atomic_unchecked_t fscache_n_allocs_intr;
37565+atomic_unchecked_t fscache_n_allocs_object_dead;
37566+atomic_unchecked_t fscache_n_alloc_ops;
37567+atomic_unchecked_t fscache_n_alloc_op_waits;
37568+
37569+atomic_unchecked_t fscache_n_retrievals;
37570+atomic_unchecked_t fscache_n_retrievals_ok;
37571+atomic_unchecked_t fscache_n_retrievals_wait;
37572+atomic_unchecked_t fscache_n_retrievals_nodata;
37573+atomic_unchecked_t fscache_n_retrievals_nobufs;
37574+atomic_unchecked_t fscache_n_retrievals_intr;
37575+atomic_unchecked_t fscache_n_retrievals_nomem;
37576+atomic_unchecked_t fscache_n_retrievals_object_dead;
37577+atomic_unchecked_t fscache_n_retrieval_ops;
37578+atomic_unchecked_t fscache_n_retrieval_op_waits;
37579+
37580+atomic_unchecked_t fscache_n_stores;
37581+atomic_unchecked_t fscache_n_stores_ok;
37582+atomic_unchecked_t fscache_n_stores_again;
37583+atomic_unchecked_t fscache_n_stores_nobufs;
37584+atomic_unchecked_t fscache_n_stores_oom;
37585+atomic_unchecked_t fscache_n_store_ops;
37586+atomic_unchecked_t fscache_n_store_calls;
37587+atomic_unchecked_t fscache_n_store_pages;
37588+atomic_unchecked_t fscache_n_store_radix_deletes;
37589+atomic_unchecked_t fscache_n_store_pages_over_limit;
37590+
37591+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37592+atomic_unchecked_t fscache_n_store_vmscan_gone;
37593+atomic_unchecked_t fscache_n_store_vmscan_busy;
37594+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37595+
37596+atomic_unchecked_t fscache_n_marks;
37597+atomic_unchecked_t fscache_n_uncaches;
37598+
37599+atomic_unchecked_t fscache_n_acquires;
37600+atomic_unchecked_t fscache_n_acquires_null;
37601+atomic_unchecked_t fscache_n_acquires_no_cache;
37602+atomic_unchecked_t fscache_n_acquires_ok;
37603+atomic_unchecked_t fscache_n_acquires_nobufs;
37604+atomic_unchecked_t fscache_n_acquires_oom;
37605+
37606+atomic_unchecked_t fscache_n_updates;
37607+atomic_unchecked_t fscache_n_updates_null;
37608+atomic_unchecked_t fscache_n_updates_run;
37609+
37610+atomic_unchecked_t fscache_n_relinquishes;
37611+atomic_unchecked_t fscache_n_relinquishes_null;
37612+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37613+atomic_unchecked_t fscache_n_relinquishes_retire;
37614+
37615+atomic_unchecked_t fscache_n_cookie_index;
37616+atomic_unchecked_t fscache_n_cookie_data;
37617+atomic_unchecked_t fscache_n_cookie_special;
37618+
37619+atomic_unchecked_t fscache_n_object_alloc;
37620+atomic_unchecked_t fscache_n_object_no_alloc;
37621+atomic_unchecked_t fscache_n_object_lookups;
37622+atomic_unchecked_t fscache_n_object_lookups_negative;
37623+atomic_unchecked_t fscache_n_object_lookups_positive;
37624+atomic_unchecked_t fscache_n_object_lookups_timed_out;
37625+atomic_unchecked_t fscache_n_object_created;
37626+atomic_unchecked_t fscache_n_object_avail;
37627+atomic_unchecked_t fscache_n_object_dead;
37628+
37629+atomic_unchecked_t fscache_n_checkaux_none;
37630+atomic_unchecked_t fscache_n_checkaux_okay;
37631+atomic_unchecked_t fscache_n_checkaux_update;
37632+atomic_unchecked_t fscache_n_checkaux_obsolete;
37633
37634 atomic_t fscache_n_cop_alloc_object;
37635 atomic_t fscache_n_cop_lookup_object;
37636@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37637 seq_puts(m, "FS-Cache statistics\n");
37638
37639 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37640- atomic_read(&fscache_n_cookie_index),
37641- atomic_read(&fscache_n_cookie_data),
37642- atomic_read(&fscache_n_cookie_special));
37643+ atomic_read_unchecked(&fscache_n_cookie_index),
37644+ atomic_read_unchecked(&fscache_n_cookie_data),
37645+ atomic_read_unchecked(&fscache_n_cookie_special));
37646
37647 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37648- atomic_read(&fscache_n_object_alloc),
37649- atomic_read(&fscache_n_object_no_alloc),
37650- atomic_read(&fscache_n_object_avail),
37651- atomic_read(&fscache_n_object_dead));
37652+ atomic_read_unchecked(&fscache_n_object_alloc),
37653+ atomic_read_unchecked(&fscache_n_object_no_alloc),
37654+ atomic_read_unchecked(&fscache_n_object_avail),
37655+ atomic_read_unchecked(&fscache_n_object_dead));
37656 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37657- atomic_read(&fscache_n_checkaux_none),
37658- atomic_read(&fscache_n_checkaux_okay),
37659- atomic_read(&fscache_n_checkaux_update),
37660- atomic_read(&fscache_n_checkaux_obsolete));
37661+ atomic_read_unchecked(&fscache_n_checkaux_none),
37662+ atomic_read_unchecked(&fscache_n_checkaux_okay),
37663+ atomic_read_unchecked(&fscache_n_checkaux_update),
37664+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37665
37666 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37667- atomic_read(&fscache_n_marks),
37668- atomic_read(&fscache_n_uncaches));
37669+ atomic_read_unchecked(&fscache_n_marks),
37670+ atomic_read_unchecked(&fscache_n_uncaches));
37671
37672 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37673 " oom=%u\n",
37674- atomic_read(&fscache_n_acquires),
37675- atomic_read(&fscache_n_acquires_null),
37676- atomic_read(&fscache_n_acquires_no_cache),
37677- atomic_read(&fscache_n_acquires_ok),
37678- atomic_read(&fscache_n_acquires_nobufs),
37679- atomic_read(&fscache_n_acquires_oom));
37680+ atomic_read_unchecked(&fscache_n_acquires),
37681+ atomic_read_unchecked(&fscache_n_acquires_null),
37682+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
37683+ atomic_read_unchecked(&fscache_n_acquires_ok),
37684+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
37685+ atomic_read_unchecked(&fscache_n_acquires_oom));
37686
37687 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37688- atomic_read(&fscache_n_object_lookups),
37689- atomic_read(&fscache_n_object_lookups_negative),
37690- atomic_read(&fscache_n_object_lookups_positive),
37691- atomic_read(&fscache_n_object_created),
37692- atomic_read(&fscache_n_object_lookups_timed_out));
37693+ atomic_read_unchecked(&fscache_n_object_lookups),
37694+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
37695+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
37696+ atomic_read_unchecked(&fscache_n_object_created),
37697+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37698
37699 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37700- atomic_read(&fscache_n_updates),
37701- atomic_read(&fscache_n_updates_null),
37702- atomic_read(&fscache_n_updates_run));
37703+ atomic_read_unchecked(&fscache_n_updates),
37704+ atomic_read_unchecked(&fscache_n_updates_null),
37705+ atomic_read_unchecked(&fscache_n_updates_run));
37706
37707 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37708- atomic_read(&fscache_n_relinquishes),
37709- atomic_read(&fscache_n_relinquishes_null),
37710- atomic_read(&fscache_n_relinquishes_waitcrt),
37711- atomic_read(&fscache_n_relinquishes_retire));
37712+ atomic_read_unchecked(&fscache_n_relinquishes),
37713+ atomic_read_unchecked(&fscache_n_relinquishes_null),
37714+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37715+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
37716
37717 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37718- atomic_read(&fscache_n_attr_changed),
37719- atomic_read(&fscache_n_attr_changed_ok),
37720- atomic_read(&fscache_n_attr_changed_nobufs),
37721- atomic_read(&fscache_n_attr_changed_nomem),
37722- atomic_read(&fscache_n_attr_changed_calls));
37723+ atomic_read_unchecked(&fscache_n_attr_changed),
37724+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
37725+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37726+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37727+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
37728
37729 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37730- atomic_read(&fscache_n_allocs),
37731- atomic_read(&fscache_n_allocs_ok),
37732- atomic_read(&fscache_n_allocs_wait),
37733- atomic_read(&fscache_n_allocs_nobufs),
37734- atomic_read(&fscache_n_allocs_intr));
37735+ atomic_read_unchecked(&fscache_n_allocs),
37736+ atomic_read_unchecked(&fscache_n_allocs_ok),
37737+ atomic_read_unchecked(&fscache_n_allocs_wait),
37738+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
37739+ atomic_read_unchecked(&fscache_n_allocs_intr));
37740 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37741- atomic_read(&fscache_n_alloc_ops),
37742- atomic_read(&fscache_n_alloc_op_waits),
37743- atomic_read(&fscache_n_allocs_object_dead));
37744+ atomic_read_unchecked(&fscache_n_alloc_ops),
37745+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
37746+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
37747
37748 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37749 " int=%u oom=%u\n",
37750- atomic_read(&fscache_n_retrievals),
37751- atomic_read(&fscache_n_retrievals_ok),
37752- atomic_read(&fscache_n_retrievals_wait),
37753- atomic_read(&fscache_n_retrievals_nodata),
37754- atomic_read(&fscache_n_retrievals_nobufs),
37755- atomic_read(&fscache_n_retrievals_intr),
37756- atomic_read(&fscache_n_retrievals_nomem));
37757+ atomic_read_unchecked(&fscache_n_retrievals),
37758+ atomic_read_unchecked(&fscache_n_retrievals_ok),
37759+ atomic_read_unchecked(&fscache_n_retrievals_wait),
37760+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
37761+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37762+ atomic_read_unchecked(&fscache_n_retrievals_intr),
37763+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
37764 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37765- atomic_read(&fscache_n_retrieval_ops),
37766- atomic_read(&fscache_n_retrieval_op_waits),
37767- atomic_read(&fscache_n_retrievals_object_dead));
37768+ atomic_read_unchecked(&fscache_n_retrieval_ops),
37769+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37770+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37771
37772 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37773- atomic_read(&fscache_n_stores),
37774- atomic_read(&fscache_n_stores_ok),
37775- atomic_read(&fscache_n_stores_again),
37776- atomic_read(&fscache_n_stores_nobufs),
37777- atomic_read(&fscache_n_stores_oom));
37778+ atomic_read_unchecked(&fscache_n_stores),
37779+ atomic_read_unchecked(&fscache_n_stores_ok),
37780+ atomic_read_unchecked(&fscache_n_stores_again),
37781+ atomic_read_unchecked(&fscache_n_stores_nobufs),
37782+ atomic_read_unchecked(&fscache_n_stores_oom));
37783 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37784- atomic_read(&fscache_n_store_ops),
37785- atomic_read(&fscache_n_store_calls),
37786- atomic_read(&fscache_n_store_pages),
37787- atomic_read(&fscache_n_store_radix_deletes),
37788- atomic_read(&fscache_n_store_pages_over_limit));
37789+ atomic_read_unchecked(&fscache_n_store_ops),
37790+ atomic_read_unchecked(&fscache_n_store_calls),
37791+ atomic_read_unchecked(&fscache_n_store_pages),
37792+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
37793+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37794
37795 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37796- atomic_read(&fscache_n_store_vmscan_not_storing),
37797- atomic_read(&fscache_n_store_vmscan_gone),
37798- atomic_read(&fscache_n_store_vmscan_busy),
37799- atomic_read(&fscache_n_store_vmscan_cancelled));
37800+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37801+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37802+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37803+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37804
37805 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37806- atomic_read(&fscache_n_op_pend),
37807- atomic_read(&fscache_n_op_run),
37808- atomic_read(&fscache_n_op_enqueue),
37809- atomic_read(&fscache_n_op_cancelled),
37810- atomic_read(&fscache_n_op_rejected));
37811+ atomic_read_unchecked(&fscache_n_op_pend),
37812+ atomic_read_unchecked(&fscache_n_op_run),
37813+ atomic_read_unchecked(&fscache_n_op_enqueue),
37814+ atomic_read_unchecked(&fscache_n_op_cancelled),
37815+ atomic_read_unchecked(&fscache_n_op_rejected));
37816 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37817- atomic_read(&fscache_n_op_deferred_release),
37818- atomic_read(&fscache_n_op_release),
37819- atomic_read(&fscache_n_op_gc));
37820+ atomic_read_unchecked(&fscache_n_op_deferred_release),
37821+ atomic_read_unchecked(&fscache_n_op_release),
37822+ atomic_read_unchecked(&fscache_n_op_gc));
37823
37824 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37825 atomic_read(&fscache_n_cop_alloc_object),
37826diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37827--- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37828+++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37829@@ -4,6 +4,7 @@
37830 #include <linux/path.h>
37831 #include <linux/slab.h>
37832 #include <linux/fs_struct.h>
37833+#include <linux/grsecurity.h>
37834 #include "internal.h"
37835
37836 static inline void path_get_longterm(struct path *path)
37837@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37838 old_root = fs->root;
37839 fs->root = *path;
37840 path_get_longterm(path);
37841+ gr_set_chroot_entries(current, path);
37842 write_seqcount_end(&fs->seq);
37843 spin_unlock(&fs->lock);
37844 if (old_root.dentry)
37845@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37846 && fs->root.mnt == old_root->mnt) {
37847 path_get_longterm(new_root);
37848 fs->root = *new_root;
37849+ gr_set_chroot_entries(p, new_root);
37850 count++;
37851 }
37852 if (fs->pwd.dentry == old_root->dentry
37853@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37854 spin_lock(&fs->lock);
37855 write_seqcount_begin(&fs->seq);
37856 tsk->fs = NULL;
37857- kill = !--fs->users;
37858+ gr_clear_chroot_entries(tsk);
37859+ kill = !atomic_dec_return(&fs->users);
37860 write_seqcount_end(&fs->seq);
37861 spin_unlock(&fs->lock);
37862 task_unlock(tsk);
37863@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37864 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37865 /* We don't need to lock fs - think why ;-) */
37866 if (fs) {
37867- fs->users = 1;
37868+ atomic_set(&fs->users, 1);
37869 fs->in_exec = 0;
37870 spin_lock_init(&fs->lock);
37871 seqcount_init(&fs->seq);
37872@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37873 spin_lock(&old->lock);
37874 fs->root = old->root;
37875 path_get_longterm(&fs->root);
37876+ /* instead of calling gr_set_chroot_entries here,
37877+ we call it from every caller of this function
37878+ */
37879 fs->pwd = old->pwd;
37880 path_get_longterm(&fs->pwd);
37881 spin_unlock(&old->lock);
37882@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37883
37884 task_lock(current);
37885 spin_lock(&fs->lock);
37886- kill = !--fs->users;
37887+ kill = !atomic_dec_return(&fs->users);
37888 current->fs = new_fs;
37889+ gr_set_chroot_entries(current, &new_fs->root);
37890 spin_unlock(&fs->lock);
37891 task_unlock(current);
37892
37893@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37894
37895 /* to be mentioned only in INIT_TASK */
37896 struct fs_struct init_fs = {
37897- .users = 1,
37898+ .users = ATOMIC_INIT(1),
37899 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37900 .seq = SEQCNT_ZERO,
37901 .umask = 0022,
37902@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37903 task_lock(current);
37904
37905 spin_lock(&init_fs.lock);
37906- init_fs.users++;
37907+ atomic_inc(&init_fs.users);
37908 spin_unlock(&init_fs.lock);
37909
37910 spin_lock(&fs->lock);
37911 current->fs = &init_fs;
37912- kill = !--fs->users;
37913+ gr_set_chroot_entries(current, &current->fs->root);
37914+ kill = !atomic_dec_return(&fs->users);
37915 spin_unlock(&fs->lock);
37916
37917 task_unlock(current);
37918diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
37919--- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
37920+++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
37921@@ -586,10 +586,12 @@ static int __init cuse_init(void)
37922 INIT_LIST_HEAD(&cuse_conntbl[i]);
37923
37924 /* inherit and extend fuse_dev_operations */
37925- cuse_channel_fops = fuse_dev_operations;
37926- cuse_channel_fops.owner = THIS_MODULE;
37927- cuse_channel_fops.open = cuse_channel_open;
37928- cuse_channel_fops.release = cuse_channel_release;
37929+ pax_open_kernel();
37930+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37931+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37932+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
37933+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
37934+ pax_close_kernel();
37935
37936 cuse_class = class_create(THIS_MODULE, "cuse");
37937 if (IS_ERR(cuse_class))
37938diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
37939--- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
37940+++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
37941@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37942 ret = 0;
37943 pipe_lock(pipe);
37944
37945- if (!pipe->readers) {
37946+ if (!atomic_read(&pipe->readers)) {
37947 send_sig(SIGPIPE, current, 0);
37948 if (!ret)
37949 ret = -EPIPE;
37950diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
37951--- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
37952+++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
37953@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
37954 return link;
37955 }
37956
37957-static void free_link(char *link)
37958+static void free_link(const char *link)
37959 {
37960 if (!IS_ERR(link))
37961 free_page((unsigned long) link);
37962diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
37963--- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
37964+++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
37965@@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
37966 unsigned int x;
37967 int error;
37968
37969+ pax_track_stack();
37970+
37971 if (ndentry->d_inode) {
37972 nip = GFS2_I(ndentry->d_inode);
37973 if (ip == nip)
37974@@ -1019,7 +1021,7 @@ out:
37975
37976 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37977 {
37978- char *s = nd_get_link(nd);
37979+ const char *s = nd_get_link(nd);
37980 if (!IS_ERR(s))
37981 kfree(s);
37982 }
37983diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
37984--- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
37985+++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
37986@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
37987 int err;
37988 u16 type;
37989
37990+ pax_track_stack();
37991+
37992 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
37993 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
37994 if (err)
37995@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
37996 int entry_size;
37997 int err;
37998
37999+ pax_track_stack();
38000+
38001 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38002 str->name, cnid, inode->i_nlink);
38003 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38004@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38005 int entry_size, type;
38006 int err = 0;
38007
38008+ pax_track_stack();
38009+
38010 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38011 cnid, src_dir->i_ino, src_name->name,
38012 dst_dir->i_ino, dst_name->name);
38013diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38014--- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38015+++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38016@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38017 struct hfsplus_readdir_data *rd;
38018 u16 type;
38019
38020+ pax_track_stack();
38021+
38022 if (filp->f_pos >= inode->i_size)
38023 return 0;
38024
38025diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38026--- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38027+++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38028@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38029 int res = 0;
38030 u16 type;
38031
38032+ pax_track_stack();
38033+
38034 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38035
38036 HFSPLUS_I(inode)->linkid = 0;
38037@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38038 struct hfs_find_data fd;
38039 hfsplus_cat_entry entry;
38040
38041+ pax_track_stack();
38042+
38043 if (HFSPLUS_IS_RSRC(inode))
38044 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38045
38046diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38047--- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38048+++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38049@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38050 struct hfsplus_cat_file *file;
38051 int res;
38052
38053+ pax_track_stack();
38054+
38055 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38056 return -EOPNOTSUPP;
38057
38058@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38059 struct hfsplus_cat_file *file;
38060 ssize_t res = 0;
38061
38062+ pax_track_stack();
38063+
38064 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38065 return -EOPNOTSUPP;
38066
38067diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38068--- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38069+++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38070@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38071 struct nls_table *nls = NULL;
38072 int err;
38073
38074+ pax_track_stack();
38075+
38076 err = -EINVAL;
38077 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38078 if (!sbi)
38079diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38080--- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38081+++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38082@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38083 .kill_sb = kill_litter_super,
38084 };
38085
38086-static struct vfsmount *hugetlbfs_vfsmount;
38087+struct vfsmount *hugetlbfs_vfsmount;
38088
38089 static int can_do_hugetlb_shm(void)
38090 {
38091diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38092--- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38093+++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38094@@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38095
38096 #ifdef CONFIG_SMP
38097 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38098- static atomic_t shared_last_ino;
38099- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38100+ static atomic_unchecked_t shared_last_ino;
38101+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38102
38103 res = next - LAST_INO_BATCH;
38104 }
38105diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38106--- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38107+++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38108@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38109 tid_t this_tid;
38110 int result;
38111
38112+ pax_track_stack();
38113+
38114 jbd_debug(1, "Start checkpoint\n");
38115
38116 /*
38117diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38118--- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38119+++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38120@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38121 int outpos = 0;
38122 int pos=0;
38123
38124+ pax_track_stack();
38125+
38126 memset(positions,0,sizeof(positions));
38127
38128 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38129@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38130 int outpos = 0;
38131 int pos=0;
38132
38133+ pax_track_stack();
38134+
38135 memset(positions,0,sizeof(positions));
38136
38137 while (outpos<destlen) {
38138diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38139--- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38140+++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38141@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38142 int ret;
38143 uint32_t mysrclen, mydstlen;
38144
38145+ pax_track_stack();
38146+
38147 mysrclen = *sourcelen;
38148 mydstlen = *dstlen - 8;
38149
38150diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38151--- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38152+++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38153@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38154 struct jffs2_unknown_node marker = {
38155 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38156 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38157- .totlen = cpu_to_je32(c->cleanmarker_size)
38158+ .totlen = cpu_to_je32(c->cleanmarker_size),
38159+ .hdr_crc = cpu_to_je32(0)
38160 };
38161
38162 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38163diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38164--- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38165+++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38166@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38167 {
38168 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38169 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38170- .totlen = constant_cpu_to_je32(8)
38171+ .totlen = constant_cpu_to_je32(8),
38172+ .hdr_crc = constant_cpu_to_je32(0)
38173 };
38174
38175 /*
38176diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38177--- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38178+++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38179@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38180
38181 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38182
38183+ pax_track_stack();
38184+
38185 /* Phase.1 : Merge same xref */
38186 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38187 xref_tmphash[i] = NULL;
38188diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38189--- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38190+++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38191@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38192
38193 jfs_inode_cachep =
38194 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38195- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38196+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38197 init_once);
38198 if (jfs_inode_cachep == NULL)
38199 return -ENOMEM;
38200diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38201--- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38202+++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38203@@ -86,7 +86,7 @@ config HAVE_AOUT
38204
38205 config BINFMT_AOUT
38206 tristate "Kernel support for a.out and ECOFF binaries"
38207- depends on HAVE_AOUT
38208+ depends on HAVE_AOUT && BROKEN
38209 ---help---
38210 A.out (Assembler.OUTput) is a set of formats for libraries and
38211 executables used in the earliest versions of UNIX. Linux used
38212diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38213--- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38214+++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38215@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38216
38217 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38218 struct dentry *next;
38219+ char d_name[sizeof(next->d_iname)];
38220+ const unsigned char *name;
38221+
38222 next = list_entry(p, struct dentry, d_u.d_child);
38223 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38224 if (!simple_positive(next)) {
38225@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38226
38227 spin_unlock(&next->d_lock);
38228 spin_unlock(&dentry->d_lock);
38229- if (filldir(dirent, next->d_name.name,
38230+ name = next->d_name.name;
38231+ if (name == next->d_iname) {
38232+ memcpy(d_name, name, next->d_name.len);
38233+ name = d_name;
38234+ }
38235+ if (filldir(dirent, name,
38236 next->d_name.len, filp->f_pos,
38237 next->d_inode->i_ino,
38238 dt_type(next->d_inode)) < 0)
38239diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38240--- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38241+++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38242@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38243 /*
38244 * Cookie counter for NLM requests
38245 */
38246-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38247+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38248
38249 void nlmclnt_next_cookie(struct nlm_cookie *c)
38250 {
38251- u32 cookie = atomic_inc_return(&nlm_cookie);
38252+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38253
38254 memcpy(c->data, &cookie, 4);
38255 c->len=4;
38256@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38257 struct nlm_rqst reqst, *req;
38258 int status;
38259
38260+ pax_track_stack();
38261+
38262 req = &reqst;
38263 memset(req, 0, sizeof(*req));
38264 locks_init_lock(&req->a_args.lock.fl);
38265diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38266--- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38267+++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38268@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38269 return;
38270
38271 if (filp->f_op && filp->f_op->flock) {
38272- struct file_lock fl = {
38273+ struct file_lock flock = {
38274 .fl_pid = current->tgid,
38275 .fl_file = filp,
38276 .fl_flags = FL_FLOCK,
38277 .fl_type = F_UNLCK,
38278 .fl_end = OFFSET_MAX,
38279 };
38280- filp->f_op->flock(filp, F_SETLKW, &fl);
38281- if (fl.fl_ops && fl.fl_ops->fl_release_private)
38282- fl.fl_ops->fl_release_private(&fl);
38283+ filp->f_op->flock(filp, F_SETLKW, &flock);
38284+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
38285+ flock.fl_ops->fl_release_private(&flock);
38286 }
38287
38288 lock_flocks();
38289diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38290--- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38291+++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38292@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38293 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38294 int err, valid0, valid1;
38295
38296+ pax_track_stack();
38297+
38298 /* read first superblock */
38299 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38300 if (err)
38301diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38302--- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38303+++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38304@@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38305 return ret;
38306
38307 /*
38308- * Read/write DACs are always overridable.
38309- * Executable DACs are overridable if at least one exec bit is set.
38310+ * Searching includes executable on directories, else just read.
38311 */
38312- if (!(mask & MAY_EXEC) || execute_ok(inode))
38313- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38314+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38315+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38316+#ifdef CONFIG_GRKERNSEC
38317+ if (flags & IPERM_FLAG_RCU)
38318+ return -ECHILD;
38319+#endif
38320+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38321 return 0;
38322+ }
38323
38324 /*
38325- * Searching includes executable on directories, else just read.
38326+ * Read/write DACs are always overridable.
38327+ * Executable DACs are overridable if at least one exec bit is set.
38328 */
38329- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38330- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38331- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38332+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38333+#ifdef CONFIG_GRKERNSEC
38334+ if (flags & IPERM_FLAG_RCU)
38335+ return -ECHILD;
38336+#endif
38337+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38338 return 0;
38339+ }
38340
38341 return -EACCES;
38342 }
38343@@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38344 struct dentry *dentry = nd->path.dentry;
38345 int status;
38346
38347+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38348+ return -ENOENT;
38349+
38350 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38351 return 0;
38352
38353@@ -671,9 +684,16 @@ static inline int exec_permission(struct
38354 if (ret == -ECHILD)
38355 return ret;
38356
38357- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38358- ns_capable(ns, CAP_DAC_READ_SEARCH))
38359+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38360 goto ok;
38361+ else {
38362+#ifdef CONFIG_GRKERNSEC
38363+ if (flags & IPERM_FLAG_RCU)
38364+ return -ECHILD;
38365+#endif
38366+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38367+ goto ok;
38368+ }
38369
38370 return ret;
38371 ok:
38372@@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38373 return error;
38374 }
38375
38376+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
38377+ dentry->d_inode, dentry, nd->path.mnt)) {
38378+ error = -EACCES;
38379+ *p = ERR_PTR(error); /* no ->put_link(), please */
38380+ path_put(&nd->path);
38381+ return error;
38382+ }
38383+
38384 nd->last_type = LAST_BIND;
38385 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38386 error = PTR_ERR(*p);
38387 if (!IS_ERR(*p)) {
38388- char *s = nd_get_link(nd);
38389+ const char *s = nd_get_link(nd);
38390 error = 0;
38391 if (s)
38392 error = __vfs_follow_link(nd, s);
38393@@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38394 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38395
38396 if (likely(!retval)) {
38397+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38398+ return -ENOENT;
38399+
38400 if (unlikely(!audit_dummy_context())) {
38401 if (nd->path.dentry && nd->inode)
38402 audit_inode(name, nd->path.dentry);
38403@@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38404 return error;
38405 }
38406
38407+/*
38408+ * Note that while the flag value (low two bits) for sys_open means:
38409+ * 00 - read-only
38410+ * 01 - write-only
38411+ * 10 - read-write
38412+ * 11 - special
38413+ * it is changed into
38414+ * 00 - no permissions needed
38415+ * 01 - read-permission
38416+ * 10 - write-permission
38417+ * 11 - read-write
38418+ * for the internal routines (ie open_namei()/follow_link() etc)
38419+ * This is more logical, and also allows the 00 "no perm needed"
38420+ * to be used for symlinks (where the permissions are checked
38421+ * later).
38422+ *
38423+*/
38424+static inline int open_to_namei_flags(int flag)
38425+{
38426+ if ((flag+1) & O_ACCMODE)
38427+ flag++;
38428+ return flag;
38429+}
38430+
38431 static int may_open(struct path *path, int acc_mode, int flag)
38432 {
38433 struct dentry *dentry = path->dentry;
38434@@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38435 /*
38436 * Ensure there are no outstanding leases on the file.
38437 */
38438- return break_lease(inode, flag);
38439+ error = break_lease(inode, flag);
38440+
38441+ if (error)
38442+ return error;
38443+
38444+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38445+ error = -EPERM;
38446+ goto exit;
38447+ }
38448+
38449+ if (gr_handle_rawio(inode)) {
38450+ error = -EPERM;
38451+ goto exit;
38452+ }
38453+
38454+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38455+ error = -EACCES;
38456+ goto exit;
38457+ }
38458+exit:
38459+ return error;
38460 }
38461
38462 static int handle_truncate(struct file *filp)
38463@@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38464 }
38465
38466 /*
38467- * Note that while the flag value (low two bits) for sys_open means:
38468- * 00 - read-only
38469- * 01 - write-only
38470- * 10 - read-write
38471- * 11 - special
38472- * it is changed into
38473- * 00 - no permissions needed
38474- * 01 - read-permission
38475- * 10 - write-permission
38476- * 11 - read-write
38477- * for the internal routines (ie open_namei()/follow_link() etc)
38478- * This is more logical, and also allows the 00 "no perm needed"
38479- * to be used for symlinks (where the permissions are checked
38480- * later).
38481- *
38482-*/
38483-static inline int open_to_namei_flags(int flag)
38484-{
38485- if ((flag+1) & O_ACCMODE)
38486- flag++;
38487- return flag;
38488-}
38489-
38490-/*
38491 * Handle the last step of open()
38492 */
38493 static struct file *do_last(struct nameidata *nd, struct path *path,
38494@@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38495 struct dentry *dir = nd->path.dentry;
38496 struct dentry *dentry;
38497 int open_flag = op->open_flag;
38498+ int flag = open_to_namei_flags(open_flag);
38499 int will_truncate = open_flag & O_TRUNC;
38500 int want_write = 0;
38501 int acc_mode = op->acc_mode;
38502@@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38503 /* Negative dentry, just create the file */
38504 if (!dentry->d_inode) {
38505 int mode = op->mode;
38506+
38507+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38508+ error = -EACCES;
38509+ goto exit_mutex_unlock;
38510+ }
38511+
38512 if (!IS_POSIXACL(dir->d_inode))
38513 mode &= ~current_umask();
38514 /*
38515@@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38516 error = vfs_create(dir->d_inode, dentry, mode, nd);
38517 if (error)
38518 goto exit_mutex_unlock;
38519+ else
38520+ gr_handle_create(path->dentry, path->mnt);
38521 mutex_unlock(&dir->d_inode->i_mutex);
38522 dput(nd->path.dentry);
38523 nd->path.dentry = dentry;
38524@@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38525 /*
38526 * It already exists.
38527 */
38528+
38529+ /* only check if O_CREAT is specified, all other checks need to go
38530+ into may_open */
38531+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38532+ error = -EACCES;
38533+ goto exit_mutex_unlock;
38534+ }
38535+
38536 mutex_unlock(&dir->d_inode->i_mutex);
38537 audit_inode(pathname, path->dentry);
38538
38539@@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38540 error = may_mknod(mode);
38541 if (error)
38542 goto out_dput;
38543+
38544+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38545+ error = -EPERM;
38546+ goto out_dput;
38547+ }
38548+
38549+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38550+ error = -EACCES;
38551+ goto out_dput;
38552+ }
38553+
38554 error = mnt_want_write(nd.path.mnt);
38555 if (error)
38556 goto out_dput;
38557@@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38558 }
38559 out_drop_write:
38560 mnt_drop_write(nd.path.mnt);
38561+
38562+ if (!error)
38563+ gr_handle_create(dentry, nd.path.mnt);
38564 out_dput:
38565 dput(dentry);
38566 out_unlock:
38567@@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38568 if (IS_ERR(dentry))
38569 goto out_unlock;
38570
38571+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38572+ error = -EACCES;
38573+ goto out_dput;
38574+ }
38575+
38576 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38577 mode &= ~current_umask();
38578 error = mnt_want_write(nd.path.mnt);
38579@@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38580 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38581 out_drop_write:
38582 mnt_drop_write(nd.path.mnt);
38583+
38584+ if (!error)
38585+ gr_handle_create(dentry, nd.path.mnt);
38586+
38587 out_dput:
38588 dput(dentry);
38589 out_unlock:
38590@@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38591 char * name;
38592 struct dentry *dentry;
38593 struct nameidata nd;
38594+ ino_t saved_ino = 0;
38595+ dev_t saved_dev = 0;
38596
38597 error = user_path_parent(dfd, pathname, &nd, &name);
38598 if (error)
38599@@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38600 error = PTR_ERR(dentry);
38601 if (IS_ERR(dentry))
38602 goto exit2;
38603+
38604+ if (dentry->d_inode != NULL) {
38605+ if (dentry->d_inode->i_nlink <= 1) {
38606+ saved_ino = dentry->d_inode->i_ino;
38607+ saved_dev = gr_get_dev_from_dentry(dentry);
38608+ }
38609+
38610+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38611+ error = -EACCES;
38612+ goto exit3;
38613+ }
38614+ }
38615+
38616 error = mnt_want_write(nd.path.mnt);
38617 if (error)
38618 goto exit3;
38619@@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38620 if (error)
38621 goto exit4;
38622 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38623+ if (!error && (saved_dev || saved_ino))
38624+ gr_handle_delete(saved_ino, saved_dev);
38625 exit4:
38626 mnt_drop_write(nd.path.mnt);
38627 exit3:
38628@@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38629 struct dentry *dentry;
38630 struct nameidata nd;
38631 struct inode *inode = NULL;
38632+ ino_t saved_ino = 0;
38633+ dev_t saved_dev = 0;
38634
38635 error = user_path_parent(dfd, pathname, &nd, &name);
38636 if (error)
38637@@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38638 if (nd.last.name[nd.last.len])
38639 goto slashes;
38640 inode = dentry->d_inode;
38641- if (inode)
38642+ if (inode) {
38643 ihold(inode);
38644+ if (inode->i_nlink <= 1) {
38645+ saved_ino = inode->i_ino;
38646+ saved_dev = gr_get_dev_from_dentry(dentry);
38647+ }
38648+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38649+ error = -EACCES;
38650+ goto exit2;
38651+ }
38652+ }
38653 error = mnt_want_write(nd.path.mnt);
38654 if (error)
38655 goto exit2;
38656@@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38657 if (error)
38658 goto exit3;
38659 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38660+ if (!error && (saved_ino || saved_dev))
38661+ gr_handle_delete(saved_ino, saved_dev);
38662 exit3:
38663 mnt_drop_write(nd.path.mnt);
38664 exit2:
38665@@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38666 if (IS_ERR(dentry))
38667 goto out_unlock;
38668
38669+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38670+ error = -EACCES;
38671+ goto out_dput;
38672+ }
38673+
38674 error = mnt_want_write(nd.path.mnt);
38675 if (error)
38676 goto out_dput;
38677@@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38678 if (error)
38679 goto out_drop_write;
38680 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38681+ if (!error)
38682+ gr_handle_create(dentry, nd.path.mnt);
38683 out_drop_write:
38684 mnt_drop_write(nd.path.mnt);
38685 out_dput:
38686@@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38687 error = PTR_ERR(new_dentry);
38688 if (IS_ERR(new_dentry))
38689 goto out_unlock;
38690+
38691+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38692+ old_path.dentry->d_inode,
38693+ old_path.dentry->d_inode->i_mode, to)) {
38694+ error = -EACCES;
38695+ goto out_dput;
38696+ }
38697+
38698+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38699+ old_path.dentry, old_path.mnt, to)) {
38700+ error = -EACCES;
38701+ goto out_dput;
38702+ }
38703+
38704 error = mnt_want_write(nd.path.mnt);
38705 if (error)
38706 goto out_dput;
38707@@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38708 if (error)
38709 goto out_drop_write;
38710 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38711+ if (!error)
38712+ gr_handle_create(new_dentry, nd.path.mnt);
38713 out_drop_write:
38714 mnt_drop_write(nd.path.mnt);
38715 out_dput:
38716@@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38717 char *to;
38718 int error;
38719
38720+ pax_track_stack();
38721+
38722 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38723 if (error)
38724 goto exit;
38725@@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38726 if (new_dentry == trap)
38727 goto exit5;
38728
38729+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38730+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
38731+ to);
38732+ if (error)
38733+ goto exit5;
38734+
38735 error = mnt_want_write(oldnd.path.mnt);
38736 if (error)
38737 goto exit5;
38738@@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38739 goto exit6;
38740 error = vfs_rename(old_dir->d_inode, old_dentry,
38741 new_dir->d_inode, new_dentry);
38742+ if (!error)
38743+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38744+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38745 exit6:
38746 mnt_drop_write(oldnd.path.mnt);
38747 exit5:
38748@@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38749
38750 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38751 {
38752+ char tmpbuf[64];
38753+ const char *newlink;
38754 int len;
38755
38756 len = PTR_ERR(link);
38757@@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38758 len = strlen(link);
38759 if (len > (unsigned) buflen)
38760 len = buflen;
38761- if (copy_to_user(buffer, link, len))
38762+
38763+ if (len < sizeof(tmpbuf)) {
38764+ memcpy(tmpbuf, link, len);
38765+ newlink = tmpbuf;
38766+ } else
38767+ newlink = link;
38768+
38769+ if (copy_to_user(buffer, newlink, len))
38770 len = -EFAULT;
38771 out:
38772 return len;
38773diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38774--- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38775+++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38776@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38777 if (!(sb->s_flags & MS_RDONLY))
38778 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38779 up_write(&sb->s_umount);
38780+
38781+ gr_log_remount(mnt->mnt_devname, retval);
38782+
38783 return retval;
38784 }
38785
38786@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38787 br_write_unlock(vfsmount_lock);
38788 up_write(&namespace_sem);
38789 release_mounts(&umount_list);
38790+
38791+ gr_log_unmount(mnt->mnt_devname, retval);
38792+
38793 return retval;
38794 }
38795
38796@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38797 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38798 MS_STRICTATIME);
38799
38800+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38801+ retval = -EPERM;
38802+ goto dput_out;
38803+ }
38804+
38805+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38806+ retval = -EPERM;
38807+ goto dput_out;
38808+ }
38809+
38810 if (flags & MS_REMOUNT)
38811 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38812 data_page);
38813@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38814 dev_name, data_page);
38815 dput_out:
38816 path_put(&path);
38817+
38818+ gr_log_mount(dev_name, dir_name, retval);
38819+
38820 return retval;
38821 }
38822
38823@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38824 if (error)
38825 goto out2;
38826
38827+ if (gr_handle_chroot_pivot()) {
38828+ error = -EPERM;
38829+ goto out2;
38830+ }
38831+
38832 get_fs_root(current->fs, &root);
38833 error = lock_mount(&old);
38834 if (error)
38835diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38836--- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38837+++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38838@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38839 int res, val = 0, len;
38840 __u8 __name[NCP_MAXPATHLEN + 1];
38841
38842+ pax_track_stack();
38843+
38844 if (dentry == dentry->d_sb->s_root)
38845 return 1;
38846
38847@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38848 int error, res, len;
38849 __u8 __name[NCP_MAXPATHLEN + 1];
38850
38851+ pax_track_stack();
38852+
38853 error = -EIO;
38854 if (!ncp_conn_valid(server))
38855 goto finished;
38856@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38857 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38858 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38859
38860+ pax_track_stack();
38861+
38862 ncp_age_dentry(server, dentry);
38863 len = sizeof(__name);
38864 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38865@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38866 int error, len;
38867 __u8 __name[NCP_MAXPATHLEN + 1];
38868
38869+ pax_track_stack();
38870+
38871 DPRINTK("ncp_mkdir: making %s/%s\n",
38872 dentry->d_parent->d_name.name, dentry->d_name.name);
38873
38874@@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
38875 int old_len, new_len;
38876 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38877
38878+ pax_track_stack();
38879+
38880 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38881 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38882 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38883diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
38884--- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38885+++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38886@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38887 #endif
38888 struct ncp_entry_info finfo;
38889
38890+ pax_track_stack();
38891+
38892 data.wdog_pid = NULL;
38893 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38894 if (!server)
38895diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
38896--- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
38897+++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38898@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38899 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38900 nfsi->attrtimeo_timestamp = jiffies;
38901
38902- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38903+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38904 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38905 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38906 else
38907@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38908 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38909 }
38910
38911-static atomic_long_t nfs_attr_generation_counter;
38912+static atomic_long_unchecked_t nfs_attr_generation_counter;
38913
38914 static unsigned long nfs_read_attr_generation_counter(void)
38915 {
38916- return atomic_long_read(&nfs_attr_generation_counter);
38917+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38918 }
38919
38920 unsigned long nfs_inc_attr_generation_counter(void)
38921 {
38922- return atomic_long_inc_return(&nfs_attr_generation_counter);
38923+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38924 }
38925
38926 void nfs_fattr_init(struct nfs_fattr *fattr)
38927diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
38928--- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
38929+++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
38930@@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38931 unsigned int strhashval;
38932 int err;
38933
38934+ pax_track_stack();
38935+
38936 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38937 (long long) lock->lk_offset,
38938 (long long) lock->lk_length);
38939diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
38940--- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
38941+++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
38942@@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38943 .dentry = dentry,
38944 };
38945
38946+ pax_track_stack();
38947+
38948 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38949 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38950 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38951diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
38952--- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
38953+++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
38954@@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38955 } else {
38956 oldfs = get_fs();
38957 set_fs(KERNEL_DS);
38958- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38959+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38960 set_fs(oldfs);
38961 }
38962
38963@@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
38964
38965 /* Write the data. */
38966 oldfs = get_fs(); set_fs(KERNEL_DS);
38967- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
38968+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
38969 set_fs(oldfs);
38970 if (host_err < 0)
38971 goto out_nfserr;
38972@@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
38973 */
38974
38975 oldfs = get_fs(); set_fs(KERNEL_DS);
38976- host_err = inode->i_op->readlink(dentry, buf, *lenp);
38977+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
38978 set_fs(oldfs);
38979
38980 if (host_err < 0)
38981diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
38982--- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
38983+++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
38984@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
38985 * get set to 0 so it will never get 'freed'
38986 */
38987 static struct fsnotify_event *q_overflow_event;
38988-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38989+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38990
38991 /**
38992 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
38993@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
38994 */
38995 u32 fsnotify_get_cookie(void)
38996 {
38997- return atomic_inc_return(&fsnotify_sync_cookie);
38998+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
38999 }
39000 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39001
39002diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39003--- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39004+++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39005@@ -1329,7 +1329,7 @@ find_next_index_buffer:
39006 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39007 ~(s64)(ndir->itype.index.block_size - 1)));
39008 /* Bounds checks. */
39009- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39010+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39011 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39012 "inode 0x%lx or driver bug.", vdir->i_ino);
39013 goto err_out;
39014diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39015--- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39016+++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39017@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39018 #endif /* NTFS_RW */
39019 };
39020
39021-const struct file_operations ntfs_empty_file_ops = {};
39022+const struct file_operations ntfs_empty_file_ops __read_only;
39023
39024-const struct inode_operations ntfs_empty_inode_ops = {};
39025+const struct inode_operations ntfs_empty_inode_ops __read_only;
39026diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39027--- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39028+++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39029@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39030 goto bail;
39031 }
39032
39033- atomic_inc(&osb->alloc_stats.moves);
39034+ atomic_inc_unchecked(&osb->alloc_stats.moves);
39035
39036 bail:
39037 if (handle)
39038diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39039--- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39040+++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39041@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39042 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39043 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39044
39045+ pax_track_stack();
39046+
39047 /* At some point it might be nice to break this function up a
39048 * bit. */
39049
39050diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39051--- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39052+++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39053@@ -235,11 +235,11 @@ enum ocfs2_vol_state
39054
39055 struct ocfs2_alloc_stats
39056 {
39057- atomic_t moves;
39058- atomic_t local_data;
39059- atomic_t bitmap_data;
39060- atomic_t bg_allocs;
39061- atomic_t bg_extends;
39062+ atomic_unchecked_t moves;
39063+ atomic_unchecked_t local_data;
39064+ atomic_unchecked_t bitmap_data;
39065+ atomic_unchecked_t bg_allocs;
39066+ atomic_unchecked_t bg_extends;
39067 };
39068
39069 enum ocfs2_local_alloc_state
39070diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39071--- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39072+++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39073@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39074 mlog_errno(status);
39075 goto bail;
39076 }
39077- atomic_inc(&osb->alloc_stats.bg_extends);
39078+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39079
39080 /* You should never ask for this much metadata */
39081 BUG_ON(bits_wanted >
39082@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39083 mlog_errno(status);
39084 goto bail;
39085 }
39086- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39087+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39088
39089 *suballoc_loc = res.sr_bg_blkno;
39090 *suballoc_bit_start = res.sr_bit_offset;
39091@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39092 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39093 res->sr_bits);
39094
39095- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39096+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39097
39098 BUG_ON(res->sr_bits != 1);
39099
39100@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39101 mlog_errno(status);
39102 goto bail;
39103 }
39104- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39105+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39106
39107 BUG_ON(res.sr_bits != 1);
39108
39109@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39110 cluster_start,
39111 num_clusters);
39112 if (!status)
39113- atomic_inc(&osb->alloc_stats.local_data);
39114+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
39115 } else {
39116 if (min_clusters > (osb->bitmap_cpg - 1)) {
39117 /* The only paths asking for contiguousness
39118@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39119 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39120 res.sr_bg_blkno,
39121 res.sr_bit_offset);
39122- atomic_inc(&osb->alloc_stats.bitmap_data);
39123+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39124 *num_clusters = res.sr_bits;
39125 }
39126 }
39127diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39128--- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39129+++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39130@@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39131 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39132 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39133 "Stats",
39134- atomic_read(&osb->alloc_stats.bitmap_data),
39135- atomic_read(&osb->alloc_stats.local_data),
39136- atomic_read(&osb->alloc_stats.bg_allocs),
39137- atomic_read(&osb->alloc_stats.moves),
39138- atomic_read(&osb->alloc_stats.bg_extends));
39139+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39140+ atomic_read_unchecked(&osb->alloc_stats.local_data),
39141+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39142+ atomic_read_unchecked(&osb->alloc_stats.moves),
39143+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39144
39145 out += snprintf(buf + out, len - out,
39146 "%10s => State: %u Descriptor: %llu Size: %u bits "
39147@@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39148 spin_lock_init(&osb->osb_xattr_lock);
39149 ocfs2_init_steal_slots(osb);
39150
39151- atomic_set(&osb->alloc_stats.moves, 0);
39152- atomic_set(&osb->alloc_stats.local_data, 0);
39153- atomic_set(&osb->alloc_stats.bitmap_data, 0);
39154- atomic_set(&osb->alloc_stats.bg_allocs, 0);
39155- atomic_set(&osb->alloc_stats.bg_extends, 0);
39156+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39157+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39158+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39159+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39160+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39161
39162 /* Copy the blockcheck stats from the superblock probe */
39163 osb->osb_ecc_stats = *stats;
39164diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39165--- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39166+++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39167@@ -142,7 +142,7 @@ bail:
39168
39169 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39170 {
39171- char *link = nd_get_link(nd);
39172+ const char *link = nd_get_link(nd);
39173 if (!IS_ERR(link))
39174 kfree(link);
39175 }
39176diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39177--- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39178+++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39179@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39180 error = locks_verify_truncate(inode, NULL, length);
39181 if (!error)
39182 error = security_path_truncate(&path);
39183+
39184+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39185+ error = -EACCES;
39186+
39187 if (!error)
39188 error = do_truncate(path.dentry, length, 0, NULL);
39189
39190@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39191 if (__mnt_is_readonly(path.mnt))
39192 res = -EROFS;
39193
39194+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39195+ res = -EACCES;
39196+
39197 out_path_release:
39198 path_put(&path);
39199 out:
39200@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39201 if (error)
39202 goto dput_and_out;
39203
39204+ gr_log_chdir(path.dentry, path.mnt);
39205+
39206 set_fs_pwd(current->fs, &path);
39207
39208 dput_and_out:
39209@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39210 goto out_putf;
39211
39212 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39213+
39214+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39215+ error = -EPERM;
39216+
39217+ if (!error)
39218+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39219+
39220 if (!error)
39221 set_fs_pwd(current->fs, &file->f_path);
39222 out_putf:
39223@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39224 if (error)
39225 goto dput_and_out;
39226
39227+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39228+ goto dput_and_out;
39229+
39230+ if (gr_handle_chroot_caps(&path)) {
39231+ error = -ENOMEM;
39232+ goto dput_and_out;
39233+ }
39234+
39235 set_fs_root(current->fs, &path);
39236+
39237+ gr_handle_chroot_chdir(&path);
39238+
39239 error = 0;
39240 dput_and_out:
39241 path_put(&path);
39242@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39243 err = mnt_want_write_file(file);
39244 if (err)
39245 goto out_putf;
39246+
39247 mutex_lock(&inode->i_mutex);
39248+
39249+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39250+ err = -EACCES;
39251+ goto out_unlock;
39252+ }
39253+
39254 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39255 if (err)
39256 goto out_unlock;
39257 if (mode == (mode_t) -1)
39258 mode = inode->i_mode;
39259+
39260+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39261+ err = -EACCES;
39262+ goto out_unlock;
39263+ }
39264+
39265 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39266 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39267 err = notify_change(dentry, &newattrs);
39268@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39269 error = mnt_want_write(path.mnt);
39270 if (error)
39271 goto dput_and_out;
39272+
39273 mutex_lock(&inode->i_mutex);
39274+
39275+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39276+ error = -EACCES;
39277+ goto out_unlock;
39278+ }
39279+
39280 error = security_path_chmod(path.dentry, path.mnt, mode);
39281 if (error)
39282 goto out_unlock;
39283 if (mode == (mode_t) -1)
39284 mode = inode->i_mode;
39285+
39286+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39287+ error = -EACCES;
39288+ goto out_unlock;
39289+ }
39290+
39291 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39292 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39293 error = notify_change(path.dentry, &newattrs);
39294@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39295 int error;
39296 struct iattr newattrs;
39297
39298+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
39299+ return -EACCES;
39300+
39301 newattrs.ia_valid = ATTR_CTIME;
39302 if (user != (uid_t) -1) {
39303 newattrs.ia_valid |= ATTR_UID;
39304@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39305 if (!IS_ERR(tmp)) {
39306 fd = get_unused_fd_flags(flags);
39307 if (fd >= 0) {
39308- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39309+ struct file *f;
39310+ /* don't allow to be set by userland */
39311+ flags &= ~FMODE_GREXEC;
39312+ f = do_filp_open(dfd, tmp, &op, lookup);
39313 if (IS_ERR(f)) {
39314 put_unused_fd(fd);
39315 fd = PTR_ERR(f);
39316diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39317--- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39318+++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39319@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39320 ldm_error ("A VBLK claims to have %d parts.", num);
39321 return false;
39322 }
39323+
39324 if (rec >= num) {
39325 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39326 return false;
39327@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39328 goto found;
39329 }
39330
39331- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39332+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39333 if (!f) {
39334 ldm_crit ("Out of memory.");
39335 return false;
39336diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39337--- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39338+++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39339@@ -420,9 +420,9 @@ redo:
39340 }
39341 if (bufs) /* More to do? */
39342 continue;
39343- if (!pipe->writers)
39344+ if (!atomic_read(&pipe->writers))
39345 break;
39346- if (!pipe->waiting_writers) {
39347+ if (!atomic_read(&pipe->waiting_writers)) {
39348 /* syscall merging: Usually we must not sleep
39349 * if O_NONBLOCK is set, or if we got some data.
39350 * But if a writer sleeps in kernel space, then
39351@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39352 mutex_lock(&inode->i_mutex);
39353 pipe = inode->i_pipe;
39354
39355- if (!pipe->readers) {
39356+ if (!atomic_read(&pipe->readers)) {
39357 send_sig(SIGPIPE, current, 0);
39358 ret = -EPIPE;
39359 goto out;
39360@@ -530,7 +530,7 @@ redo1:
39361 for (;;) {
39362 int bufs;
39363
39364- if (!pipe->readers) {
39365+ if (!atomic_read(&pipe->readers)) {
39366 send_sig(SIGPIPE, current, 0);
39367 if (!ret)
39368 ret = -EPIPE;
39369@@ -616,9 +616,9 @@ redo2:
39370 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39371 do_wakeup = 0;
39372 }
39373- pipe->waiting_writers++;
39374+ atomic_inc(&pipe->waiting_writers);
39375 pipe_wait(pipe);
39376- pipe->waiting_writers--;
39377+ atomic_dec(&pipe->waiting_writers);
39378 }
39379 out:
39380 mutex_unlock(&inode->i_mutex);
39381@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39382 mask = 0;
39383 if (filp->f_mode & FMODE_READ) {
39384 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39385- if (!pipe->writers && filp->f_version != pipe->w_counter)
39386+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39387 mask |= POLLHUP;
39388 }
39389
39390@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39391 * Most Unices do not set POLLERR for FIFOs but on Linux they
39392 * behave exactly like pipes for poll().
39393 */
39394- if (!pipe->readers)
39395+ if (!atomic_read(&pipe->readers))
39396 mask |= POLLERR;
39397 }
39398
39399@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39400
39401 mutex_lock(&inode->i_mutex);
39402 pipe = inode->i_pipe;
39403- pipe->readers -= decr;
39404- pipe->writers -= decw;
39405+ atomic_sub(decr, &pipe->readers);
39406+ atomic_sub(decw, &pipe->writers);
39407
39408- if (!pipe->readers && !pipe->writers) {
39409+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39410 free_pipe_info(inode);
39411 } else {
39412 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39413@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39414
39415 if (inode->i_pipe) {
39416 ret = 0;
39417- inode->i_pipe->readers++;
39418+ atomic_inc(&inode->i_pipe->readers);
39419 }
39420
39421 mutex_unlock(&inode->i_mutex);
39422@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39423
39424 if (inode->i_pipe) {
39425 ret = 0;
39426- inode->i_pipe->writers++;
39427+ atomic_inc(&inode->i_pipe->writers);
39428 }
39429
39430 mutex_unlock(&inode->i_mutex);
39431@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39432 if (inode->i_pipe) {
39433 ret = 0;
39434 if (filp->f_mode & FMODE_READ)
39435- inode->i_pipe->readers++;
39436+ atomic_inc(&inode->i_pipe->readers);
39437 if (filp->f_mode & FMODE_WRITE)
39438- inode->i_pipe->writers++;
39439+ atomic_inc(&inode->i_pipe->writers);
39440 }
39441
39442 mutex_unlock(&inode->i_mutex);
39443@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39444 inode->i_pipe = NULL;
39445 }
39446
39447-static struct vfsmount *pipe_mnt __read_mostly;
39448+struct vfsmount *pipe_mnt __read_mostly;
39449
39450 /*
39451 * pipefs_dname() is called from d_path().
39452@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39453 goto fail_iput;
39454 inode->i_pipe = pipe;
39455
39456- pipe->readers = pipe->writers = 1;
39457+ atomic_set(&pipe->readers, 1);
39458+ atomic_set(&pipe->writers, 1);
39459 inode->i_fop = &rdwr_pipefifo_fops;
39460
39461 /*
39462diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39463--- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39464+++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39465@@ -60,6 +60,7 @@
39466 #include <linux/tty.h>
39467 #include <linux/string.h>
39468 #include <linux/mman.h>
39469+#include <linux/grsecurity.h>
39470 #include <linux/proc_fs.h>
39471 #include <linux/ioport.h>
39472 #include <linux/uaccess.h>
39473@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39474 seq_putc(m, '\n');
39475 }
39476
39477+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39478+static inline void task_pax(struct seq_file *m, struct task_struct *p)
39479+{
39480+ if (p->mm)
39481+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39482+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39483+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39484+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39485+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39486+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39487+ else
39488+ seq_printf(m, "PaX:\t-----\n");
39489+}
39490+#endif
39491+
39492 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39493 struct pid *pid, struct task_struct *task)
39494 {
39495@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39496 task_cpus_allowed(m, task);
39497 cpuset_task_status_allowed(m, task);
39498 task_context_switch_counts(m, task);
39499+
39500+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39501+ task_pax(m, task);
39502+#endif
39503+
39504+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39505+ task_grsec_rbac(m, task);
39506+#endif
39507+
39508 return 0;
39509 }
39510
39511+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39512+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39513+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39514+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39515+#endif
39516+
39517 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39518 struct pid *pid, struct task_struct *task, int whole)
39519 {
39520@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39521 cputime_t cutime, cstime, utime, stime;
39522 cputime_t cgtime, gtime;
39523 unsigned long rsslim = 0;
39524- char tcomm[sizeof(task->comm)];
39525+ char tcomm[sizeof(task->comm)] = { 0 };
39526 unsigned long flags;
39527
39528+ pax_track_stack();
39529+
39530 state = *get_task_state(task);
39531 vsize = eip = esp = 0;
39532 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39533@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39534 gtime = task->gtime;
39535 }
39536
39537+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39538+ if (PAX_RAND_FLAGS(mm)) {
39539+ eip = 0;
39540+ esp = 0;
39541+ wchan = 0;
39542+ }
39543+#endif
39544+#ifdef CONFIG_GRKERNSEC_HIDESYM
39545+ wchan = 0;
39546+ eip =0;
39547+ esp =0;
39548+#endif
39549+
39550 /* scale priority and nice values from timeslices to -20..20 */
39551 /* to make it look like a "normal" Unix priority/nice value */
39552 priority = task_prio(task);
39553@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39554 vsize,
39555 mm ? get_mm_rss(mm) : 0,
39556 rsslim,
39557+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39558+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39559+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39560+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39561+#else
39562 mm ? (permitted ? mm->start_code : 1) : 0,
39563 mm ? (permitted ? mm->end_code : 1) : 0,
39564 (permitted && mm) ? mm->start_stack : 0,
39565+#endif
39566 esp,
39567 eip,
39568 /* The signal information here is obsolete.
39569@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39570
39571 return 0;
39572 }
39573+
39574+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39575+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39576+{
39577+ u32 curr_ip = 0;
39578+ unsigned long flags;
39579+
39580+ if (lock_task_sighand(task, &flags)) {
39581+ curr_ip = task->signal->curr_ip;
39582+ unlock_task_sighand(task, &flags);
39583+ }
39584+
39585+ return sprintf(buffer, "%pI4\n", &curr_ip);
39586+}
39587+#endif
39588diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39589--- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39590+++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39591@@ -104,6 +104,22 @@ struct pid_entry {
39592 union proc_op op;
39593 };
39594
39595+struct getdents_callback {
39596+ struct linux_dirent __user * current_dir;
39597+ struct linux_dirent __user * previous;
39598+ struct file * file;
39599+ int count;
39600+ int error;
39601+};
39602+
39603+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39604+ loff_t offset, u64 ino, unsigned int d_type)
39605+{
39606+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
39607+ buf->error = -EINVAL;
39608+ return 0;
39609+}
39610+
39611 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39612 .name = (NAME), \
39613 .len = sizeof(NAME) - 1, \
39614@@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39615 if (task == current)
39616 return mm;
39617
39618+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39619+ return ERR_PTR(-EPERM);
39620+
39621 /*
39622 * If current is actively ptrace'ing, and would also be
39623 * permitted to freshly attach with ptrace now, permit it.
39624@@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39625 if (!mm->arg_end)
39626 goto out_mm; /* Shh! No looking before we're done */
39627
39628+ if (gr_acl_handle_procpidmem(task))
39629+ goto out_mm;
39630+
39631 len = mm->arg_end - mm->arg_start;
39632
39633 if (len > PAGE_SIZE)
39634@@ -306,12 +328,28 @@ out:
39635 return res;
39636 }
39637
39638+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39639+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39640+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39641+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39642+#endif
39643+
39644 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39645 {
39646 struct mm_struct *mm = mm_for_maps(task);
39647 int res = PTR_ERR(mm);
39648 if (mm && !IS_ERR(mm)) {
39649 unsigned int nwords = 0;
39650+
39651+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39652+ /* allow if we're currently ptracing this task */
39653+ if (PAX_RAND_FLAGS(mm) &&
39654+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39655+ mmput(mm);
39656+ return res;
39657+ }
39658+#endif
39659+
39660 do {
39661 nwords += 2;
39662 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39663@@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39664 }
39665
39666
39667-#ifdef CONFIG_KALLSYMS
39668+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39669 /*
39670 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39671 * Returns the resolved symbol. If that fails, simply return the address.
39672@@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39673 mutex_unlock(&task->signal->cred_guard_mutex);
39674 }
39675
39676-#ifdef CONFIG_STACKTRACE
39677+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39678
39679 #define MAX_STACK_TRACE_DEPTH 64
39680
39681@@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39682 return count;
39683 }
39684
39685-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39686+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39687 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39688 {
39689 long nr;
39690@@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39691 /************************************************************************/
39692
39693 /* permission checks */
39694-static int proc_fd_access_allowed(struct inode *inode)
39695+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39696 {
39697 struct task_struct *task;
39698 int allowed = 0;
39699@@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39700 */
39701 task = get_proc_task(inode);
39702 if (task) {
39703- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39704+ if (log)
39705+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39706+ else
39707+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39708 put_task_struct(task);
39709 }
39710 return allowed;
39711@@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39712 if (!task)
39713 goto out_no_task;
39714
39715+ if (gr_acl_handle_procpidmem(task))
39716+ goto out;
39717+
39718 ret = -ENOMEM;
39719 page = (char *)__get_free_page(GFP_TEMPORARY);
39720 if (!page)
39721@@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39722 path_put(&nd->path);
39723
39724 /* Are we allowed to snoop on the tasks file descriptors? */
39725- if (!proc_fd_access_allowed(inode))
39726+ if (!proc_fd_access_allowed(inode,0))
39727 goto out;
39728
39729 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39730@@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39731 struct path path;
39732
39733 /* Are we allowed to snoop on the tasks file descriptors? */
39734- if (!proc_fd_access_allowed(inode))
39735- goto out;
39736+ /* logging this is needed for learning on chromium to work properly,
39737+ but we don't want to flood the logs from 'ps' which does a readlink
39738+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39739+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
39740+ */
39741+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39742+ if (!proc_fd_access_allowed(inode,0))
39743+ goto out;
39744+ } else {
39745+ if (!proc_fd_access_allowed(inode,1))
39746+ goto out;
39747+ }
39748
39749 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39750 if (error)
39751@@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39752 rcu_read_lock();
39753 cred = __task_cred(task);
39754 inode->i_uid = cred->euid;
39755+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39756+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39757+#else
39758 inode->i_gid = cred->egid;
39759+#endif
39760 rcu_read_unlock();
39761 }
39762 security_task_to_inode(task, inode);
39763@@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39764 struct inode *inode = dentry->d_inode;
39765 struct task_struct *task;
39766 const struct cred *cred;
39767+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39768+ const struct cred *tmpcred = current_cred();
39769+#endif
39770
39771 generic_fillattr(inode, stat);
39772
39773@@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39774 stat->uid = 0;
39775 stat->gid = 0;
39776 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39777+
39778+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39779+ rcu_read_unlock();
39780+ return -ENOENT;
39781+ }
39782+
39783 if (task) {
39784+ cred = __task_cred(task);
39785+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39786+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39787+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39788+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39789+#endif
39790+ ) {
39791+#endif
39792 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39793+#ifdef CONFIG_GRKERNSEC_PROC_USER
39794+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39795+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39796+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39797+#endif
39798 task_dumpable(task)) {
39799- cred = __task_cred(task);
39800 stat->uid = cred->euid;
39801+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39802+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39803+#else
39804 stat->gid = cred->egid;
39805+#endif
39806 }
39807+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39808+ } else {
39809+ rcu_read_unlock();
39810+ return -ENOENT;
39811+ }
39812+#endif
39813 }
39814 rcu_read_unlock();
39815 return 0;
39816@@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39817
39818 if (task) {
39819 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39820+#ifdef CONFIG_GRKERNSEC_PROC_USER
39821+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39822+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39823+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39824+#endif
39825 task_dumpable(task)) {
39826 rcu_read_lock();
39827 cred = __task_cred(task);
39828 inode->i_uid = cred->euid;
39829+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39830+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39831+#else
39832 inode->i_gid = cred->egid;
39833+#endif
39834 rcu_read_unlock();
39835 } else {
39836 inode->i_uid = 0;
39837@@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
39838 int fd = proc_fd(inode);
39839
39840 if (task) {
39841- files = get_files_struct(task);
39842+ if (!gr_acl_handle_procpidmem(task))
39843+ files = get_files_struct(task);
39844 put_task_struct(task);
39845 }
39846 if (files) {
39847@@ -2219,15 +2318,25 @@ static const struct file_operations proc
39848 */
39849 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39850 {
39851+ struct task_struct *task;
39852 int rv;
39853
39854 if (flags & IPERM_FLAG_RCU)
39855 return -ECHILD;
39856 rv = generic_permission(inode, mask, flags, NULL);
39857- if (rv == 0)
39858- return 0;
39859+
39860 if (task_pid(current) == proc_pid(inode))
39861 rv = 0;
39862+
39863+ task = get_proc_task(inode);
39864+ if (task == NULL)
39865+ return rv;
39866+
39867+ if (gr_acl_handle_procpidmem(task))
39868+ rv = -EACCES;
39869+
39870+ put_task_struct(task);
39871+
39872 return rv;
39873 }
39874
39875@@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
39876 if (!task)
39877 goto out_no_task;
39878
39879+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39880+ goto out;
39881+
39882 /*
39883 * Yes, it does not scale. And it should not. Don't add
39884 * new entries into /proc/<tgid>/ without very good reasons.
39885@@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
39886 if (!task)
39887 goto out_no_task;
39888
39889+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39890+ goto out;
39891+
39892 ret = 0;
39893 i = filp->f_pos;
39894 switch (i) {
39895@@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
39896 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39897 void *cookie)
39898 {
39899- char *s = nd_get_link(nd);
39900+ const char *s = nd_get_link(nd);
39901 if (!IS_ERR(s))
39902 __putname(s);
39903 }
39904@@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
39905 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39906 #endif
39907 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39908-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39909+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39910 INF("syscall", S_IRUGO, proc_pid_syscall),
39911 #endif
39912 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39913@@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
39914 #ifdef CONFIG_SECURITY
39915 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39916 #endif
39917-#ifdef CONFIG_KALLSYMS
39918+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39919 INF("wchan", S_IRUGO, proc_pid_wchan),
39920 #endif
39921-#ifdef CONFIG_STACKTRACE
39922+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39923 ONE("stack", S_IRUGO, proc_pid_stack),
39924 #endif
39925 #ifdef CONFIG_SCHEDSTATS
39926@@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
39927 #ifdef CONFIG_TASK_IO_ACCOUNTING
39928 INF("io", S_IRUSR, proc_tgid_io_accounting),
39929 #endif
39930+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39931+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39932+#endif
39933 };
39934
39935 static int proc_tgid_base_readdir(struct file * filp,
39936@@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
39937 if (!inode)
39938 goto out;
39939
39940+#ifdef CONFIG_GRKERNSEC_PROC_USER
39941+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39942+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39943+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39944+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39945+#else
39946 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39947+#endif
39948 inode->i_op = &proc_tgid_base_inode_operations;
39949 inode->i_fop = &proc_tgid_base_operations;
39950 inode->i_flags|=S_IMMUTABLE;
39951@@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
39952 if (!task)
39953 goto out;
39954
39955+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39956+ goto out_put_task;
39957+
39958 result = proc_pid_instantiate(dir, dentry, task, NULL);
39959+out_put_task:
39960 put_task_struct(task);
39961 out:
39962 return result;
39963@@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
39964 {
39965 unsigned int nr;
39966 struct task_struct *reaper;
39967+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39968+ const struct cred *tmpcred = current_cred();
39969+ const struct cred *itercred;
39970+#endif
39971+ filldir_t __filldir = filldir;
39972 struct tgid_iter iter;
39973 struct pid_namespace *ns;
39974
39975@@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
39976 for (iter = next_tgid(ns, iter);
39977 iter.task;
39978 iter.tgid += 1, iter = next_tgid(ns, iter)) {
39979+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39980+ rcu_read_lock();
39981+ itercred = __task_cred(iter.task);
39982+#endif
39983+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
39984+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39985+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
39986+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39987+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39988+#endif
39989+ )
39990+#endif
39991+ )
39992+ __filldir = &gr_fake_filldir;
39993+ else
39994+ __filldir = filldir;
39995+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39996+ rcu_read_unlock();
39997+#endif
39998 filp->f_pos = iter.tgid + TGID_OFFSET;
39999- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40000+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40001 put_task_struct(iter.task);
40002 goto out;
40003 }
40004@@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40005 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40006 #endif
40007 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40008-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40009+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40010 INF("syscall", S_IRUGO, proc_pid_syscall),
40011 #endif
40012 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40013@@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40014 #ifdef CONFIG_SECURITY
40015 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40016 #endif
40017-#ifdef CONFIG_KALLSYMS
40018+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40019 INF("wchan", S_IRUGO, proc_pid_wchan),
40020 #endif
40021-#ifdef CONFIG_STACKTRACE
40022+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40023 ONE("stack", S_IRUGO, proc_pid_stack),
40024 #endif
40025 #ifdef CONFIG_SCHEDSTATS
40026diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40027--- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40028+++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40029@@ -23,7 +23,11 @@ static const struct file_operations cmdl
40030
40031 static int __init proc_cmdline_init(void)
40032 {
40033+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40034+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40035+#else
40036 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40037+#endif
40038 return 0;
40039 }
40040 module_init(proc_cmdline_init);
40041diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40042--- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40043+++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40044@@ -64,7 +64,11 @@ static const struct file_operations proc
40045
40046 static int __init proc_devices_init(void)
40047 {
40048+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40049+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40050+#else
40051 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40052+#endif
40053 return 0;
40054 }
40055 module_init(proc_devices_init);
40056diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40057--- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40058+++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40059@@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40060 if (de->mode) {
40061 inode->i_mode = de->mode;
40062 inode->i_uid = de->uid;
40063+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40064+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40065+#else
40066 inode->i_gid = de->gid;
40067+#endif
40068 }
40069 if (de->size)
40070 inode->i_size = de->size;
40071diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40072--- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40073+++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40074@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40075 struct pid *pid, struct task_struct *task);
40076 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40077 struct pid *pid, struct task_struct *task);
40078+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40079+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40080+#endif
40081 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40082
40083 extern const struct file_operations proc_maps_operations;
40084diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40085--- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40086+++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40087@@ -30,12 +30,12 @@ config PROC_FS
40088
40089 config PROC_KCORE
40090 bool "/proc/kcore support" if !ARM
40091- depends on PROC_FS && MMU
40092+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40093
40094 config PROC_VMCORE
40095 bool "/proc/vmcore support"
40096- depends on PROC_FS && CRASH_DUMP
40097- default y
40098+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40099+ default n
40100 help
40101 Exports the dump image of crashed kernel in ELF format.
40102
40103@@ -59,8 +59,8 @@ config PROC_SYSCTL
40104 limited in memory.
40105
40106 config PROC_PAGE_MONITOR
40107- default y
40108- depends on PROC_FS && MMU
40109+ default n
40110+ depends on PROC_FS && MMU && !GRKERNSEC
40111 bool "Enable /proc page monitoring" if EXPERT
40112 help
40113 Various /proc files exist to monitor process memory utilization:
40114diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40115--- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40116+++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40117@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40118 off_t offset = 0;
40119 struct kcore_list *m;
40120
40121+ pax_track_stack();
40122+
40123 /* setup ELF header */
40124 elf = (struct elfhdr *) bufp;
40125 bufp += sizeof(struct elfhdr);
40126@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40127 * the addresses in the elf_phdr on our list.
40128 */
40129 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40130- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40131+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40132+ if (tsz > buflen)
40133 tsz = buflen;
40134-
40135+
40136 while (buflen) {
40137 struct kcore_list *m;
40138
40139@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40140 kfree(elf_buf);
40141 } else {
40142 if (kern_addr_valid(start)) {
40143- unsigned long n;
40144+ char *elf_buf;
40145+ mm_segment_t oldfs;
40146
40147- n = copy_to_user(buffer, (char *)start, tsz);
40148- /*
40149- * We cannot distingush between fault on source
40150- * and fault on destination. When this happens
40151- * we clear too and hope it will trigger the
40152- * EFAULT again.
40153- */
40154- if (n) {
40155- if (clear_user(buffer + tsz - n,
40156- n))
40157+ elf_buf = kmalloc(tsz, GFP_KERNEL);
40158+ if (!elf_buf)
40159+ return -ENOMEM;
40160+ oldfs = get_fs();
40161+ set_fs(KERNEL_DS);
40162+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40163+ set_fs(oldfs);
40164+ if (copy_to_user(buffer, elf_buf, tsz)) {
40165+ kfree(elf_buf);
40166 return -EFAULT;
40167+ }
40168 }
40169+ set_fs(oldfs);
40170+ kfree(elf_buf);
40171 } else {
40172 if (clear_user(buffer, tsz))
40173 return -EFAULT;
40174@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40175
40176 static int open_kcore(struct inode *inode, struct file *filp)
40177 {
40178+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40179+ return -EPERM;
40180+#endif
40181 if (!capable(CAP_SYS_RAWIO))
40182 return -EPERM;
40183 if (kcore_need_update)
40184diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40185--- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40186+++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40187@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40188 unsigned long pages[NR_LRU_LISTS];
40189 int lru;
40190
40191+ pax_track_stack();
40192+
40193 /*
40194 * display in kilobytes.
40195 */
40196@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40197 vmi.used >> 10,
40198 vmi.largest_chunk >> 10
40199 #ifdef CONFIG_MEMORY_FAILURE
40200- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40201+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40202 #endif
40203 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40204 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40205diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40206--- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40207+++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40208@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40209 if (len < 1)
40210 len = 1;
40211 seq_printf(m, "%*c", len, ' ');
40212- seq_path(m, &file->f_path, "");
40213+ seq_path(m, &file->f_path, "\n\\");
40214 }
40215
40216 seq_putc(m, '\n');
40217diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40218--- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40219+++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40220@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40221 struct task_struct *task;
40222 struct nsproxy *ns;
40223 struct net *net = NULL;
40224+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40225+ const struct cred *cred = current_cred();
40226+#endif
40227+
40228+#ifdef CONFIG_GRKERNSEC_PROC_USER
40229+ if (cred->fsuid)
40230+ return net;
40231+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40232+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40233+ return net;
40234+#endif
40235
40236 rcu_read_lock();
40237 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40238diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40239--- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40240+++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40241@@ -8,6 +8,8 @@
40242 #include <linux/namei.h>
40243 #include "internal.h"
40244
40245+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40246+
40247 static const struct dentry_operations proc_sys_dentry_operations;
40248 static const struct file_operations proc_sys_file_operations;
40249 static const struct inode_operations proc_sys_inode_operations;
40250@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40251 if (!p)
40252 goto out;
40253
40254+ if (gr_handle_sysctl(p, MAY_EXEC))
40255+ goto out;
40256+
40257 err = ERR_PTR(-ENOMEM);
40258 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40259 if (h)
40260@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40261 if (*pos < file->f_pos)
40262 continue;
40263
40264+ if (gr_handle_sysctl(table, 0))
40265+ continue;
40266+
40267 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40268 if (res)
40269 return res;
40270@@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40271 if (IS_ERR(head))
40272 return PTR_ERR(head);
40273
40274+ if (table && gr_handle_sysctl(table, MAY_EXEC))
40275+ return -ENOENT;
40276+
40277 generic_fillattr(inode, stat);
40278 if (table)
40279 stat->mode = (stat->mode & S_IFMT) | table->mode;
40280diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40281--- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40282+++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40283@@ -122,7 +122,15 @@ void __init proc_root_init(void)
40284 #ifdef CONFIG_PROC_DEVICETREE
40285 proc_device_tree_init();
40286 #endif
40287+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40288+#ifdef CONFIG_GRKERNSEC_PROC_USER
40289+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40290+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40291+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40292+#endif
40293+#else
40294 proc_mkdir("bus", NULL);
40295+#endif
40296 proc_sys_init();
40297 }
40298
40299diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40300--- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40301+++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40302@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40303 "VmExe:\t%8lu kB\n"
40304 "VmLib:\t%8lu kB\n"
40305 "VmPTE:\t%8lu kB\n"
40306- "VmSwap:\t%8lu kB\n",
40307- hiwater_vm << (PAGE_SHIFT-10),
40308+ "VmSwap:\t%8lu kB\n"
40309+
40310+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40311+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40312+#endif
40313+
40314+ ,hiwater_vm << (PAGE_SHIFT-10),
40315 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40316 mm->locked_vm << (PAGE_SHIFT-10),
40317 hiwater_rss << (PAGE_SHIFT-10),
40318@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40319 data << (PAGE_SHIFT-10),
40320 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40321 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40322- swap << (PAGE_SHIFT-10));
40323+ swap << (PAGE_SHIFT-10)
40324+
40325+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40326+ , mm->context.user_cs_base, mm->context.user_cs_limit
40327+#endif
40328+
40329+ );
40330 }
40331
40332 unsigned long task_vsize(struct mm_struct *mm)
40333@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40334 return ret;
40335 }
40336
40337+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40338+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40339+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
40340+ _mm->pax_flags & MF_PAX_SEGMEXEC))
40341+#endif
40342+
40343 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40344 {
40345 struct mm_struct *mm = vma->vm_mm;
40346@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40347 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40348 }
40349
40350- /* We don't show the stack guard page in /proc/maps */
40351+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40352+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40353+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40354+#else
40355 start = vma->vm_start;
40356- if (stack_guard_page_start(vma, start))
40357- start += PAGE_SIZE;
40358 end = vma->vm_end;
40359- if (stack_guard_page_end(vma, end))
40360- end -= PAGE_SIZE;
40361+#endif
40362
40363 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40364 start,
40365@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40366 flags & VM_WRITE ? 'w' : '-',
40367 flags & VM_EXEC ? 'x' : '-',
40368 flags & VM_MAYSHARE ? 's' : 'p',
40369+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40370+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40371+#else
40372 pgoff,
40373+#endif
40374 MAJOR(dev), MINOR(dev), ino, &len);
40375
40376 /*
40377@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40378 */
40379 if (file) {
40380 pad_len_spaces(m, len);
40381- seq_path(m, &file->f_path, "\n");
40382+ seq_path(m, &file->f_path, "\n\\");
40383 } else {
40384 const char *name = arch_vma_name(vma);
40385 if (!name) {
40386@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40387 if (vma->vm_start <= mm->brk &&
40388 vma->vm_end >= mm->start_brk) {
40389 name = "[heap]";
40390- } else if (vma->vm_start <= mm->start_stack &&
40391- vma->vm_end >= mm->start_stack) {
40392+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40393+ (vma->vm_start <= mm->start_stack &&
40394+ vma->vm_end >= mm->start_stack)) {
40395 name = "[stack]";
40396 }
40397 } else {
40398@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40399 };
40400
40401 memset(&mss, 0, sizeof mss);
40402- mss.vma = vma;
40403- /* mmap_sem is held in m_start */
40404- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40405- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40406-
40407+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40408+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40409+#endif
40410+ mss.vma = vma;
40411+ /* mmap_sem is held in m_start */
40412+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40413+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40414+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40415+ }
40416+#endif
40417 show_map_vma(m, vma);
40418
40419 seq_printf(m,
40420@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40421 "KernelPageSize: %8lu kB\n"
40422 "MMUPageSize: %8lu kB\n"
40423 "Locked: %8lu kB\n",
40424+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40425+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40426+#else
40427 (vma->vm_end - vma->vm_start) >> 10,
40428+#endif
40429 mss.resident >> 10,
40430 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40431 mss.shared_clean >> 10,
40432diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40433--- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40434+++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40435@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40436 else
40437 bytes += kobjsize(mm);
40438
40439- if (current->fs && current->fs->users > 1)
40440+ if (current->fs && atomic_read(&current->fs->users) > 1)
40441 sbytes += kobjsize(current->fs);
40442 else
40443 bytes += kobjsize(current->fs);
40444@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40445
40446 if (file) {
40447 pad_len_spaces(m, len);
40448- seq_path(m, &file->f_path, "");
40449+ seq_path(m, &file->f_path, "\n\\");
40450 } else if (mm) {
40451 if (vma->vm_start <= mm->start_stack &&
40452 vma->vm_end >= mm->start_stack) {
40453diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40454--- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40455+++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40456@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40457 void quota_send_warning(short type, unsigned int id, dev_t dev,
40458 const char warntype)
40459 {
40460- static atomic_t seq;
40461+ static atomic_unchecked_t seq;
40462 struct sk_buff *skb;
40463 void *msg_head;
40464 int ret;
40465@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40466 "VFS: Not enough memory to send quota warning.\n");
40467 return;
40468 }
40469- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40470+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40471 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40472 if (!msg_head) {
40473 printk(KERN_ERR
40474diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40475--- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40476+++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40477@@ -17,6 +17,7 @@
40478 #include <linux/security.h>
40479 #include <linux/syscalls.h>
40480 #include <linux/unistd.h>
40481+#include <linux/namei.h>
40482
40483 #include <asm/uaccess.h>
40484
40485@@ -67,6 +68,7 @@ struct old_linux_dirent {
40486
40487 struct readdir_callback {
40488 struct old_linux_dirent __user * dirent;
40489+ struct file * file;
40490 int result;
40491 };
40492
40493@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40494 buf->result = -EOVERFLOW;
40495 return -EOVERFLOW;
40496 }
40497+
40498+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40499+ return 0;
40500+
40501 buf->result++;
40502 dirent = buf->dirent;
40503 if (!access_ok(VERIFY_WRITE, dirent,
40504@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40505
40506 buf.result = 0;
40507 buf.dirent = dirent;
40508+ buf.file = file;
40509
40510 error = vfs_readdir(file, fillonedir, &buf);
40511 if (buf.result)
40512@@ -142,6 +149,7 @@ struct linux_dirent {
40513 struct getdents_callback {
40514 struct linux_dirent __user * current_dir;
40515 struct linux_dirent __user * previous;
40516+ struct file * file;
40517 int count;
40518 int error;
40519 };
40520@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40521 buf->error = -EOVERFLOW;
40522 return -EOVERFLOW;
40523 }
40524+
40525+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40526+ return 0;
40527+
40528 dirent = buf->previous;
40529 if (dirent) {
40530 if (__put_user(offset, &dirent->d_off))
40531@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40532 buf.previous = NULL;
40533 buf.count = count;
40534 buf.error = 0;
40535+ buf.file = file;
40536
40537 error = vfs_readdir(file, filldir, &buf);
40538 if (error >= 0)
40539@@ -229,6 +242,7 @@ out:
40540 struct getdents_callback64 {
40541 struct linux_dirent64 __user * current_dir;
40542 struct linux_dirent64 __user * previous;
40543+ struct file *file;
40544 int count;
40545 int error;
40546 };
40547@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40548 buf->error = -EINVAL; /* only used if we fail.. */
40549 if (reclen > buf->count)
40550 return -EINVAL;
40551+
40552+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40553+ return 0;
40554+
40555 dirent = buf->previous;
40556 if (dirent) {
40557 if (__put_user(offset, &dirent->d_off))
40558@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40559
40560 buf.current_dir = dirent;
40561 buf.previous = NULL;
40562+ buf.file = file;
40563 buf.count = count;
40564 buf.error = 0;
40565
40566diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40567--- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40568+++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40569@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40570 struct reiserfs_dir_entry de;
40571 int ret = 0;
40572
40573+ pax_track_stack();
40574+
40575 reiserfs_write_lock(inode->i_sb);
40576
40577 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40578diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40579--- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40580+++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40581@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40582 return;
40583 }
40584
40585- atomic_inc(&(fs_generation(tb->tb_sb)));
40586+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40587 do_balance_starts(tb);
40588
40589 /* balance leaf returns 0 except if combining L R and S into
40590diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40591--- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40592+++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40593@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40594 struct buffer_head *bh;
40595 int i, j;
40596
40597+ pax_track_stack();
40598+
40599 bh = __getblk(dev, block, bufsize);
40600 if (buffer_uptodate(bh))
40601 return (bh);
40602diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40603--- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40604+++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40605@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40606 unsigned long savelink = 1;
40607 struct timespec ctime;
40608
40609+ pax_track_stack();
40610+
40611 /* three balancings: (1) old name removal, (2) new name insertion
40612 and (3) maybe "save" link insertion
40613 stat data updates: (1) old directory,
40614diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40615--- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40616+++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40617@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40618 "SMALL_TAILS " : "NO_TAILS ",
40619 replay_only(sb) ? "REPLAY_ONLY " : "",
40620 convert_reiserfs(sb) ? "CONV " : "",
40621- atomic_read(&r->s_generation_counter),
40622+ atomic_read_unchecked(&r->s_generation_counter),
40623 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40624 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40625 SF(s_good_search_by_key_reada), SF(s_bmaps),
40626@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40627 struct journal_params *jp = &rs->s_v1.s_journal;
40628 char b[BDEVNAME_SIZE];
40629
40630+ pax_track_stack();
40631+
40632 seq_printf(m, /* on-disk fields */
40633 "jp_journal_1st_block: \t%i\n"
40634 "jp_journal_dev: \t%s[%x]\n"
40635diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40636--- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40637+++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40638@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40639 int iter = 0;
40640 #endif
40641
40642+ pax_track_stack();
40643+
40644 BUG_ON(!th->t_trans_id);
40645
40646 init_tb_struct(th, &s_del_balance, sb, path,
40647@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40648 int retval;
40649 int quota_cut_bytes = 0;
40650
40651+ pax_track_stack();
40652+
40653 BUG_ON(!th->t_trans_id);
40654
40655 le_key2cpu_key(&cpu_key, key);
40656@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40657 int quota_cut_bytes;
40658 loff_t tail_pos = 0;
40659
40660+ pax_track_stack();
40661+
40662 BUG_ON(!th->t_trans_id);
40663
40664 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40665@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40666 int retval;
40667 int fs_gen;
40668
40669+ pax_track_stack();
40670+
40671 BUG_ON(!th->t_trans_id);
40672
40673 fs_gen = get_generation(inode->i_sb);
40674@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40675 int fs_gen = 0;
40676 int quota_bytes = 0;
40677
40678+ pax_track_stack();
40679+
40680 BUG_ON(!th->t_trans_id);
40681
40682 if (inode) { /* Do we count quotas for item? */
40683diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40684--- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40685+++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40686@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40687 {.option_name = NULL}
40688 };
40689
40690+ pax_track_stack();
40691+
40692 *blocks = 0;
40693 if (!options || !*options)
40694 /* use default configuration: create tails, journaling on, no
40695diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40696--- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40697+++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40698@@ -20,6 +20,7 @@
40699 #include <linux/module.h>
40700 #include <linux/slab.h>
40701 #include <linux/poll.h>
40702+#include <linux/security.h>
40703 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40704 #include <linux/file.h>
40705 #include <linux/fdtable.h>
40706@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40707 int retval, i, timed_out = 0;
40708 unsigned long slack = 0;
40709
40710+ pax_track_stack();
40711+
40712 rcu_read_lock();
40713 retval = max_select_fd(n, fds);
40714 rcu_read_unlock();
40715@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40716 /* Allocate small arguments on the stack to save memory and be faster */
40717 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40718
40719+ pax_track_stack();
40720+
40721 ret = -EINVAL;
40722 if (n < 0)
40723 goto out_nofds;
40724@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40725 struct poll_list *walk = head;
40726 unsigned long todo = nfds;
40727
40728+ pax_track_stack();
40729+
40730+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40731 if (nfds > rlimit(RLIMIT_NOFILE))
40732 return -EINVAL;
40733
40734diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40735--- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40736+++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40737@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40738 return 0;
40739 }
40740 if (!m->buf) {
40741- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40742+ m->size = PAGE_SIZE;
40743+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40744 if (!m->buf)
40745 return -ENOMEM;
40746 }
40747@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40748 Eoverflow:
40749 m->op->stop(m, p);
40750 kfree(m->buf);
40751- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40752+ m->size <<= 1;
40753+ m->buf = kmalloc(m->size, GFP_KERNEL);
40754 return !m->buf ? -ENOMEM : -EAGAIN;
40755 }
40756
40757@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40758 m->version = file->f_version;
40759 /* grab buffer if we didn't have one */
40760 if (!m->buf) {
40761- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40762+ m->size = PAGE_SIZE;
40763+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40764 if (!m->buf)
40765 goto Enomem;
40766 }
40767@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40768 goto Fill;
40769 m->op->stop(m, p);
40770 kfree(m->buf);
40771- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40772+ m->size <<= 1;
40773+ m->buf = kmalloc(m->size, GFP_KERNEL);
40774 if (!m->buf)
40775 goto Enomem;
40776 m->count = 0;
40777@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40778 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40779 void *data)
40780 {
40781- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40782+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40783 int res = -ENOMEM;
40784
40785 if (op) {
40786diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40787--- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40788+++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40789@@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40790 pipe_lock(pipe);
40791
40792 for (;;) {
40793- if (!pipe->readers) {
40794+ if (!atomic_read(&pipe->readers)) {
40795 send_sig(SIGPIPE, current, 0);
40796 if (!ret)
40797 ret = -EPIPE;
40798@@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40799 do_wakeup = 0;
40800 }
40801
40802- pipe->waiting_writers++;
40803+ atomic_inc(&pipe->waiting_writers);
40804 pipe_wait(pipe);
40805- pipe->waiting_writers--;
40806+ atomic_dec(&pipe->waiting_writers);
40807 }
40808
40809 pipe_unlock(pipe);
40810@@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40811 .spd_release = spd_release_page,
40812 };
40813
40814+ pax_track_stack();
40815+
40816 if (splice_grow_spd(pipe, &spd))
40817 return -ENOMEM;
40818
40819@@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40820 old_fs = get_fs();
40821 set_fs(get_ds());
40822 /* The cast to a user pointer is valid due to the set_fs() */
40823- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40824+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40825 set_fs(old_fs);
40826
40827 return res;
40828@@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40829 old_fs = get_fs();
40830 set_fs(get_ds());
40831 /* The cast to a user pointer is valid due to the set_fs() */
40832- res = vfs_write(file, (const char __user *)buf, count, &pos);
40833+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40834 set_fs(old_fs);
40835
40836 return res;
40837@@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
40838 .spd_release = spd_release_page,
40839 };
40840
40841+ pax_track_stack();
40842+
40843 if (splice_grow_spd(pipe, &spd))
40844 return -ENOMEM;
40845
40846@@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
40847 goto err;
40848
40849 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40850- vec[i].iov_base = (void __user *) page_address(page);
40851+ vec[i].iov_base = (__force void __user *) page_address(page);
40852 vec[i].iov_len = this_len;
40853 spd.pages[i] = page;
40854 spd.nr_pages++;
40855@@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40856 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40857 {
40858 while (!pipe->nrbufs) {
40859- if (!pipe->writers)
40860+ if (!atomic_read(&pipe->writers))
40861 return 0;
40862
40863- if (!pipe->waiting_writers && sd->num_spliced)
40864+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40865 return 0;
40866
40867 if (sd->flags & SPLICE_F_NONBLOCK)
40868@@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
40869 * out of the pipe right after the splice_to_pipe(). So set
40870 * PIPE_READERS appropriately.
40871 */
40872- pipe->readers = 1;
40873+ atomic_set(&pipe->readers, 1);
40874
40875 current->splice_pipe = pipe;
40876 }
40877@@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
40878 };
40879 long ret;
40880
40881+ pax_track_stack();
40882+
40883 pipe = get_pipe_info(file);
40884 if (!pipe)
40885 return -EBADF;
40886@@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
40887 ret = -ERESTARTSYS;
40888 break;
40889 }
40890- if (!pipe->writers)
40891+ if (!atomic_read(&pipe->writers))
40892 break;
40893- if (!pipe->waiting_writers) {
40894+ if (!atomic_read(&pipe->waiting_writers)) {
40895 if (flags & SPLICE_F_NONBLOCK) {
40896 ret = -EAGAIN;
40897 break;
40898@@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
40899 pipe_lock(pipe);
40900
40901 while (pipe->nrbufs >= pipe->buffers) {
40902- if (!pipe->readers) {
40903+ if (!atomic_read(&pipe->readers)) {
40904 send_sig(SIGPIPE, current, 0);
40905 ret = -EPIPE;
40906 break;
40907@@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
40908 ret = -ERESTARTSYS;
40909 break;
40910 }
40911- pipe->waiting_writers++;
40912+ atomic_inc(&pipe->waiting_writers);
40913 pipe_wait(pipe);
40914- pipe->waiting_writers--;
40915+ atomic_dec(&pipe->waiting_writers);
40916 }
40917
40918 pipe_unlock(pipe);
40919@@ -1815,14 +1821,14 @@ retry:
40920 pipe_double_lock(ipipe, opipe);
40921
40922 do {
40923- if (!opipe->readers) {
40924+ if (!atomic_read(&opipe->readers)) {
40925 send_sig(SIGPIPE, current, 0);
40926 if (!ret)
40927 ret = -EPIPE;
40928 break;
40929 }
40930
40931- if (!ipipe->nrbufs && !ipipe->writers)
40932+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40933 break;
40934
40935 /*
40936@@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
40937 pipe_double_lock(ipipe, opipe);
40938
40939 do {
40940- if (!opipe->readers) {
40941+ if (!atomic_read(&opipe->readers)) {
40942 send_sig(SIGPIPE, current, 0);
40943 if (!ret)
40944 ret = -EPIPE;
40945@@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
40946 * return EAGAIN if we have the potential of some data in the
40947 * future, otherwise just return 0
40948 */
40949- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
40950+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
40951 ret = -EAGAIN;
40952
40953 pipe_unlock(ipipe);
40954diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
40955--- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
40956+++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
40957@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
40958
40959 struct sysfs_open_dirent {
40960 atomic_t refcnt;
40961- atomic_t event;
40962+ atomic_unchecked_t event;
40963 wait_queue_head_t poll;
40964 struct list_head buffers; /* goes through sysfs_buffer.list */
40965 };
40966@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
40967 if (!sysfs_get_active(attr_sd))
40968 return -ENODEV;
40969
40970- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
40971+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
40972 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
40973
40974 sysfs_put_active(attr_sd);
40975@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
40976 return -ENOMEM;
40977
40978 atomic_set(&new_od->refcnt, 0);
40979- atomic_set(&new_od->event, 1);
40980+ atomic_set_unchecked(&new_od->event, 1);
40981 init_waitqueue_head(&new_od->poll);
40982 INIT_LIST_HEAD(&new_od->buffers);
40983 goto retry;
40984@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
40985
40986 sysfs_put_active(attr_sd);
40987
40988- if (buffer->event != atomic_read(&od->event))
40989+ if (buffer->event != atomic_read_unchecked(&od->event))
40990 goto trigger;
40991
40992 return DEFAULT_POLLMASK;
40993@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
40994
40995 od = sd->s_attr.open;
40996 if (od) {
40997- atomic_inc(&od->event);
40998+ atomic_inc_unchecked(&od->event);
40999 wake_up_interruptible(&od->poll);
41000 }
41001
41002diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41003--- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41004+++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41005@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41006 .s_name = "",
41007 .s_count = ATOMIC_INIT(1),
41008 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41009+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41010+ .s_mode = S_IFDIR | S_IRWXU,
41011+#else
41012 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41013+#endif
41014 .s_ino = 1,
41015 };
41016
41017diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41018--- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41019+++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41020@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41021
41022 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41023 {
41024- char *page = nd_get_link(nd);
41025+ const char *page = nd_get_link(nd);
41026 if (!IS_ERR(page))
41027 free_page((unsigned long)page);
41028 }
41029diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41030--- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41031+++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41032@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41033 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41034 int lastblock = 0;
41035
41036+ pax_track_stack();
41037+
41038 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41039 prev_epos.block = iinfo->i_location;
41040 prev_epos.bh = NULL;
41041diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41042--- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41043+++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41044@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41045
41046 u8 udf_tag_checksum(const struct tag *t)
41047 {
41048- u8 *data = (u8 *)t;
41049+ const u8 *data = (const u8 *)t;
41050 u8 checksum = 0;
41051 int i;
41052 for (i = 0; i < sizeof(struct tag); ++i)
41053diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41054--- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41055+++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41056@@ -1,6 +1,7 @@
41057 #include <linux/compiler.h>
41058 #include <linux/file.h>
41059 #include <linux/fs.h>
41060+#include <linux/security.h>
41061 #include <linux/linkage.h>
41062 #include <linux/mount.h>
41063 #include <linux/namei.h>
41064@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41065 goto mnt_drop_write_and_out;
41066 }
41067 }
41068+
41069+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41070+ error = -EACCES;
41071+ goto mnt_drop_write_and_out;
41072+ }
41073+
41074 mutex_lock(&inode->i_mutex);
41075 error = notify_change(path->dentry, &newattrs);
41076 mutex_unlock(&inode->i_mutex);
41077diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41078--- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41079+++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41080@@ -17,8 +17,8 @@
41081 struct posix_acl *
41082 posix_acl_from_xattr(const void *value, size_t size)
41083 {
41084- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41085- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41086+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41087+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41088 int count;
41089 struct posix_acl *acl;
41090 struct posix_acl_entry *acl_e;
41091diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41092--- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41093+++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41094@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41095 * Extended attribute SET operations
41096 */
41097 static long
41098-setxattr(struct dentry *d, const char __user *name, const void __user *value,
41099+setxattr(struct path *path, const char __user *name, const void __user *value,
41100 size_t size, int flags)
41101 {
41102 int error;
41103@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41104 return PTR_ERR(kvalue);
41105 }
41106
41107- error = vfs_setxattr(d, kname, kvalue, size, flags);
41108+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41109+ error = -EACCES;
41110+ goto out;
41111+ }
41112+
41113+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41114+out:
41115 kfree(kvalue);
41116 return error;
41117 }
41118@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41119 return error;
41120 error = mnt_want_write(path.mnt);
41121 if (!error) {
41122- error = setxattr(path.dentry, name, value, size, flags);
41123+ error = setxattr(&path, name, value, size, flags);
41124 mnt_drop_write(path.mnt);
41125 }
41126 path_put(&path);
41127@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41128 return error;
41129 error = mnt_want_write(path.mnt);
41130 if (!error) {
41131- error = setxattr(path.dentry, name, value, size, flags);
41132+ error = setxattr(&path, name, value, size, flags);
41133 mnt_drop_write(path.mnt);
41134 }
41135 path_put(&path);
41136@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41137 const void __user *,value, size_t, size, int, flags)
41138 {
41139 struct file *f;
41140- struct dentry *dentry;
41141 int error = -EBADF;
41142
41143 f = fget(fd);
41144 if (!f)
41145 return error;
41146- dentry = f->f_path.dentry;
41147- audit_inode(NULL, dentry);
41148+ audit_inode(NULL, f->f_path.dentry);
41149 error = mnt_want_write_file(f);
41150 if (!error) {
41151- error = setxattr(dentry, name, value, size, flags);
41152+ error = setxattr(&f->f_path, name, value, size, flags);
41153 mnt_drop_write(f->f_path.mnt);
41154 }
41155 fput(f);
41156diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41157--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41158+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41159@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41160 xfs_fsop_geom_t fsgeo;
41161 int error;
41162
41163+ memset(&fsgeo, 0, sizeof(fsgeo));
41164 error = xfs_fs_geometry(mp, &fsgeo, 3);
41165 if (error)
41166 return -error;
41167diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41168--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41169+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41170@@ -128,7 +128,7 @@ xfs_find_handle(
41171 }
41172
41173 error = -EFAULT;
41174- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41175+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41176 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41177 goto out_put;
41178
41179diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41180--- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41181+++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41182@@ -437,7 +437,7 @@ xfs_vn_put_link(
41183 struct nameidata *nd,
41184 void *p)
41185 {
41186- char *s = nd_get_link(nd);
41187+ const char *s = nd_get_link(nd);
41188
41189 if (!IS_ERR(s))
41190 kfree(s);
41191diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41192--- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41193+++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41194@@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41195 int nmap,
41196 int ret_nmap);
41197 #else
41198-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41199+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41200 #endif /* DEBUG */
41201
41202 STATIC int
41203diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41204--- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41205+++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41206@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41207 }
41208
41209 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41210- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41211+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41212+ char name[sfep->namelen];
41213+ memcpy(name, sfep->name, sfep->namelen);
41214+ if (filldir(dirent, name, sfep->namelen,
41215+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
41216+ *offset = off & 0x7fffffff;
41217+ return 0;
41218+ }
41219+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41220 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41221 *offset = off & 0x7fffffff;
41222 return 0;
41223diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41224--- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41225+++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41226@@ -0,0 +1,105 @@
41227+#include <linux/kernel.h>
41228+#include <linux/mm.h>
41229+#include <linux/slab.h>
41230+#include <linux/vmalloc.h>
41231+#include <linux/gracl.h>
41232+#include <linux/grsecurity.h>
41233+
41234+static unsigned long alloc_stack_next = 1;
41235+static unsigned long alloc_stack_size = 1;
41236+static void **alloc_stack;
41237+
41238+static __inline__ int
41239+alloc_pop(void)
41240+{
41241+ if (alloc_stack_next == 1)
41242+ return 0;
41243+
41244+ kfree(alloc_stack[alloc_stack_next - 2]);
41245+
41246+ alloc_stack_next--;
41247+
41248+ return 1;
41249+}
41250+
41251+static __inline__ int
41252+alloc_push(void *buf)
41253+{
41254+ if (alloc_stack_next >= alloc_stack_size)
41255+ return 1;
41256+
41257+ alloc_stack[alloc_stack_next - 1] = buf;
41258+
41259+ alloc_stack_next++;
41260+
41261+ return 0;
41262+}
41263+
41264+void *
41265+acl_alloc(unsigned long len)
41266+{
41267+ void *ret = NULL;
41268+
41269+ if (!len || len > PAGE_SIZE)
41270+ goto out;
41271+
41272+ ret = kmalloc(len, GFP_KERNEL);
41273+
41274+ if (ret) {
41275+ if (alloc_push(ret)) {
41276+ kfree(ret);
41277+ ret = NULL;
41278+ }
41279+ }
41280+
41281+out:
41282+ return ret;
41283+}
41284+
41285+void *
41286+acl_alloc_num(unsigned long num, unsigned long len)
41287+{
41288+ if (!len || (num > (PAGE_SIZE / len)))
41289+ return NULL;
41290+
41291+ return acl_alloc(num * len);
41292+}
41293+
41294+void
41295+acl_free_all(void)
41296+{
41297+ if (gr_acl_is_enabled() || !alloc_stack)
41298+ return;
41299+
41300+ while (alloc_pop()) ;
41301+
41302+ if (alloc_stack) {
41303+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41304+ kfree(alloc_stack);
41305+ else
41306+ vfree(alloc_stack);
41307+ }
41308+
41309+ alloc_stack = NULL;
41310+ alloc_stack_size = 1;
41311+ alloc_stack_next = 1;
41312+
41313+ return;
41314+}
41315+
41316+int
41317+acl_alloc_stack_init(unsigned long size)
41318+{
41319+ if ((size * sizeof (void *)) <= PAGE_SIZE)
41320+ alloc_stack =
41321+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41322+ else
41323+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
41324+
41325+ alloc_stack_size = size;
41326+
41327+ if (!alloc_stack)
41328+ return 0;
41329+ else
41330+ return 1;
41331+}
41332diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41333--- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41334+++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41335@@ -0,0 +1,4106 @@
41336+#include <linux/kernel.h>
41337+#include <linux/module.h>
41338+#include <linux/sched.h>
41339+#include <linux/mm.h>
41340+#include <linux/file.h>
41341+#include <linux/fs.h>
41342+#include <linux/namei.h>
41343+#include <linux/mount.h>
41344+#include <linux/tty.h>
41345+#include <linux/proc_fs.h>
41346+#include <linux/lglock.h>
41347+#include <linux/slab.h>
41348+#include <linux/vmalloc.h>
41349+#include <linux/types.h>
41350+#include <linux/sysctl.h>
41351+#include <linux/netdevice.h>
41352+#include <linux/ptrace.h>
41353+#include <linux/gracl.h>
41354+#include <linux/gralloc.h>
41355+#include <linux/grsecurity.h>
41356+#include <linux/grinternal.h>
41357+#include <linux/pid_namespace.h>
41358+#include <linux/fdtable.h>
41359+#include <linux/percpu.h>
41360+
41361+#include <asm/uaccess.h>
41362+#include <asm/errno.h>
41363+#include <asm/mman.h>
41364+
41365+static struct acl_role_db acl_role_set;
41366+static struct name_db name_set;
41367+static struct inodev_db inodev_set;
41368+
41369+/* for keeping track of userspace pointers used for subjects, so we
41370+ can share references in the kernel as well
41371+*/
41372+
41373+static struct path real_root;
41374+
41375+static struct acl_subj_map_db subj_map_set;
41376+
41377+static struct acl_role_label *default_role;
41378+
41379+static struct acl_role_label *role_list;
41380+
41381+static u16 acl_sp_role_value;
41382+
41383+extern char *gr_shared_page[4];
41384+static DEFINE_MUTEX(gr_dev_mutex);
41385+DEFINE_RWLOCK(gr_inode_lock);
41386+
41387+struct gr_arg *gr_usermode;
41388+
41389+static unsigned int gr_status __read_only = GR_STATUS_INIT;
41390+
41391+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41392+extern void gr_clear_learn_entries(void);
41393+
41394+#ifdef CONFIG_GRKERNSEC_RESLOG
41395+extern void gr_log_resource(const struct task_struct *task,
41396+ const int res, const unsigned long wanted, const int gt);
41397+#endif
41398+
41399+unsigned char *gr_system_salt;
41400+unsigned char *gr_system_sum;
41401+
41402+static struct sprole_pw **acl_special_roles = NULL;
41403+static __u16 num_sprole_pws = 0;
41404+
41405+static struct acl_role_label *kernel_role = NULL;
41406+
41407+static unsigned int gr_auth_attempts = 0;
41408+static unsigned long gr_auth_expires = 0UL;
41409+
41410+#ifdef CONFIG_NET
41411+extern struct vfsmount *sock_mnt;
41412+#endif
41413+
41414+extern struct vfsmount *pipe_mnt;
41415+extern struct vfsmount *shm_mnt;
41416+#ifdef CONFIG_HUGETLBFS
41417+extern struct vfsmount *hugetlbfs_vfsmount;
41418+#endif
41419+
41420+static struct acl_object_label *fakefs_obj_rw;
41421+static struct acl_object_label *fakefs_obj_rwx;
41422+
41423+extern int gr_init_uidset(void);
41424+extern void gr_free_uidset(void);
41425+extern void gr_remove_uid(uid_t uid);
41426+extern int gr_find_uid(uid_t uid);
41427+
41428+DECLARE_BRLOCK(vfsmount_lock);
41429+
41430+__inline__ int
41431+gr_acl_is_enabled(void)
41432+{
41433+ return (gr_status & GR_READY);
41434+}
41435+
41436+#ifdef CONFIG_BTRFS_FS
41437+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41438+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41439+#endif
41440+
41441+static inline dev_t __get_dev(const struct dentry *dentry)
41442+{
41443+#ifdef CONFIG_BTRFS_FS
41444+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41445+ return get_btrfs_dev_from_inode(dentry->d_inode);
41446+ else
41447+#endif
41448+ return dentry->d_inode->i_sb->s_dev;
41449+}
41450+
41451+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41452+{
41453+ return __get_dev(dentry);
41454+}
41455+
41456+static char gr_task_roletype_to_char(struct task_struct *task)
41457+{
41458+ switch (task->role->roletype &
41459+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41460+ GR_ROLE_SPECIAL)) {
41461+ case GR_ROLE_DEFAULT:
41462+ return 'D';
41463+ case GR_ROLE_USER:
41464+ return 'U';
41465+ case GR_ROLE_GROUP:
41466+ return 'G';
41467+ case GR_ROLE_SPECIAL:
41468+ return 'S';
41469+ }
41470+
41471+ return 'X';
41472+}
41473+
41474+char gr_roletype_to_char(void)
41475+{
41476+ return gr_task_roletype_to_char(current);
41477+}
41478+
41479+__inline__ int
41480+gr_acl_tpe_check(void)
41481+{
41482+ if (unlikely(!(gr_status & GR_READY)))
41483+ return 0;
41484+ if (current->role->roletype & GR_ROLE_TPE)
41485+ return 1;
41486+ else
41487+ return 0;
41488+}
41489+
41490+int
41491+gr_handle_rawio(const struct inode *inode)
41492+{
41493+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41494+ if (inode && S_ISBLK(inode->i_mode) &&
41495+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41496+ !capable(CAP_SYS_RAWIO))
41497+ return 1;
41498+#endif
41499+ return 0;
41500+}
41501+
41502+static int
41503+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41504+{
41505+ if (likely(lena != lenb))
41506+ return 0;
41507+
41508+ return !memcmp(a, b, lena);
41509+}
41510+
41511+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41512+{
41513+ *buflen -= namelen;
41514+ if (*buflen < 0)
41515+ return -ENAMETOOLONG;
41516+ *buffer -= namelen;
41517+ memcpy(*buffer, str, namelen);
41518+ return 0;
41519+}
41520+
41521+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41522+{
41523+ return prepend(buffer, buflen, name->name, name->len);
41524+}
41525+
41526+static int prepend_path(const struct path *path, struct path *root,
41527+ char **buffer, int *buflen)
41528+{
41529+ struct dentry *dentry = path->dentry;
41530+ struct vfsmount *vfsmnt = path->mnt;
41531+ bool slash = false;
41532+ int error = 0;
41533+
41534+ while (dentry != root->dentry || vfsmnt != root->mnt) {
41535+ struct dentry * parent;
41536+
41537+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41538+ /* Global root? */
41539+ if (vfsmnt->mnt_parent == vfsmnt) {
41540+ goto out;
41541+ }
41542+ dentry = vfsmnt->mnt_mountpoint;
41543+ vfsmnt = vfsmnt->mnt_parent;
41544+ continue;
41545+ }
41546+ parent = dentry->d_parent;
41547+ prefetch(parent);
41548+ spin_lock(&dentry->d_lock);
41549+ error = prepend_name(buffer, buflen, &dentry->d_name);
41550+ spin_unlock(&dentry->d_lock);
41551+ if (!error)
41552+ error = prepend(buffer, buflen, "/", 1);
41553+ if (error)
41554+ break;
41555+
41556+ slash = true;
41557+ dentry = parent;
41558+ }
41559+
41560+out:
41561+ if (!error && !slash)
41562+ error = prepend(buffer, buflen, "/", 1);
41563+
41564+ return error;
41565+}
41566+
41567+/* this must be called with vfsmount_lock and rename_lock held */
41568+
41569+static char *__our_d_path(const struct path *path, struct path *root,
41570+ char *buf, int buflen)
41571+{
41572+ char *res = buf + buflen;
41573+ int error;
41574+
41575+ prepend(&res, &buflen, "\0", 1);
41576+ error = prepend_path(path, root, &res, &buflen);
41577+ if (error)
41578+ return ERR_PTR(error);
41579+
41580+ return res;
41581+}
41582+
41583+static char *
41584+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41585+{
41586+ char *retval;
41587+
41588+ retval = __our_d_path(path, root, buf, buflen);
41589+ if (unlikely(IS_ERR(retval)))
41590+ retval = strcpy(buf, "<path too long>");
41591+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41592+ retval[1] = '\0';
41593+
41594+ return retval;
41595+}
41596+
41597+static char *
41598+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41599+ char *buf, int buflen)
41600+{
41601+ struct path path;
41602+ char *res;
41603+
41604+ path.dentry = (struct dentry *)dentry;
41605+ path.mnt = (struct vfsmount *)vfsmnt;
41606+
41607+ /* we can use real_root.dentry, real_root.mnt, because this is only called
41608+ by the RBAC system */
41609+ res = gen_full_path(&path, &real_root, buf, buflen);
41610+
41611+ return res;
41612+}
41613+
41614+static char *
41615+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41616+ char *buf, int buflen)
41617+{
41618+ char *res;
41619+ struct path path;
41620+ struct path root;
41621+ struct task_struct *reaper = &init_task;
41622+
41623+ path.dentry = (struct dentry *)dentry;
41624+ path.mnt = (struct vfsmount *)vfsmnt;
41625+
41626+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41627+ get_fs_root(reaper->fs, &root);
41628+
41629+ write_seqlock(&rename_lock);
41630+ br_read_lock(vfsmount_lock);
41631+ res = gen_full_path(&path, &root, buf, buflen);
41632+ br_read_unlock(vfsmount_lock);
41633+ write_sequnlock(&rename_lock);
41634+
41635+ path_put(&root);
41636+ return res;
41637+}
41638+
41639+static char *
41640+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41641+{
41642+ char *ret;
41643+ write_seqlock(&rename_lock);
41644+ br_read_lock(vfsmount_lock);
41645+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41646+ PAGE_SIZE);
41647+ br_read_unlock(vfsmount_lock);
41648+ write_sequnlock(&rename_lock);
41649+ return ret;
41650+}
41651+
41652+char *
41653+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41654+{
41655+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41656+ PAGE_SIZE);
41657+}
41658+
41659+char *
41660+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41661+{
41662+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41663+ PAGE_SIZE);
41664+}
41665+
41666+char *
41667+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41668+{
41669+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41670+ PAGE_SIZE);
41671+}
41672+
41673+char *
41674+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41675+{
41676+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41677+ PAGE_SIZE);
41678+}
41679+
41680+char *
41681+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41682+{
41683+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41684+ PAGE_SIZE);
41685+}
41686+
41687+__inline__ __u32
41688+to_gr_audit(const __u32 reqmode)
41689+{
41690+ /* masks off auditable permission flags, then shifts them to create
41691+ auditing flags, and adds the special case of append auditing if
41692+ we're requesting write */
41693+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41694+}
41695+
41696+struct acl_subject_label *
41697+lookup_subject_map(const struct acl_subject_label *userp)
41698+{
41699+ unsigned int index = shash(userp, subj_map_set.s_size);
41700+ struct subject_map *match;
41701+
41702+ match = subj_map_set.s_hash[index];
41703+
41704+ while (match && match->user != userp)
41705+ match = match->next;
41706+
41707+ if (match != NULL)
41708+ return match->kernel;
41709+ else
41710+ return NULL;
41711+}
41712+
41713+static void
41714+insert_subj_map_entry(struct subject_map *subjmap)
41715+{
41716+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41717+ struct subject_map **curr;
41718+
41719+ subjmap->prev = NULL;
41720+
41721+ curr = &subj_map_set.s_hash[index];
41722+ if (*curr != NULL)
41723+ (*curr)->prev = subjmap;
41724+
41725+ subjmap->next = *curr;
41726+ *curr = subjmap;
41727+
41728+ return;
41729+}
41730+
41731+static struct acl_role_label *
41732+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41733+ const gid_t gid)
41734+{
41735+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41736+ struct acl_role_label *match;
41737+ struct role_allowed_ip *ipp;
41738+ unsigned int x;
41739+ u32 curr_ip = task->signal->curr_ip;
41740+
41741+ task->signal->saved_ip = curr_ip;
41742+
41743+ match = acl_role_set.r_hash[index];
41744+
41745+ while (match) {
41746+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41747+ for (x = 0; x < match->domain_child_num; x++) {
41748+ if (match->domain_children[x] == uid)
41749+ goto found;
41750+ }
41751+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41752+ break;
41753+ match = match->next;
41754+ }
41755+found:
41756+ if (match == NULL) {
41757+ try_group:
41758+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41759+ match = acl_role_set.r_hash[index];
41760+
41761+ while (match) {
41762+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41763+ for (x = 0; x < match->domain_child_num; x++) {
41764+ if (match->domain_children[x] == gid)
41765+ goto found2;
41766+ }
41767+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41768+ break;
41769+ match = match->next;
41770+ }
41771+found2:
41772+ if (match == NULL)
41773+ match = default_role;
41774+ if (match->allowed_ips == NULL)
41775+ return match;
41776+ else {
41777+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41778+ if (likely
41779+ ((ntohl(curr_ip) & ipp->netmask) ==
41780+ (ntohl(ipp->addr) & ipp->netmask)))
41781+ return match;
41782+ }
41783+ match = default_role;
41784+ }
41785+ } else if (match->allowed_ips == NULL) {
41786+ return match;
41787+ } else {
41788+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41789+ if (likely
41790+ ((ntohl(curr_ip) & ipp->netmask) ==
41791+ (ntohl(ipp->addr) & ipp->netmask)))
41792+ return match;
41793+ }
41794+ goto try_group;
41795+ }
41796+
41797+ return match;
41798+}
41799+
41800+struct acl_subject_label *
41801+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41802+ const struct acl_role_label *role)
41803+{
41804+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41805+ struct acl_subject_label *match;
41806+
41807+ match = role->subj_hash[index];
41808+
41809+ while (match && (match->inode != ino || match->device != dev ||
41810+ (match->mode & GR_DELETED))) {
41811+ match = match->next;
41812+ }
41813+
41814+ if (match && !(match->mode & GR_DELETED))
41815+ return match;
41816+ else
41817+ return NULL;
41818+}
41819+
41820+struct acl_subject_label *
41821+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41822+ const struct acl_role_label *role)
41823+{
41824+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41825+ struct acl_subject_label *match;
41826+
41827+ match = role->subj_hash[index];
41828+
41829+ while (match && (match->inode != ino || match->device != dev ||
41830+ !(match->mode & GR_DELETED))) {
41831+ match = match->next;
41832+ }
41833+
41834+ if (match && (match->mode & GR_DELETED))
41835+ return match;
41836+ else
41837+ return NULL;
41838+}
41839+
41840+static struct acl_object_label *
41841+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41842+ const struct acl_subject_label *subj)
41843+{
41844+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41845+ struct acl_object_label *match;
41846+
41847+ match = subj->obj_hash[index];
41848+
41849+ while (match && (match->inode != ino || match->device != dev ||
41850+ (match->mode & GR_DELETED))) {
41851+ match = match->next;
41852+ }
41853+
41854+ if (match && !(match->mode & GR_DELETED))
41855+ return match;
41856+ else
41857+ return NULL;
41858+}
41859+
41860+static struct acl_object_label *
41861+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41862+ const struct acl_subject_label *subj)
41863+{
41864+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41865+ struct acl_object_label *match;
41866+
41867+ match = subj->obj_hash[index];
41868+
41869+ while (match && (match->inode != ino || match->device != dev ||
41870+ !(match->mode & GR_DELETED))) {
41871+ match = match->next;
41872+ }
41873+
41874+ if (match && (match->mode & GR_DELETED))
41875+ return match;
41876+
41877+ match = subj->obj_hash[index];
41878+
41879+ while (match && (match->inode != ino || match->device != dev ||
41880+ (match->mode & GR_DELETED))) {
41881+ match = match->next;
41882+ }
41883+
41884+ if (match && !(match->mode & GR_DELETED))
41885+ return match;
41886+ else
41887+ return NULL;
41888+}
41889+
41890+static struct name_entry *
41891+lookup_name_entry(const char *name)
41892+{
41893+ unsigned int len = strlen(name);
41894+ unsigned int key = full_name_hash(name, len);
41895+ unsigned int index = key % name_set.n_size;
41896+ struct name_entry *match;
41897+
41898+ match = name_set.n_hash[index];
41899+
41900+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41901+ match = match->next;
41902+
41903+ return match;
41904+}
41905+
41906+static struct name_entry *
41907+lookup_name_entry_create(const char *name)
41908+{
41909+ unsigned int len = strlen(name);
41910+ unsigned int key = full_name_hash(name, len);
41911+ unsigned int index = key % name_set.n_size;
41912+ struct name_entry *match;
41913+
41914+ match = name_set.n_hash[index];
41915+
41916+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41917+ !match->deleted))
41918+ match = match->next;
41919+
41920+ if (match && match->deleted)
41921+ return match;
41922+
41923+ match = name_set.n_hash[index];
41924+
41925+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41926+ match->deleted))
41927+ match = match->next;
41928+
41929+ if (match && !match->deleted)
41930+ return match;
41931+ else
41932+ return NULL;
41933+}
41934+
41935+static struct inodev_entry *
41936+lookup_inodev_entry(const ino_t ino, const dev_t dev)
41937+{
41938+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
41939+ struct inodev_entry *match;
41940+
41941+ match = inodev_set.i_hash[index];
41942+
41943+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
41944+ match = match->next;
41945+
41946+ return match;
41947+}
41948+
41949+static void
41950+insert_inodev_entry(struct inodev_entry *entry)
41951+{
41952+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
41953+ inodev_set.i_size);
41954+ struct inodev_entry **curr;
41955+
41956+ entry->prev = NULL;
41957+
41958+ curr = &inodev_set.i_hash[index];
41959+ if (*curr != NULL)
41960+ (*curr)->prev = entry;
41961+
41962+ entry->next = *curr;
41963+ *curr = entry;
41964+
41965+ return;
41966+}
41967+
41968+static void
41969+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
41970+{
41971+ unsigned int index =
41972+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
41973+ struct acl_role_label **curr;
41974+ struct acl_role_label *tmp;
41975+
41976+ curr = &acl_role_set.r_hash[index];
41977+
41978+ /* if role was already inserted due to domains and already has
41979+ a role in the same bucket as it attached, then we need to
41980+ combine these two buckets
41981+ */
41982+ if (role->next) {
41983+ tmp = role->next;
41984+ while (tmp->next)
41985+ tmp = tmp->next;
41986+ tmp->next = *curr;
41987+ } else
41988+ role->next = *curr;
41989+ *curr = role;
41990+
41991+ return;
41992+}
41993+
41994+static void
41995+insert_acl_role_label(struct acl_role_label *role)
41996+{
41997+ int i;
41998+
41999+ if (role_list == NULL) {
42000+ role_list = role;
42001+ role->prev = NULL;
42002+ } else {
42003+ role->prev = role_list;
42004+ role_list = role;
42005+ }
42006+
42007+ /* used for hash chains */
42008+ role->next = NULL;
42009+
42010+ if (role->roletype & GR_ROLE_DOMAIN) {
42011+ for (i = 0; i < role->domain_child_num; i++)
42012+ __insert_acl_role_label(role, role->domain_children[i]);
42013+ } else
42014+ __insert_acl_role_label(role, role->uidgid);
42015+}
42016+
42017+static int
42018+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42019+{
42020+ struct name_entry **curr, *nentry;
42021+ struct inodev_entry *ientry;
42022+ unsigned int len = strlen(name);
42023+ unsigned int key = full_name_hash(name, len);
42024+ unsigned int index = key % name_set.n_size;
42025+
42026+ curr = &name_set.n_hash[index];
42027+
42028+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42029+ curr = &((*curr)->next);
42030+
42031+ if (*curr != NULL)
42032+ return 1;
42033+
42034+ nentry = acl_alloc(sizeof (struct name_entry));
42035+ if (nentry == NULL)
42036+ return 0;
42037+ ientry = acl_alloc(sizeof (struct inodev_entry));
42038+ if (ientry == NULL)
42039+ return 0;
42040+ ientry->nentry = nentry;
42041+
42042+ nentry->key = key;
42043+ nentry->name = name;
42044+ nentry->inode = inode;
42045+ nentry->device = device;
42046+ nentry->len = len;
42047+ nentry->deleted = deleted;
42048+
42049+ nentry->prev = NULL;
42050+ curr = &name_set.n_hash[index];
42051+ if (*curr != NULL)
42052+ (*curr)->prev = nentry;
42053+ nentry->next = *curr;
42054+ *curr = nentry;
42055+
42056+ /* insert us into the table searchable by inode/dev */
42057+ insert_inodev_entry(ientry);
42058+
42059+ return 1;
42060+}
42061+
42062+static void
42063+insert_acl_obj_label(struct acl_object_label *obj,
42064+ struct acl_subject_label *subj)
42065+{
42066+ unsigned int index =
42067+ fhash(obj->inode, obj->device, subj->obj_hash_size);
42068+ struct acl_object_label **curr;
42069+
42070+
42071+ obj->prev = NULL;
42072+
42073+ curr = &subj->obj_hash[index];
42074+ if (*curr != NULL)
42075+ (*curr)->prev = obj;
42076+
42077+ obj->next = *curr;
42078+ *curr = obj;
42079+
42080+ return;
42081+}
42082+
42083+static void
42084+insert_acl_subj_label(struct acl_subject_label *obj,
42085+ struct acl_role_label *role)
42086+{
42087+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42088+ struct acl_subject_label **curr;
42089+
42090+ obj->prev = NULL;
42091+
42092+ curr = &role->subj_hash[index];
42093+ if (*curr != NULL)
42094+ (*curr)->prev = obj;
42095+
42096+ obj->next = *curr;
42097+ *curr = obj;
42098+
42099+ return;
42100+}
42101+
42102+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42103+
42104+static void *
42105+create_table(__u32 * len, int elementsize)
42106+{
42107+ unsigned int table_sizes[] = {
42108+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42109+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42110+ 4194301, 8388593, 16777213, 33554393, 67108859
42111+ };
42112+ void *newtable = NULL;
42113+ unsigned int pwr = 0;
42114+
42115+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42116+ table_sizes[pwr] <= *len)
42117+ pwr++;
42118+
42119+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42120+ return newtable;
42121+
42122+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42123+ newtable =
42124+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42125+ else
42126+ newtable = vmalloc(table_sizes[pwr] * elementsize);
42127+
42128+ *len = table_sizes[pwr];
42129+
42130+ return newtable;
42131+}
42132+
42133+static int
42134+init_variables(const struct gr_arg *arg)
42135+{
42136+ struct task_struct *reaper = &init_task;
42137+ unsigned int stacksize;
42138+
42139+ subj_map_set.s_size = arg->role_db.num_subjects;
42140+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42141+ name_set.n_size = arg->role_db.num_objects;
42142+ inodev_set.i_size = arg->role_db.num_objects;
42143+
42144+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
42145+ !name_set.n_size || !inodev_set.i_size)
42146+ return 1;
42147+
42148+ if (!gr_init_uidset())
42149+ return 1;
42150+
42151+ /* set up the stack that holds allocation info */
42152+
42153+ stacksize = arg->role_db.num_pointers + 5;
42154+
42155+ if (!acl_alloc_stack_init(stacksize))
42156+ return 1;
42157+
42158+ /* grab reference for the real root dentry and vfsmount */
42159+ get_fs_root(reaper->fs, &real_root);
42160+
42161+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42162+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42163+#endif
42164+
42165+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42166+ if (fakefs_obj_rw == NULL)
42167+ return 1;
42168+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42169+
42170+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42171+ if (fakefs_obj_rwx == NULL)
42172+ return 1;
42173+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42174+
42175+ subj_map_set.s_hash =
42176+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42177+ acl_role_set.r_hash =
42178+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42179+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42180+ inodev_set.i_hash =
42181+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42182+
42183+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42184+ !name_set.n_hash || !inodev_set.i_hash)
42185+ return 1;
42186+
42187+ memset(subj_map_set.s_hash, 0,
42188+ sizeof(struct subject_map *) * subj_map_set.s_size);
42189+ memset(acl_role_set.r_hash, 0,
42190+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
42191+ memset(name_set.n_hash, 0,
42192+ sizeof (struct name_entry *) * name_set.n_size);
42193+ memset(inodev_set.i_hash, 0,
42194+ sizeof (struct inodev_entry *) * inodev_set.i_size);
42195+
42196+ return 0;
42197+}
42198+
42199+/* free information not needed after startup
42200+ currently contains user->kernel pointer mappings for subjects
42201+*/
42202+
42203+static void
42204+free_init_variables(void)
42205+{
42206+ __u32 i;
42207+
42208+ if (subj_map_set.s_hash) {
42209+ for (i = 0; i < subj_map_set.s_size; i++) {
42210+ if (subj_map_set.s_hash[i]) {
42211+ kfree(subj_map_set.s_hash[i]);
42212+ subj_map_set.s_hash[i] = NULL;
42213+ }
42214+ }
42215+
42216+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42217+ PAGE_SIZE)
42218+ kfree(subj_map_set.s_hash);
42219+ else
42220+ vfree(subj_map_set.s_hash);
42221+ }
42222+
42223+ return;
42224+}
42225+
42226+static void
42227+free_variables(void)
42228+{
42229+ struct acl_subject_label *s;
42230+ struct acl_role_label *r;
42231+ struct task_struct *task, *task2;
42232+ unsigned int x;
42233+
42234+ gr_clear_learn_entries();
42235+
42236+ read_lock(&tasklist_lock);
42237+ do_each_thread(task2, task) {
42238+ task->acl_sp_role = 0;
42239+ task->acl_role_id = 0;
42240+ task->acl = NULL;
42241+ task->role = NULL;
42242+ } while_each_thread(task2, task);
42243+ read_unlock(&tasklist_lock);
42244+
42245+ /* release the reference to the real root dentry and vfsmount */
42246+ path_put(&real_root);
42247+
42248+ /* free all object hash tables */
42249+
42250+ FOR_EACH_ROLE_START(r)
42251+ if (r->subj_hash == NULL)
42252+ goto next_role;
42253+ FOR_EACH_SUBJECT_START(r, s, x)
42254+ if (s->obj_hash == NULL)
42255+ break;
42256+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42257+ kfree(s->obj_hash);
42258+ else
42259+ vfree(s->obj_hash);
42260+ FOR_EACH_SUBJECT_END(s, x)
42261+ FOR_EACH_NESTED_SUBJECT_START(r, s)
42262+ if (s->obj_hash == NULL)
42263+ break;
42264+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42265+ kfree(s->obj_hash);
42266+ else
42267+ vfree(s->obj_hash);
42268+ FOR_EACH_NESTED_SUBJECT_END(s)
42269+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42270+ kfree(r->subj_hash);
42271+ else
42272+ vfree(r->subj_hash);
42273+ r->subj_hash = NULL;
42274+next_role:
42275+ FOR_EACH_ROLE_END(r)
42276+
42277+ acl_free_all();
42278+
42279+ if (acl_role_set.r_hash) {
42280+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42281+ PAGE_SIZE)
42282+ kfree(acl_role_set.r_hash);
42283+ else
42284+ vfree(acl_role_set.r_hash);
42285+ }
42286+ if (name_set.n_hash) {
42287+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
42288+ PAGE_SIZE)
42289+ kfree(name_set.n_hash);
42290+ else
42291+ vfree(name_set.n_hash);
42292+ }
42293+
42294+ if (inodev_set.i_hash) {
42295+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42296+ PAGE_SIZE)
42297+ kfree(inodev_set.i_hash);
42298+ else
42299+ vfree(inodev_set.i_hash);
42300+ }
42301+
42302+ gr_free_uidset();
42303+
42304+ memset(&name_set, 0, sizeof (struct name_db));
42305+ memset(&inodev_set, 0, sizeof (struct inodev_db));
42306+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42307+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42308+
42309+ default_role = NULL;
42310+ role_list = NULL;
42311+
42312+ return;
42313+}
42314+
42315+static __u32
42316+count_user_objs(struct acl_object_label *userp)
42317+{
42318+ struct acl_object_label o_tmp;
42319+ __u32 num = 0;
42320+
42321+ while (userp) {
42322+ if (copy_from_user(&o_tmp, userp,
42323+ sizeof (struct acl_object_label)))
42324+ break;
42325+
42326+ userp = o_tmp.prev;
42327+ num++;
42328+ }
42329+
42330+ return num;
42331+}
42332+
42333+static struct acl_subject_label *
42334+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42335+
42336+static int
42337+copy_user_glob(struct acl_object_label *obj)
42338+{
42339+ struct acl_object_label *g_tmp, **guser;
42340+ unsigned int len;
42341+ char *tmp;
42342+
42343+ if (obj->globbed == NULL)
42344+ return 0;
42345+
42346+ guser = &obj->globbed;
42347+ while (*guser) {
42348+ g_tmp = (struct acl_object_label *)
42349+ acl_alloc(sizeof (struct acl_object_label));
42350+ if (g_tmp == NULL)
42351+ return -ENOMEM;
42352+
42353+ if (copy_from_user(g_tmp, *guser,
42354+ sizeof (struct acl_object_label)))
42355+ return -EFAULT;
42356+
42357+ len = strnlen_user(g_tmp->filename, PATH_MAX);
42358+
42359+ if (!len || len >= PATH_MAX)
42360+ return -EINVAL;
42361+
42362+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42363+ return -ENOMEM;
42364+
42365+ if (copy_from_user(tmp, g_tmp->filename, len))
42366+ return -EFAULT;
42367+ tmp[len-1] = '\0';
42368+ g_tmp->filename = tmp;
42369+
42370+ *guser = g_tmp;
42371+ guser = &(g_tmp->next);
42372+ }
42373+
42374+ return 0;
42375+}
42376+
42377+static int
42378+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42379+ struct acl_role_label *role)
42380+{
42381+ struct acl_object_label *o_tmp;
42382+ unsigned int len;
42383+ int ret;
42384+ char *tmp;
42385+
42386+ while (userp) {
42387+ if ((o_tmp = (struct acl_object_label *)
42388+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
42389+ return -ENOMEM;
42390+
42391+ if (copy_from_user(o_tmp, userp,
42392+ sizeof (struct acl_object_label)))
42393+ return -EFAULT;
42394+
42395+ userp = o_tmp->prev;
42396+
42397+ len = strnlen_user(o_tmp->filename, PATH_MAX);
42398+
42399+ if (!len || len >= PATH_MAX)
42400+ return -EINVAL;
42401+
42402+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42403+ return -ENOMEM;
42404+
42405+ if (copy_from_user(tmp, o_tmp->filename, len))
42406+ return -EFAULT;
42407+ tmp[len-1] = '\0';
42408+ o_tmp->filename = tmp;
42409+
42410+ insert_acl_obj_label(o_tmp, subj);
42411+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42412+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42413+ return -ENOMEM;
42414+
42415+ ret = copy_user_glob(o_tmp);
42416+ if (ret)
42417+ return ret;
42418+
42419+ if (o_tmp->nested) {
42420+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42421+ if (IS_ERR(o_tmp->nested))
42422+ return PTR_ERR(o_tmp->nested);
42423+
42424+ /* insert into nested subject list */
42425+ o_tmp->nested->next = role->hash->first;
42426+ role->hash->first = o_tmp->nested;
42427+ }
42428+ }
42429+
42430+ return 0;
42431+}
42432+
42433+static __u32
42434+count_user_subjs(struct acl_subject_label *userp)
42435+{
42436+ struct acl_subject_label s_tmp;
42437+ __u32 num = 0;
42438+
42439+ while (userp) {
42440+ if (copy_from_user(&s_tmp, userp,
42441+ sizeof (struct acl_subject_label)))
42442+ break;
42443+
42444+ userp = s_tmp.prev;
42445+ /* do not count nested subjects against this count, since
42446+ they are not included in the hash table, but are
42447+ attached to objects. We have already counted
42448+ the subjects in userspace for the allocation
42449+ stack
42450+ */
42451+ if (!(s_tmp.mode & GR_NESTED))
42452+ num++;
42453+ }
42454+
42455+ return num;
42456+}
42457+
42458+static int
42459+copy_user_allowedips(struct acl_role_label *rolep)
42460+{
42461+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42462+
42463+ ruserip = rolep->allowed_ips;
42464+
42465+ while (ruserip) {
42466+ rlast = rtmp;
42467+
42468+ if ((rtmp = (struct role_allowed_ip *)
42469+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42470+ return -ENOMEM;
42471+
42472+ if (copy_from_user(rtmp, ruserip,
42473+ sizeof (struct role_allowed_ip)))
42474+ return -EFAULT;
42475+
42476+ ruserip = rtmp->prev;
42477+
42478+ if (!rlast) {
42479+ rtmp->prev = NULL;
42480+ rolep->allowed_ips = rtmp;
42481+ } else {
42482+ rlast->next = rtmp;
42483+ rtmp->prev = rlast;
42484+ }
42485+
42486+ if (!ruserip)
42487+ rtmp->next = NULL;
42488+ }
42489+
42490+ return 0;
42491+}
42492+
42493+static int
42494+copy_user_transitions(struct acl_role_label *rolep)
42495+{
42496+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
42497+
42498+ unsigned int len;
42499+ char *tmp;
42500+
42501+ rusertp = rolep->transitions;
42502+
42503+ while (rusertp) {
42504+ rlast = rtmp;
42505+
42506+ if ((rtmp = (struct role_transition *)
42507+ acl_alloc(sizeof (struct role_transition))) == NULL)
42508+ return -ENOMEM;
42509+
42510+ if (copy_from_user(rtmp, rusertp,
42511+ sizeof (struct role_transition)))
42512+ return -EFAULT;
42513+
42514+ rusertp = rtmp->prev;
42515+
42516+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42517+
42518+ if (!len || len >= GR_SPROLE_LEN)
42519+ return -EINVAL;
42520+
42521+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42522+ return -ENOMEM;
42523+
42524+ if (copy_from_user(tmp, rtmp->rolename, len))
42525+ return -EFAULT;
42526+ tmp[len-1] = '\0';
42527+ rtmp->rolename = tmp;
42528+
42529+ if (!rlast) {
42530+ rtmp->prev = NULL;
42531+ rolep->transitions = rtmp;
42532+ } else {
42533+ rlast->next = rtmp;
42534+ rtmp->prev = rlast;
42535+ }
42536+
42537+ if (!rusertp)
42538+ rtmp->next = NULL;
42539+ }
42540+
42541+ return 0;
42542+}
42543+
42544+static struct acl_subject_label *
42545+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42546+{
42547+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42548+ unsigned int len;
42549+ char *tmp;
42550+ __u32 num_objs;
42551+ struct acl_ip_label **i_tmp, *i_utmp2;
42552+ struct gr_hash_struct ghash;
42553+ struct subject_map *subjmap;
42554+ unsigned int i_num;
42555+ int err;
42556+
42557+ s_tmp = lookup_subject_map(userp);
42558+
42559+ /* we've already copied this subject into the kernel, just return
42560+ the reference to it, and don't copy it over again
42561+ */
42562+ if (s_tmp)
42563+ return(s_tmp);
42564+
42565+ if ((s_tmp = (struct acl_subject_label *)
42566+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42567+ return ERR_PTR(-ENOMEM);
42568+
42569+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42570+ if (subjmap == NULL)
42571+ return ERR_PTR(-ENOMEM);
42572+
42573+ subjmap->user = userp;
42574+ subjmap->kernel = s_tmp;
42575+ insert_subj_map_entry(subjmap);
42576+
42577+ if (copy_from_user(s_tmp, userp,
42578+ sizeof (struct acl_subject_label)))
42579+ return ERR_PTR(-EFAULT);
42580+
42581+ len = strnlen_user(s_tmp->filename, PATH_MAX);
42582+
42583+ if (!len || len >= PATH_MAX)
42584+ return ERR_PTR(-EINVAL);
42585+
42586+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42587+ return ERR_PTR(-ENOMEM);
42588+
42589+ if (copy_from_user(tmp, s_tmp->filename, len))
42590+ return ERR_PTR(-EFAULT);
42591+ tmp[len-1] = '\0';
42592+ s_tmp->filename = tmp;
42593+
42594+ if (!strcmp(s_tmp->filename, "/"))
42595+ role->root_label = s_tmp;
42596+
42597+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42598+ return ERR_PTR(-EFAULT);
42599+
42600+ /* copy user and group transition tables */
42601+
42602+ if (s_tmp->user_trans_num) {
42603+ uid_t *uidlist;
42604+
42605+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42606+ if (uidlist == NULL)
42607+ return ERR_PTR(-ENOMEM);
42608+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42609+ return ERR_PTR(-EFAULT);
42610+
42611+ s_tmp->user_transitions = uidlist;
42612+ }
42613+
42614+ if (s_tmp->group_trans_num) {
42615+ gid_t *gidlist;
42616+
42617+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42618+ if (gidlist == NULL)
42619+ return ERR_PTR(-ENOMEM);
42620+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42621+ return ERR_PTR(-EFAULT);
42622+
42623+ s_tmp->group_transitions = gidlist;
42624+ }
42625+
42626+ /* set up object hash table */
42627+ num_objs = count_user_objs(ghash.first);
42628+
42629+ s_tmp->obj_hash_size = num_objs;
42630+ s_tmp->obj_hash =
42631+ (struct acl_object_label **)
42632+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42633+
42634+ if (!s_tmp->obj_hash)
42635+ return ERR_PTR(-ENOMEM);
42636+
42637+ memset(s_tmp->obj_hash, 0,
42638+ s_tmp->obj_hash_size *
42639+ sizeof (struct acl_object_label *));
42640+
42641+ /* add in objects */
42642+ err = copy_user_objs(ghash.first, s_tmp, role);
42643+
42644+ if (err)
42645+ return ERR_PTR(err);
42646+
42647+ /* set pointer for parent subject */
42648+ if (s_tmp->parent_subject) {
42649+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42650+
42651+ if (IS_ERR(s_tmp2))
42652+ return s_tmp2;
42653+
42654+ s_tmp->parent_subject = s_tmp2;
42655+ }
42656+
42657+ /* add in ip acls */
42658+
42659+ if (!s_tmp->ip_num) {
42660+ s_tmp->ips = NULL;
42661+ goto insert;
42662+ }
42663+
42664+ i_tmp =
42665+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42666+ sizeof (struct acl_ip_label *));
42667+
42668+ if (!i_tmp)
42669+ return ERR_PTR(-ENOMEM);
42670+
42671+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42672+ *(i_tmp + i_num) =
42673+ (struct acl_ip_label *)
42674+ acl_alloc(sizeof (struct acl_ip_label));
42675+ if (!*(i_tmp + i_num))
42676+ return ERR_PTR(-ENOMEM);
42677+
42678+ if (copy_from_user
42679+ (&i_utmp2, s_tmp->ips + i_num,
42680+ sizeof (struct acl_ip_label *)))
42681+ return ERR_PTR(-EFAULT);
42682+
42683+ if (copy_from_user
42684+ (*(i_tmp + i_num), i_utmp2,
42685+ sizeof (struct acl_ip_label)))
42686+ return ERR_PTR(-EFAULT);
42687+
42688+ if ((*(i_tmp + i_num))->iface == NULL)
42689+ continue;
42690+
42691+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42692+ if (!len || len >= IFNAMSIZ)
42693+ return ERR_PTR(-EINVAL);
42694+ tmp = acl_alloc(len);
42695+ if (tmp == NULL)
42696+ return ERR_PTR(-ENOMEM);
42697+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42698+ return ERR_PTR(-EFAULT);
42699+ (*(i_tmp + i_num))->iface = tmp;
42700+ }
42701+
42702+ s_tmp->ips = i_tmp;
42703+
42704+insert:
42705+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42706+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42707+ return ERR_PTR(-ENOMEM);
42708+
42709+ return s_tmp;
42710+}
42711+
42712+static int
42713+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42714+{
42715+ struct acl_subject_label s_pre;
42716+ struct acl_subject_label * ret;
42717+ int err;
42718+
42719+ while (userp) {
42720+ if (copy_from_user(&s_pre, userp,
42721+ sizeof (struct acl_subject_label)))
42722+ return -EFAULT;
42723+
42724+ /* do not add nested subjects here, add
42725+ while parsing objects
42726+ */
42727+
42728+ if (s_pre.mode & GR_NESTED) {
42729+ userp = s_pre.prev;
42730+ continue;
42731+ }
42732+
42733+ ret = do_copy_user_subj(userp, role);
42734+
42735+ err = PTR_ERR(ret);
42736+ if (IS_ERR(ret))
42737+ return err;
42738+
42739+ insert_acl_subj_label(ret, role);
42740+
42741+ userp = s_pre.prev;
42742+ }
42743+
42744+ return 0;
42745+}
42746+
42747+static int
42748+copy_user_acl(struct gr_arg *arg)
42749+{
42750+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42751+ struct sprole_pw *sptmp;
42752+ struct gr_hash_struct *ghash;
42753+ uid_t *domainlist;
42754+ unsigned int r_num;
42755+ unsigned int len;
42756+ char *tmp;
42757+ int err = 0;
42758+ __u16 i;
42759+ __u32 num_subjs;
42760+
42761+ /* we need a default and kernel role */
42762+ if (arg->role_db.num_roles < 2)
42763+ return -EINVAL;
42764+
42765+ /* copy special role authentication info from userspace */
42766+
42767+ num_sprole_pws = arg->num_sprole_pws;
42768+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42769+
42770+ if (!acl_special_roles) {
42771+ err = -ENOMEM;
42772+ goto cleanup;
42773+ }
42774+
42775+ for (i = 0; i < num_sprole_pws; i++) {
42776+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42777+ if (!sptmp) {
42778+ err = -ENOMEM;
42779+ goto cleanup;
42780+ }
42781+ if (copy_from_user(sptmp, arg->sprole_pws + i,
42782+ sizeof (struct sprole_pw))) {
42783+ err = -EFAULT;
42784+ goto cleanup;
42785+ }
42786+
42787+ len =
42788+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42789+
42790+ if (!len || len >= GR_SPROLE_LEN) {
42791+ err = -EINVAL;
42792+ goto cleanup;
42793+ }
42794+
42795+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42796+ err = -ENOMEM;
42797+ goto cleanup;
42798+ }
42799+
42800+ if (copy_from_user(tmp, sptmp->rolename, len)) {
42801+ err = -EFAULT;
42802+ goto cleanup;
42803+ }
42804+ tmp[len-1] = '\0';
42805+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42806+ printk(KERN_ALERT "Copying special role %s\n", tmp);
42807+#endif
42808+ sptmp->rolename = tmp;
42809+ acl_special_roles[i] = sptmp;
42810+ }
42811+
42812+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42813+
42814+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42815+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
42816+
42817+ if (!r_tmp) {
42818+ err = -ENOMEM;
42819+ goto cleanup;
42820+ }
42821+
42822+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
42823+ sizeof (struct acl_role_label *))) {
42824+ err = -EFAULT;
42825+ goto cleanup;
42826+ }
42827+
42828+ if (copy_from_user(r_tmp, r_utmp2,
42829+ sizeof (struct acl_role_label))) {
42830+ err = -EFAULT;
42831+ goto cleanup;
42832+ }
42833+
42834+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42835+
42836+ if (!len || len >= PATH_MAX) {
42837+ err = -EINVAL;
42838+ goto cleanup;
42839+ }
42840+
42841+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42842+ err = -ENOMEM;
42843+ goto cleanup;
42844+ }
42845+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
42846+ err = -EFAULT;
42847+ goto cleanup;
42848+ }
42849+ tmp[len-1] = '\0';
42850+ r_tmp->rolename = tmp;
42851+
42852+ if (!strcmp(r_tmp->rolename, "default")
42853+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42854+ default_role = r_tmp;
42855+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42856+ kernel_role = r_tmp;
42857+ }
42858+
42859+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42860+ err = -ENOMEM;
42861+ goto cleanup;
42862+ }
42863+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42864+ err = -EFAULT;
42865+ goto cleanup;
42866+ }
42867+
42868+ r_tmp->hash = ghash;
42869+
42870+ num_subjs = count_user_subjs(r_tmp->hash->first);
42871+
42872+ r_tmp->subj_hash_size = num_subjs;
42873+ r_tmp->subj_hash =
42874+ (struct acl_subject_label **)
42875+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42876+
42877+ if (!r_tmp->subj_hash) {
42878+ err = -ENOMEM;
42879+ goto cleanup;
42880+ }
42881+
42882+ err = copy_user_allowedips(r_tmp);
42883+ if (err)
42884+ goto cleanup;
42885+
42886+ /* copy domain info */
42887+ if (r_tmp->domain_children != NULL) {
42888+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42889+ if (domainlist == NULL) {
42890+ err = -ENOMEM;
42891+ goto cleanup;
42892+ }
42893+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42894+ err = -EFAULT;
42895+ goto cleanup;
42896+ }
42897+ r_tmp->domain_children = domainlist;
42898+ }
42899+
42900+ err = copy_user_transitions(r_tmp);
42901+ if (err)
42902+ goto cleanup;
42903+
42904+ memset(r_tmp->subj_hash, 0,
42905+ r_tmp->subj_hash_size *
42906+ sizeof (struct acl_subject_label *));
42907+
42908+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42909+
42910+ if (err)
42911+ goto cleanup;
42912+
42913+ /* set nested subject list to null */
42914+ r_tmp->hash->first = NULL;
42915+
42916+ insert_acl_role_label(r_tmp);
42917+ }
42918+
42919+ goto return_err;
42920+ cleanup:
42921+ free_variables();
42922+ return_err:
42923+ return err;
42924+
42925+}
42926+
42927+static int
42928+gracl_init(struct gr_arg *args)
42929+{
42930+ int error = 0;
42931+
42932+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42933+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42934+
42935+ if (init_variables(args)) {
42936+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42937+ error = -ENOMEM;
42938+ free_variables();
42939+ goto out;
42940+ }
42941+
42942+ error = copy_user_acl(args);
42943+ free_init_variables();
42944+ if (error) {
42945+ free_variables();
42946+ goto out;
42947+ }
42948+
42949+ if ((error = gr_set_acls(0))) {
42950+ free_variables();
42951+ goto out;
42952+ }
42953+
42954+ pax_open_kernel();
42955+ gr_status |= GR_READY;
42956+ pax_close_kernel();
42957+
42958+ out:
42959+ return error;
42960+}
42961+
42962+/* derived from glibc fnmatch() 0: match, 1: no match*/
42963+
42964+static int
42965+glob_match(const char *p, const char *n)
42966+{
42967+ char c;
42968+
42969+ while ((c = *p++) != '\0') {
42970+ switch (c) {
42971+ case '?':
42972+ if (*n == '\0')
42973+ return 1;
42974+ else if (*n == '/')
42975+ return 1;
42976+ break;
42977+ case '\\':
42978+ if (*n != c)
42979+ return 1;
42980+ break;
42981+ case '*':
42982+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
42983+ if (*n == '/')
42984+ return 1;
42985+ else if (c == '?') {
42986+ if (*n == '\0')
42987+ return 1;
42988+ else
42989+ ++n;
42990+ }
42991+ }
42992+ if (c == '\0') {
42993+ return 0;
42994+ } else {
42995+ const char *endp;
42996+
42997+ if ((endp = strchr(n, '/')) == NULL)
42998+ endp = n + strlen(n);
42999+
43000+ if (c == '[') {
43001+ for (--p; n < endp; ++n)
43002+ if (!glob_match(p, n))
43003+ return 0;
43004+ } else if (c == '/') {
43005+ while (*n != '\0' && *n != '/')
43006+ ++n;
43007+ if (*n == '/' && !glob_match(p, n + 1))
43008+ return 0;
43009+ } else {
43010+ for (--p; n < endp; ++n)
43011+ if (*n == c && !glob_match(p, n))
43012+ return 0;
43013+ }
43014+
43015+ return 1;
43016+ }
43017+ case '[':
43018+ {
43019+ int not;
43020+ char cold;
43021+
43022+ if (*n == '\0' || *n == '/')
43023+ return 1;
43024+
43025+ not = (*p == '!' || *p == '^');
43026+ if (not)
43027+ ++p;
43028+
43029+ c = *p++;
43030+ for (;;) {
43031+ unsigned char fn = (unsigned char)*n;
43032+
43033+ if (c == '\0')
43034+ return 1;
43035+ else {
43036+ if (c == fn)
43037+ goto matched;
43038+ cold = c;
43039+ c = *p++;
43040+
43041+ if (c == '-' && *p != ']') {
43042+ unsigned char cend = *p++;
43043+
43044+ if (cend == '\0')
43045+ return 1;
43046+
43047+ if (cold <= fn && fn <= cend)
43048+ goto matched;
43049+
43050+ c = *p++;
43051+ }
43052+ }
43053+
43054+ if (c == ']')
43055+ break;
43056+ }
43057+ if (!not)
43058+ return 1;
43059+ break;
43060+ matched:
43061+ while (c != ']') {
43062+ if (c == '\0')
43063+ return 1;
43064+
43065+ c = *p++;
43066+ }
43067+ if (not)
43068+ return 1;
43069+ }
43070+ break;
43071+ default:
43072+ if (c != *n)
43073+ return 1;
43074+ }
43075+
43076+ ++n;
43077+ }
43078+
43079+ if (*n == '\0')
43080+ return 0;
43081+
43082+ if (*n == '/')
43083+ return 0;
43084+
43085+ return 1;
43086+}
43087+
43088+static struct acl_object_label *
43089+chk_glob_label(struct acl_object_label *globbed,
43090+ struct dentry *dentry, struct vfsmount *mnt, char **path)
43091+{
43092+ struct acl_object_label *tmp;
43093+
43094+ if (*path == NULL)
43095+ *path = gr_to_filename_nolock(dentry, mnt);
43096+
43097+ tmp = globbed;
43098+
43099+ while (tmp) {
43100+ if (!glob_match(tmp->filename, *path))
43101+ return tmp;
43102+ tmp = tmp->next;
43103+ }
43104+
43105+ return NULL;
43106+}
43107+
43108+static struct acl_object_label *
43109+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43110+ const ino_t curr_ino, const dev_t curr_dev,
43111+ const struct acl_subject_label *subj, char **path, const int checkglob)
43112+{
43113+ struct acl_subject_label *tmpsubj;
43114+ struct acl_object_label *retval;
43115+ struct acl_object_label *retval2;
43116+
43117+ tmpsubj = (struct acl_subject_label *) subj;
43118+ read_lock(&gr_inode_lock);
43119+ do {
43120+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43121+ if (retval) {
43122+ if (checkglob && retval->globbed) {
43123+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43124+ (struct vfsmount *)orig_mnt, path);
43125+ if (retval2)
43126+ retval = retval2;
43127+ }
43128+ break;
43129+ }
43130+ } while ((tmpsubj = tmpsubj->parent_subject));
43131+ read_unlock(&gr_inode_lock);
43132+
43133+ return retval;
43134+}
43135+
43136+static __inline__ struct acl_object_label *
43137+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43138+ struct dentry *curr_dentry,
43139+ const struct acl_subject_label *subj, char **path, const int checkglob)
43140+{
43141+ int newglob = checkglob;
43142+ ino_t inode;
43143+ dev_t device;
43144+
43145+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43146+ as we don't want a / * rule to match instead of the / object
43147+ don't do this for create lookups that call this function though, since they're looking up
43148+ on the parent and thus need globbing checks on all paths
43149+ */
43150+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43151+ newglob = GR_NO_GLOB;
43152+
43153+ spin_lock(&curr_dentry->d_lock);
43154+ inode = curr_dentry->d_inode->i_ino;
43155+ device = __get_dev(curr_dentry);
43156+ spin_unlock(&curr_dentry->d_lock);
43157+
43158+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43159+}
43160+
43161+static struct acl_object_label *
43162+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43163+ const struct acl_subject_label *subj, char *path, const int checkglob)
43164+{
43165+ struct dentry *dentry = (struct dentry *) l_dentry;
43166+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43167+ struct acl_object_label *retval;
43168+ struct dentry *parent;
43169+
43170+ write_seqlock(&rename_lock);
43171+ br_read_lock(vfsmount_lock);
43172+
43173+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43174+#ifdef CONFIG_NET
43175+ mnt == sock_mnt ||
43176+#endif
43177+#ifdef CONFIG_HUGETLBFS
43178+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43179+#endif
43180+ /* ignore Eric Biederman */
43181+ IS_PRIVATE(l_dentry->d_inode))) {
43182+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43183+ goto out;
43184+ }
43185+
43186+ for (;;) {
43187+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43188+ break;
43189+
43190+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43191+ if (mnt->mnt_parent == mnt)
43192+ break;
43193+
43194+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43195+ if (retval != NULL)
43196+ goto out;
43197+
43198+ dentry = mnt->mnt_mountpoint;
43199+ mnt = mnt->mnt_parent;
43200+ continue;
43201+ }
43202+
43203+ parent = dentry->d_parent;
43204+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43205+ if (retval != NULL)
43206+ goto out;
43207+
43208+ dentry = parent;
43209+ }
43210+
43211+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43212+
43213+ /* real_root is pinned so we don't have to hold a reference */
43214+ if (retval == NULL)
43215+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43216+out:
43217+ br_read_unlock(vfsmount_lock);
43218+ write_sequnlock(&rename_lock);
43219+
43220+ BUG_ON(retval == NULL);
43221+
43222+ return retval;
43223+}
43224+
43225+static __inline__ struct acl_object_label *
43226+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43227+ const struct acl_subject_label *subj)
43228+{
43229+ char *path = NULL;
43230+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43231+}
43232+
43233+static __inline__ struct acl_object_label *
43234+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43235+ const struct acl_subject_label *subj)
43236+{
43237+ char *path = NULL;
43238+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43239+}
43240+
43241+static __inline__ struct acl_object_label *
43242+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43243+ const struct acl_subject_label *subj, char *path)
43244+{
43245+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43246+}
43247+
43248+static struct acl_subject_label *
43249+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43250+ const struct acl_role_label *role)
43251+{
43252+ struct dentry *dentry = (struct dentry *) l_dentry;
43253+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43254+ struct acl_subject_label *retval;
43255+ struct dentry *parent;
43256+
43257+ write_seqlock(&rename_lock);
43258+ br_read_lock(vfsmount_lock);
43259+
43260+ for (;;) {
43261+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43262+ break;
43263+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43264+ if (mnt->mnt_parent == mnt)
43265+ break;
43266+
43267+ spin_lock(&dentry->d_lock);
43268+ read_lock(&gr_inode_lock);
43269+ retval =
43270+ lookup_acl_subj_label(dentry->d_inode->i_ino,
43271+ __get_dev(dentry), role);
43272+ read_unlock(&gr_inode_lock);
43273+ spin_unlock(&dentry->d_lock);
43274+ if (retval != NULL)
43275+ goto out;
43276+
43277+ dentry = mnt->mnt_mountpoint;
43278+ mnt = mnt->mnt_parent;
43279+ continue;
43280+ }
43281+
43282+ spin_lock(&dentry->d_lock);
43283+ read_lock(&gr_inode_lock);
43284+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43285+ __get_dev(dentry), role);
43286+ read_unlock(&gr_inode_lock);
43287+ parent = dentry->d_parent;
43288+ spin_unlock(&dentry->d_lock);
43289+
43290+ if (retval != NULL)
43291+ goto out;
43292+
43293+ dentry = parent;
43294+ }
43295+
43296+ spin_lock(&dentry->d_lock);
43297+ read_lock(&gr_inode_lock);
43298+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43299+ __get_dev(dentry), role);
43300+ read_unlock(&gr_inode_lock);
43301+ spin_unlock(&dentry->d_lock);
43302+
43303+ if (unlikely(retval == NULL)) {
43304+ /* real_root is pinned, we don't need to hold a reference */
43305+ read_lock(&gr_inode_lock);
43306+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43307+ __get_dev(real_root.dentry), role);
43308+ read_unlock(&gr_inode_lock);
43309+ }
43310+out:
43311+ br_read_unlock(vfsmount_lock);
43312+ write_sequnlock(&rename_lock);
43313+
43314+ BUG_ON(retval == NULL);
43315+
43316+ return retval;
43317+}
43318+
43319+static void
43320+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43321+{
43322+ struct task_struct *task = current;
43323+ const struct cred *cred = current_cred();
43324+
43325+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43326+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43327+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43328+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43329+
43330+ return;
43331+}
43332+
43333+static void
43334+gr_log_learn_sysctl(const char *path, const __u32 mode)
43335+{
43336+ struct task_struct *task = current;
43337+ const struct cred *cred = current_cred();
43338+
43339+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43340+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43341+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43342+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43343+
43344+ return;
43345+}
43346+
43347+static void
43348+gr_log_learn_id_change(const char type, const unsigned int real,
43349+ const unsigned int effective, const unsigned int fs)
43350+{
43351+ struct task_struct *task = current;
43352+ const struct cred *cred = current_cred();
43353+
43354+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43355+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43356+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43357+ type, real, effective, fs, &task->signal->saved_ip);
43358+
43359+ return;
43360+}
43361+
43362+__u32
43363+gr_check_link(const struct dentry * new_dentry,
43364+ const struct dentry * parent_dentry,
43365+ const struct vfsmount * parent_mnt,
43366+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43367+{
43368+ struct acl_object_label *obj;
43369+ __u32 oldmode, newmode;
43370+ __u32 needmode;
43371+
43372+ if (unlikely(!(gr_status & GR_READY)))
43373+ return (GR_CREATE | GR_LINK);
43374+
43375+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43376+ oldmode = obj->mode;
43377+
43378+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43379+ oldmode |= (GR_CREATE | GR_LINK);
43380+
43381+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43382+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43383+ needmode |= GR_SETID | GR_AUDIT_SETID;
43384+
43385+ newmode =
43386+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
43387+ oldmode | needmode);
43388+
43389+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43390+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43391+ GR_INHERIT | GR_AUDIT_INHERIT);
43392+
43393+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43394+ goto bad;
43395+
43396+ if ((oldmode & needmode) != needmode)
43397+ goto bad;
43398+
43399+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43400+ if ((newmode & needmode) != needmode)
43401+ goto bad;
43402+
43403+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43404+ return newmode;
43405+bad:
43406+ needmode = oldmode;
43407+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43408+ needmode |= GR_SETID;
43409+
43410+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43411+ gr_log_learn(old_dentry, old_mnt, needmode);
43412+ return (GR_CREATE | GR_LINK);
43413+ } else if (newmode & GR_SUPPRESS)
43414+ return GR_SUPPRESS;
43415+ else
43416+ return 0;
43417+}
43418+
43419+__u32
43420+gr_search_file(const struct dentry * dentry, const __u32 mode,
43421+ const struct vfsmount * mnt)
43422+{
43423+ __u32 retval = mode;
43424+ struct acl_subject_label *curracl;
43425+ struct acl_object_label *currobj;
43426+
43427+ if (unlikely(!(gr_status & GR_READY)))
43428+ return (mode & ~GR_AUDITS);
43429+
43430+ curracl = current->acl;
43431+
43432+ currobj = chk_obj_label(dentry, mnt, curracl);
43433+ retval = currobj->mode & mode;
43434+
43435+ /* if we're opening a specified transfer file for writing
43436+ (e.g. /dev/initctl), then transfer our role to init
43437+ */
43438+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43439+ current->role->roletype & GR_ROLE_PERSIST)) {
43440+ struct task_struct *task = init_pid_ns.child_reaper;
43441+
43442+ if (task->role != current->role) {
43443+ task->acl_sp_role = 0;
43444+ task->acl_role_id = current->acl_role_id;
43445+ task->role = current->role;
43446+ rcu_read_lock();
43447+ read_lock(&grsec_exec_file_lock);
43448+ gr_apply_subject_to_task(task);
43449+ read_unlock(&grsec_exec_file_lock);
43450+ rcu_read_unlock();
43451+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43452+ }
43453+ }
43454+
43455+ if (unlikely
43456+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43457+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43458+ __u32 new_mode = mode;
43459+
43460+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43461+
43462+ retval = new_mode;
43463+
43464+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43465+ new_mode |= GR_INHERIT;
43466+
43467+ if (!(mode & GR_NOLEARN))
43468+ gr_log_learn(dentry, mnt, new_mode);
43469+ }
43470+
43471+ return retval;
43472+}
43473+
43474+__u32
43475+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43476+ const struct vfsmount * mnt, const __u32 mode)
43477+{
43478+ struct name_entry *match;
43479+ struct acl_object_label *matchpo;
43480+ struct acl_subject_label *curracl;
43481+ char *path;
43482+ __u32 retval;
43483+
43484+ if (unlikely(!(gr_status & GR_READY)))
43485+ return (mode & ~GR_AUDITS);
43486+
43487+ preempt_disable();
43488+ path = gr_to_filename_rbac(new_dentry, mnt);
43489+ match = lookup_name_entry_create(path);
43490+
43491+ if (!match)
43492+ goto check_parent;
43493+
43494+ curracl = current->acl;
43495+
43496+ read_lock(&gr_inode_lock);
43497+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43498+ read_unlock(&gr_inode_lock);
43499+
43500+ if (matchpo) {
43501+ if ((matchpo->mode & mode) !=
43502+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
43503+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43504+ __u32 new_mode = mode;
43505+
43506+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43507+
43508+ gr_log_learn(new_dentry, mnt, new_mode);
43509+
43510+ preempt_enable();
43511+ return new_mode;
43512+ }
43513+ preempt_enable();
43514+ return (matchpo->mode & mode);
43515+ }
43516+
43517+ check_parent:
43518+ curracl = current->acl;
43519+
43520+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43521+ retval = matchpo->mode & mode;
43522+
43523+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43524+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43525+ __u32 new_mode = mode;
43526+
43527+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43528+
43529+ gr_log_learn(new_dentry, mnt, new_mode);
43530+ preempt_enable();
43531+ return new_mode;
43532+ }
43533+
43534+ preempt_enable();
43535+ return retval;
43536+}
43537+
43538+int
43539+gr_check_hidden_task(const struct task_struct *task)
43540+{
43541+ if (unlikely(!(gr_status & GR_READY)))
43542+ return 0;
43543+
43544+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43545+ return 1;
43546+
43547+ return 0;
43548+}
43549+
43550+int
43551+gr_check_protected_task(const struct task_struct *task)
43552+{
43553+ if (unlikely(!(gr_status & GR_READY) || !task))
43554+ return 0;
43555+
43556+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43557+ task->acl != current->acl)
43558+ return 1;
43559+
43560+ return 0;
43561+}
43562+
43563+int
43564+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43565+{
43566+ struct task_struct *p;
43567+ int ret = 0;
43568+
43569+ if (unlikely(!(gr_status & GR_READY) || !pid))
43570+ return ret;
43571+
43572+ read_lock(&tasklist_lock);
43573+ do_each_pid_task(pid, type, p) {
43574+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43575+ p->acl != current->acl) {
43576+ ret = 1;
43577+ goto out;
43578+ }
43579+ } while_each_pid_task(pid, type, p);
43580+out:
43581+ read_unlock(&tasklist_lock);
43582+
43583+ return ret;
43584+}
43585+
43586+void
43587+gr_copy_label(struct task_struct *tsk)
43588+{
43589+ tsk->signal->used_accept = 0;
43590+ tsk->acl_sp_role = 0;
43591+ tsk->acl_role_id = current->acl_role_id;
43592+ tsk->acl = current->acl;
43593+ tsk->role = current->role;
43594+ tsk->signal->curr_ip = current->signal->curr_ip;
43595+ tsk->signal->saved_ip = current->signal->saved_ip;
43596+ if (current->exec_file)
43597+ get_file(current->exec_file);
43598+ tsk->exec_file = current->exec_file;
43599+ tsk->is_writable = current->is_writable;
43600+ if (unlikely(current->signal->used_accept)) {
43601+ current->signal->curr_ip = 0;
43602+ current->signal->saved_ip = 0;
43603+ }
43604+
43605+ return;
43606+}
43607+
43608+static void
43609+gr_set_proc_res(struct task_struct *task)
43610+{
43611+ struct acl_subject_label *proc;
43612+ unsigned short i;
43613+
43614+ proc = task->acl;
43615+
43616+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43617+ return;
43618+
43619+ for (i = 0; i < RLIM_NLIMITS; i++) {
43620+ if (!(proc->resmask & (1 << i)))
43621+ continue;
43622+
43623+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43624+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43625+ }
43626+
43627+ return;
43628+}
43629+
43630+extern int __gr_process_user_ban(struct user_struct *user);
43631+
43632+int
43633+gr_check_user_change(int real, int effective, int fs)
43634+{
43635+ unsigned int i;
43636+ __u16 num;
43637+ uid_t *uidlist;
43638+ int curuid;
43639+ int realok = 0;
43640+ int effectiveok = 0;
43641+ int fsok = 0;
43642+
43643+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43644+ struct user_struct *user;
43645+
43646+ if (real == -1)
43647+ goto skipit;
43648+
43649+ user = find_user(real);
43650+ if (user == NULL)
43651+ goto skipit;
43652+
43653+ if (__gr_process_user_ban(user)) {
43654+ /* for find_user */
43655+ free_uid(user);
43656+ return 1;
43657+ }
43658+
43659+ /* for find_user */
43660+ free_uid(user);
43661+
43662+skipit:
43663+#endif
43664+
43665+ if (unlikely(!(gr_status & GR_READY)))
43666+ return 0;
43667+
43668+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43669+ gr_log_learn_id_change('u', real, effective, fs);
43670+
43671+ num = current->acl->user_trans_num;
43672+ uidlist = current->acl->user_transitions;
43673+
43674+ if (uidlist == NULL)
43675+ return 0;
43676+
43677+ if (real == -1)
43678+ realok = 1;
43679+ if (effective == -1)
43680+ effectiveok = 1;
43681+ if (fs == -1)
43682+ fsok = 1;
43683+
43684+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
43685+ for (i = 0; i < num; i++) {
43686+ curuid = (int)uidlist[i];
43687+ if (real == curuid)
43688+ realok = 1;
43689+ if (effective == curuid)
43690+ effectiveok = 1;
43691+ if (fs == curuid)
43692+ fsok = 1;
43693+ }
43694+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
43695+ for (i = 0; i < num; i++) {
43696+ curuid = (int)uidlist[i];
43697+ if (real == curuid)
43698+ break;
43699+ if (effective == curuid)
43700+ break;
43701+ if (fs == curuid)
43702+ break;
43703+ }
43704+ /* not in deny list */
43705+ if (i == num) {
43706+ realok = 1;
43707+ effectiveok = 1;
43708+ fsok = 1;
43709+ }
43710+ }
43711+
43712+ if (realok && effectiveok && fsok)
43713+ return 0;
43714+ else {
43715+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43716+ return 1;
43717+ }
43718+}
43719+
43720+int
43721+gr_check_group_change(int real, int effective, int fs)
43722+{
43723+ unsigned int i;
43724+ __u16 num;
43725+ gid_t *gidlist;
43726+ int curgid;
43727+ int realok = 0;
43728+ int effectiveok = 0;
43729+ int fsok = 0;
43730+
43731+ if (unlikely(!(gr_status & GR_READY)))
43732+ return 0;
43733+
43734+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43735+ gr_log_learn_id_change('g', real, effective, fs);
43736+
43737+ num = current->acl->group_trans_num;
43738+ gidlist = current->acl->group_transitions;
43739+
43740+ if (gidlist == NULL)
43741+ return 0;
43742+
43743+ if (real == -1)
43744+ realok = 1;
43745+ if (effective == -1)
43746+ effectiveok = 1;
43747+ if (fs == -1)
43748+ fsok = 1;
43749+
43750+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
43751+ for (i = 0; i < num; i++) {
43752+ curgid = (int)gidlist[i];
43753+ if (real == curgid)
43754+ realok = 1;
43755+ if (effective == curgid)
43756+ effectiveok = 1;
43757+ if (fs == curgid)
43758+ fsok = 1;
43759+ }
43760+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
43761+ for (i = 0; i < num; i++) {
43762+ curgid = (int)gidlist[i];
43763+ if (real == curgid)
43764+ break;
43765+ if (effective == curgid)
43766+ break;
43767+ if (fs == curgid)
43768+ break;
43769+ }
43770+ /* not in deny list */
43771+ if (i == num) {
43772+ realok = 1;
43773+ effectiveok = 1;
43774+ fsok = 1;
43775+ }
43776+ }
43777+
43778+ if (realok && effectiveok && fsok)
43779+ return 0;
43780+ else {
43781+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43782+ return 1;
43783+ }
43784+}
43785+
43786+void
43787+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43788+{
43789+ struct acl_role_label *role = task->role;
43790+ struct acl_subject_label *subj = NULL;
43791+ struct acl_object_label *obj;
43792+ struct file *filp;
43793+
43794+ if (unlikely(!(gr_status & GR_READY)))
43795+ return;
43796+
43797+ filp = task->exec_file;
43798+
43799+ /* kernel process, we'll give them the kernel role */
43800+ if (unlikely(!filp)) {
43801+ task->role = kernel_role;
43802+ task->acl = kernel_role->root_label;
43803+ return;
43804+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43805+ role = lookup_acl_role_label(task, uid, gid);
43806+
43807+ /* perform subject lookup in possibly new role
43808+ we can use this result below in the case where role == task->role
43809+ */
43810+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43811+
43812+ /* if we changed uid/gid, but result in the same role
43813+ and are using inheritance, don't lose the inherited subject
43814+ if current subject is other than what normal lookup
43815+ would result in, we arrived via inheritance, don't
43816+ lose subject
43817+ */
43818+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43819+ (subj == task->acl)))
43820+ task->acl = subj;
43821+
43822+ task->role = role;
43823+
43824+ task->is_writable = 0;
43825+
43826+ /* ignore additional mmap checks for processes that are writable
43827+ by the default ACL */
43828+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43829+ if (unlikely(obj->mode & GR_WRITE))
43830+ task->is_writable = 1;
43831+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43832+ if (unlikely(obj->mode & GR_WRITE))
43833+ task->is_writable = 1;
43834+
43835+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43836+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43837+#endif
43838+
43839+ gr_set_proc_res(task);
43840+
43841+ return;
43842+}
43843+
43844+int
43845+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43846+ const int unsafe_share)
43847+{
43848+ struct task_struct *task = current;
43849+ struct acl_subject_label *newacl;
43850+ struct acl_object_label *obj;
43851+ __u32 retmode;
43852+
43853+ if (unlikely(!(gr_status & GR_READY)))
43854+ return 0;
43855+
43856+ newacl = chk_subj_label(dentry, mnt, task->role);
43857+
43858+ task_lock(task);
43859+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43860+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43861+ !(task->role->roletype & GR_ROLE_GOD) &&
43862+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43863+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43864+ task_unlock(task);
43865+ if (unsafe_share)
43866+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43867+ else
43868+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43869+ return -EACCES;
43870+ }
43871+ task_unlock(task);
43872+
43873+ obj = chk_obj_label(dentry, mnt, task->acl);
43874+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43875+
43876+ if (!(task->acl->mode & GR_INHERITLEARN) &&
43877+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43878+ if (obj->nested)
43879+ task->acl = obj->nested;
43880+ else
43881+ task->acl = newacl;
43882+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43883+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43884+
43885+ task->is_writable = 0;
43886+
43887+ /* ignore additional mmap checks for processes that are writable
43888+ by the default ACL */
43889+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
43890+ if (unlikely(obj->mode & GR_WRITE))
43891+ task->is_writable = 1;
43892+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
43893+ if (unlikely(obj->mode & GR_WRITE))
43894+ task->is_writable = 1;
43895+
43896+ gr_set_proc_res(task);
43897+
43898+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43899+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43900+#endif
43901+ return 0;
43902+}
43903+
43904+/* always called with valid inodev ptr */
43905+static void
43906+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43907+{
43908+ struct acl_object_label *matchpo;
43909+ struct acl_subject_label *matchps;
43910+ struct acl_subject_label *subj;
43911+ struct acl_role_label *role;
43912+ unsigned int x;
43913+
43914+ FOR_EACH_ROLE_START(role)
43915+ FOR_EACH_SUBJECT_START(role, subj, x)
43916+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43917+ matchpo->mode |= GR_DELETED;
43918+ FOR_EACH_SUBJECT_END(subj,x)
43919+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
43920+ if (subj->inode == ino && subj->device == dev)
43921+ subj->mode |= GR_DELETED;
43922+ FOR_EACH_NESTED_SUBJECT_END(subj)
43923+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43924+ matchps->mode |= GR_DELETED;
43925+ FOR_EACH_ROLE_END(role)
43926+
43927+ inodev->nentry->deleted = 1;
43928+
43929+ return;
43930+}
43931+
43932+void
43933+gr_handle_delete(const ino_t ino, const dev_t dev)
43934+{
43935+ struct inodev_entry *inodev;
43936+
43937+ if (unlikely(!(gr_status & GR_READY)))
43938+ return;
43939+
43940+ write_lock(&gr_inode_lock);
43941+ inodev = lookup_inodev_entry(ino, dev);
43942+ if (inodev != NULL)
43943+ do_handle_delete(inodev, ino, dev);
43944+ write_unlock(&gr_inode_lock);
43945+
43946+ return;
43947+}
43948+
43949+static void
43950+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
43951+ const ino_t newinode, const dev_t newdevice,
43952+ struct acl_subject_label *subj)
43953+{
43954+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
43955+ struct acl_object_label *match;
43956+
43957+ match = subj->obj_hash[index];
43958+
43959+ while (match && (match->inode != oldinode ||
43960+ match->device != olddevice ||
43961+ !(match->mode & GR_DELETED)))
43962+ match = match->next;
43963+
43964+ if (match && (match->inode == oldinode)
43965+ && (match->device == olddevice)
43966+ && (match->mode & GR_DELETED)) {
43967+ if (match->prev == NULL) {
43968+ subj->obj_hash[index] = match->next;
43969+ if (match->next != NULL)
43970+ match->next->prev = NULL;
43971+ } else {
43972+ match->prev->next = match->next;
43973+ if (match->next != NULL)
43974+ match->next->prev = match->prev;
43975+ }
43976+ match->prev = NULL;
43977+ match->next = NULL;
43978+ match->inode = newinode;
43979+ match->device = newdevice;
43980+ match->mode &= ~GR_DELETED;
43981+
43982+ insert_acl_obj_label(match, subj);
43983+ }
43984+
43985+ return;
43986+}
43987+
43988+static void
43989+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
43990+ const ino_t newinode, const dev_t newdevice,
43991+ struct acl_role_label *role)
43992+{
43993+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
43994+ struct acl_subject_label *match;
43995+
43996+ match = role->subj_hash[index];
43997+
43998+ while (match && (match->inode != oldinode ||
43999+ match->device != olddevice ||
44000+ !(match->mode & GR_DELETED)))
44001+ match = match->next;
44002+
44003+ if (match && (match->inode == oldinode)
44004+ && (match->device == olddevice)
44005+ && (match->mode & GR_DELETED)) {
44006+ if (match->prev == NULL) {
44007+ role->subj_hash[index] = match->next;
44008+ if (match->next != NULL)
44009+ match->next->prev = NULL;
44010+ } else {
44011+ match->prev->next = match->next;
44012+ if (match->next != NULL)
44013+ match->next->prev = match->prev;
44014+ }
44015+ match->prev = NULL;
44016+ match->next = NULL;
44017+ match->inode = newinode;
44018+ match->device = newdevice;
44019+ match->mode &= ~GR_DELETED;
44020+
44021+ insert_acl_subj_label(match, role);
44022+ }
44023+
44024+ return;
44025+}
44026+
44027+static void
44028+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44029+ const ino_t newinode, const dev_t newdevice)
44030+{
44031+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44032+ struct inodev_entry *match;
44033+
44034+ match = inodev_set.i_hash[index];
44035+
44036+ while (match && (match->nentry->inode != oldinode ||
44037+ match->nentry->device != olddevice || !match->nentry->deleted))
44038+ match = match->next;
44039+
44040+ if (match && (match->nentry->inode == oldinode)
44041+ && (match->nentry->device == olddevice) &&
44042+ match->nentry->deleted) {
44043+ if (match->prev == NULL) {
44044+ inodev_set.i_hash[index] = match->next;
44045+ if (match->next != NULL)
44046+ match->next->prev = NULL;
44047+ } else {
44048+ match->prev->next = match->next;
44049+ if (match->next != NULL)
44050+ match->next->prev = match->prev;
44051+ }
44052+ match->prev = NULL;
44053+ match->next = NULL;
44054+ match->nentry->inode = newinode;
44055+ match->nentry->device = newdevice;
44056+ match->nentry->deleted = 0;
44057+
44058+ insert_inodev_entry(match);
44059+ }
44060+
44061+ return;
44062+}
44063+
44064+static void
44065+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44066+ const struct vfsmount *mnt)
44067+{
44068+ struct acl_subject_label *subj;
44069+ struct acl_role_label *role;
44070+ unsigned int x;
44071+ ino_t ino = dentry->d_inode->i_ino;
44072+ dev_t dev = __get_dev(dentry);
44073+
44074+ FOR_EACH_ROLE_START(role)
44075+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44076+
44077+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
44078+ if ((subj->inode == ino) && (subj->device == dev)) {
44079+ subj->inode = ino;
44080+ subj->device = dev;
44081+ }
44082+ FOR_EACH_NESTED_SUBJECT_END(subj)
44083+ FOR_EACH_SUBJECT_START(role, subj, x)
44084+ update_acl_obj_label(matchn->inode, matchn->device,
44085+ ino, dev, subj);
44086+ FOR_EACH_SUBJECT_END(subj,x)
44087+ FOR_EACH_ROLE_END(role)
44088+
44089+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44090+
44091+ return;
44092+}
44093+
44094+void
44095+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44096+{
44097+ struct name_entry *matchn;
44098+
44099+ if (unlikely(!(gr_status & GR_READY)))
44100+ return;
44101+
44102+ preempt_disable();
44103+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44104+
44105+ if (unlikely((unsigned long)matchn)) {
44106+ write_lock(&gr_inode_lock);
44107+ do_handle_create(matchn, dentry, mnt);
44108+ write_unlock(&gr_inode_lock);
44109+ }
44110+ preempt_enable();
44111+
44112+ return;
44113+}
44114+
44115+void
44116+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44117+ struct dentry *old_dentry,
44118+ struct dentry *new_dentry,
44119+ struct vfsmount *mnt, const __u8 replace)
44120+{
44121+ struct name_entry *matchn;
44122+ struct inodev_entry *inodev;
44123+ ino_t old_ino = old_dentry->d_inode->i_ino;
44124+ dev_t old_dev = __get_dev(old_dentry);
44125+
44126+ /* vfs_rename swaps the name and parent link for old_dentry and
44127+ new_dentry
44128+ at this point, old_dentry has the new name, parent link, and inode
44129+ for the renamed file
44130+ if a file is being replaced by a rename, new_dentry has the inode
44131+ and name for the replaced file
44132+ */
44133+
44134+ if (unlikely(!(gr_status & GR_READY)))
44135+ return;
44136+
44137+ preempt_disable();
44138+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44139+
44140+ /* we wouldn't have to check d_inode if it weren't for
44141+ NFS silly-renaming
44142+ */
44143+
44144+ write_lock(&gr_inode_lock);
44145+ if (unlikely(replace && new_dentry->d_inode)) {
44146+ ino_t new_ino = new_dentry->d_inode->i_ino;
44147+ dev_t new_dev = __get_dev(new_dentry);
44148+
44149+ inodev = lookup_inodev_entry(new_ino, new_dev);
44150+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44151+ do_handle_delete(inodev, new_ino, new_dev);
44152+ }
44153+
44154+ inodev = lookup_inodev_entry(old_ino, old_dev);
44155+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44156+ do_handle_delete(inodev, old_ino, old_dev);
44157+
44158+ if (unlikely((unsigned long)matchn))
44159+ do_handle_create(matchn, old_dentry, mnt);
44160+
44161+ write_unlock(&gr_inode_lock);
44162+ preempt_enable();
44163+
44164+ return;
44165+}
44166+
44167+static int
44168+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44169+ unsigned char **sum)
44170+{
44171+ struct acl_role_label *r;
44172+ struct role_allowed_ip *ipp;
44173+ struct role_transition *trans;
44174+ unsigned int i;
44175+ int found = 0;
44176+ u32 curr_ip = current->signal->curr_ip;
44177+
44178+ current->signal->saved_ip = curr_ip;
44179+
44180+ /* check transition table */
44181+
44182+ for (trans = current->role->transitions; trans; trans = trans->next) {
44183+ if (!strcmp(rolename, trans->rolename)) {
44184+ found = 1;
44185+ break;
44186+ }
44187+ }
44188+
44189+ if (!found)
44190+ return 0;
44191+
44192+ /* handle special roles that do not require authentication
44193+ and check ip */
44194+
44195+ FOR_EACH_ROLE_START(r)
44196+ if (!strcmp(rolename, r->rolename) &&
44197+ (r->roletype & GR_ROLE_SPECIAL)) {
44198+ found = 0;
44199+ if (r->allowed_ips != NULL) {
44200+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44201+ if ((ntohl(curr_ip) & ipp->netmask) ==
44202+ (ntohl(ipp->addr) & ipp->netmask))
44203+ found = 1;
44204+ }
44205+ } else
44206+ found = 2;
44207+ if (!found)
44208+ return 0;
44209+
44210+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44211+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44212+ *salt = NULL;
44213+ *sum = NULL;
44214+ return 1;
44215+ }
44216+ }
44217+ FOR_EACH_ROLE_END(r)
44218+
44219+ for (i = 0; i < num_sprole_pws; i++) {
44220+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44221+ *salt = acl_special_roles[i]->salt;
44222+ *sum = acl_special_roles[i]->sum;
44223+ return 1;
44224+ }
44225+ }
44226+
44227+ return 0;
44228+}
44229+
44230+static void
44231+assign_special_role(char *rolename)
44232+{
44233+ struct acl_object_label *obj;
44234+ struct acl_role_label *r;
44235+ struct acl_role_label *assigned = NULL;
44236+ struct task_struct *tsk;
44237+ struct file *filp;
44238+
44239+ FOR_EACH_ROLE_START(r)
44240+ if (!strcmp(rolename, r->rolename) &&
44241+ (r->roletype & GR_ROLE_SPECIAL)) {
44242+ assigned = r;
44243+ break;
44244+ }
44245+ FOR_EACH_ROLE_END(r)
44246+
44247+ if (!assigned)
44248+ return;
44249+
44250+ read_lock(&tasklist_lock);
44251+ read_lock(&grsec_exec_file_lock);
44252+
44253+ tsk = current->real_parent;
44254+ if (tsk == NULL)
44255+ goto out_unlock;
44256+
44257+ filp = tsk->exec_file;
44258+ if (filp == NULL)
44259+ goto out_unlock;
44260+
44261+ tsk->is_writable = 0;
44262+
44263+ tsk->acl_sp_role = 1;
44264+ tsk->acl_role_id = ++acl_sp_role_value;
44265+ tsk->role = assigned;
44266+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44267+
44268+ /* ignore additional mmap checks for processes that are writable
44269+ by the default ACL */
44270+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44271+ if (unlikely(obj->mode & GR_WRITE))
44272+ tsk->is_writable = 1;
44273+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44274+ if (unlikely(obj->mode & GR_WRITE))
44275+ tsk->is_writable = 1;
44276+
44277+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44278+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44279+#endif
44280+
44281+out_unlock:
44282+ read_unlock(&grsec_exec_file_lock);
44283+ read_unlock(&tasklist_lock);
44284+ return;
44285+}
44286+
44287+int gr_check_secure_terminal(struct task_struct *task)
44288+{
44289+ struct task_struct *p, *p2, *p3;
44290+ struct files_struct *files;
44291+ struct fdtable *fdt;
44292+ struct file *our_file = NULL, *file;
44293+ int i;
44294+
44295+ if (task->signal->tty == NULL)
44296+ return 1;
44297+
44298+ files = get_files_struct(task);
44299+ if (files != NULL) {
44300+ rcu_read_lock();
44301+ fdt = files_fdtable(files);
44302+ for (i=0; i < fdt->max_fds; i++) {
44303+ file = fcheck_files(files, i);
44304+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44305+ get_file(file);
44306+ our_file = file;
44307+ }
44308+ }
44309+ rcu_read_unlock();
44310+ put_files_struct(files);
44311+ }
44312+
44313+ if (our_file == NULL)
44314+ return 1;
44315+
44316+ read_lock(&tasklist_lock);
44317+ do_each_thread(p2, p) {
44318+ files = get_files_struct(p);
44319+ if (files == NULL ||
44320+ (p->signal && p->signal->tty == task->signal->tty)) {
44321+ if (files != NULL)
44322+ put_files_struct(files);
44323+ continue;
44324+ }
44325+ rcu_read_lock();
44326+ fdt = files_fdtable(files);
44327+ for (i=0; i < fdt->max_fds; i++) {
44328+ file = fcheck_files(files, i);
44329+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44330+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44331+ p3 = task;
44332+ while (p3->pid > 0) {
44333+ if (p3 == p)
44334+ break;
44335+ p3 = p3->real_parent;
44336+ }
44337+ if (p3 == p)
44338+ break;
44339+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44340+ gr_handle_alertkill(p);
44341+ rcu_read_unlock();
44342+ put_files_struct(files);
44343+ read_unlock(&tasklist_lock);
44344+ fput(our_file);
44345+ return 0;
44346+ }
44347+ }
44348+ rcu_read_unlock();
44349+ put_files_struct(files);
44350+ } while_each_thread(p2, p);
44351+ read_unlock(&tasklist_lock);
44352+
44353+ fput(our_file);
44354+ return 1;
44355+}
44356+
44357+ssize_t
44358+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44359+{
44360+ struct gr_arg_wrapper uwrap;
44361+ unsigned char *sprole_salt = NULL;
44362+ unsigned char *sprole_sum = NULL;
44363+ int error = sizeof (struct gr_arg_wrapper);
44364+ int error2 = 0;
44365+
44366+ mutex_lock(&gr_dev_mutex);
44367+
44368+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44369+ error = -EPERM;
44370+ goto out;
44371+ }
44372+
44373+ if (count != sizeof (struct gr_arg_wrapper)) {
44374+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44375+ error = -EINVAL;
44376+ goto out;
44377+ }
44378+
44379+
44380+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44381+ gr_auth_expires = 0;
44382+ gr_auth_attempts = 0;
44383+ }
44384+
44385+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44386+ error = -EFAULT;
44387+ goto out;
44388+ }
44389+
44390+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44391+ error = -EINVAL;
44392+ goto out;
44393+ }
44394+
44395+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44396+ error = -EFAULT;
44397+ goto out;
44398+ }
44399+
44400+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44401+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44402+ time_after(gr_auth_expires, get_seconds())) {
44403+ error = -EBUSY;
44404+ goto out;
44405+ }
44406+
44407+ /* if non-root trying to do anything other than use a special role,
44408+ do not attempt authentication, do not count towards authentication
44409+ locking
44410+ */
44411+
44412+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44413+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44414+ current_uid()) {
44415+ error = -EPERM;
44416+ goto out;
44417+ }
44418+
44419+ /* ensure pw and special role name are null terminated */
44420+
44421+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44422+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44423+
44424+ /* Okay.
44425+ * We have our enough of the argument structure..(we have yet
44426+ * to copy_from_user the tables themselves) . Copy the tables
44427+ * only if we need them, i.e. for loading operations. */
44428+
44429+ switch (gr_usermode->mode) {
44430+ case GR_STATUS:
44431+ if (gr_status & GR_READY) {
44432+ error = 1;
44433+ if (!gr_check_secure_terminal(current))
44434+ error = 3;
44435+ } else
44436+ error = 2;
44437+ goto out;
44438+ case GR_SHUTDOWN:
44439+ if ((gr_status & GR_READY)
44440+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44441+ pax_open_kernel();
44442+ gr_status &= ~GR_READY;
44443+ pax_close_kernel();
44444+
44445+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44446+ free_variables();
44447+ memset(gr_usermode, 0, sizeof (struct gr_arg));
44448+ memset(gr_system_salt, 0, GR_SALT_LEN);
44449+ memset(gr_system_sum, 0, GR_SHA_LEN);
44450+ } else if (gr_status & GR_READY) {
44451+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44452+ error = -EPERM;
44453+ } else {
44454+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44455+ error = -EAGAIN;
44456+ }
44457+ break;
44458+ case GR_ENABLE:
44459+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44460+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44461+ else {
44462+ if (gr_status & GR_READY)
44463+ error = -EAGAIN;
44464+ else
44465+ error = error2;
44466+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44467+ }
44468+ break;
44469+ case GR_RELOAD:
44470+ if (!(gr_status & GR_READY)) {
44471+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44472+ error = -EAGAIN;
44473+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44474+ preempt_disable();
44475+
44476+ pax_open_kernel();
44477+ gr_status &= ~GR_READY;
44478+ pax_close_kernel();
44479+
44480+ free_variables();
44481+ if (!(error2 = gracl_init(gr_usermode))) {
44482+ preempt_enable();
44483+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44484+ } else {
44485+ preempt_enable();
44486+ error = error2;
44487+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44488+ }
44489+ } else {
44490+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44491+ error = -EPERM;
44492+ }
44493+ break;
44494+ case GR_SEGVMOD:
44495+ if (unlikely(!(gr_status & GR_READY))) {
44496+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44497+ error = -EAGAIN;
44498+ break;
44499+ }
44500+
44501+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44502+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44503+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44504+ struct acl_subject_label *segvacl;
44505+ segvacl =
44506+ lookup_acl_subj_label(gr_usermode->segv_inode,
44507+ gr_usermode->segv_device,
44508+ current->role);
44509+ if (segvacl) {
44510+ segvacl->crashes = 0;
44511+ segvacl->expires = 0;
44512+ }
44513+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44514+ gr_remove_uid(gr_usermode->segv_uid);
44515+ }
44516+ } else {
44517+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44518+ error = -EPERM;
44519+ }
44520+ break;
44521+ case GR_SPROLE:
44522+ case GR_SPROLEPAM:
44523+ if (unlikely(!(gr_status & GR_READY))) {
44524+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44525+ error = -EAGAIN;
44526+ break;
44527+ }
44528+
44529+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44530+ current->role->expires = 0;
44531+ current->role->auth_attempts = 0;
44532+ }
44533+
44534+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44535+ time_after(current->role->expires, get_seconds())) {
44536+ error = -EBUSY;
44537+ goto out;
44538+ }
44539+
44540+ if (lookup_special_role_auth
44541+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44542+ && ((!sprole_salt && !sprole_sum)
44543+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44544+ char *p = "";
44545+ assign_special_role(gr_usermode->sp_role);
44546+ read_lock(&tasklist_lock);
44547+ if (current->real_parent)
44548+ p = current->real_parent->role->rolename;
44549+ read_unlock(&tasklist_lock);
44550+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44551+ p, acl_sp_role_value);
44552+ } else {
44553+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44554+ error = -EPERM;
44555+ if(!(current->role->auth_attempts++))
44556+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44557+
44558+ goto out;
44559+ }
44560+ break;
44561+ case GR_UNSPROLE:
44562+ if (unlikely(!(gr_status & GR_READY))) {
44563+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44564+ error = -EAGAIN;
44565+ break;
44566+ }
44567+
44568+ if (current->role->roletype & GR_ROLE_SPECIAL) {
44569+ char *p = "";
44570+ int i = 0;
44571+
44572+ read_lock(&tasklist_lock);
44573+ if (current->real_parent) {
44574+ p = current->real_parent->role->rolename;
44575+ i = current->real_parent->acl_role_id;
44576+ }
44577+ read_unlock(&tasklist_lock);
44578+
44579+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44580+ gr_set_acls(1);
44581+ } else {
44582+ error = -EPERM;
44583+ goto out;
44584+ }
44585+ break;
44586+ default:
44587+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44588+ error = -EINVAL;
44589+ break;
44590+ }
44591+
44592+ if (error != -EPERM)
44593+ goto out;
44594+
44595+ if(!(gr_auth_attempts++))
44596+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44597+
44598+ out:
44599+ mutex_unlock(&gr_dev_mutex);
44600+ return error;
44601+}
44602+
44603+/* must be called with
44604+ rcu_read_lock();
44605+ read_lock(&tasklist_lock);
44606+ read_lock(&grsec_exec_file_lock);
44607+*/
44608+int gr_apply_subject_to_task(struct task_struct *task)
44609+{
44610+ struct acl_object_label *obj;
44611+ char *tmpname;
44612+ struct acl_subject_label *tmpsubj;
44613+ struct file *filp;
44614+ struct name_entry *nmatch;
44615+
44616+ filp = task->exec_file;
44617+ if (filp == NULL)
44618+ return 0;
44619+
44620+ /* the following is to apply the correct subject
44621+ on binaries running when the RBAC system
44622+ is enabled, when the binaries have been
44623+ replaced or deleted since their execution
44624+ -----
44625+ when the RBAC system starts, the inode/dev
44626+ from exec_file will be one the RBAC system
44627+ is unaware of. It only knows the inode/dev
44628+ of the present file on disk, or the absence
44629+ of it.
44630+ */
44631+ preempt_disable();
44632+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44633+
44634+ nmatch = lookup_name_entry(tmpname);
44635+ preempt_enable();
44636+ tmpsubj = NULL;
44637+ if (nmatch) {
44638+ if (nmatch->deleted)
44639+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44640+ else
44641+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44642+ if (tmpsubj != NULL)
44643+ task->acl = tmpsubj;
44644+ }
44645+ if (tmpsubj == NULL)
44646+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44647+ task->role);
44648+ if (task->acl) {
44649+ task->is_writable = 0;
44650+ /* ignore additional mmap checks for processes that are writable
44651+ by the default ACL */
44652+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44653+ if (unlikely(obj->mode & GR_WRITE))
44654+ task->is_writable = 1;
44655+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44656+ if (unlikely(obj->mode & GR_WRITE))
44657+ task->is_writable = 1;
44658+
44659+ gr_set_proc_res(task);
44660+
44661+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44662+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44663+#endif
44664+ } else {
44665+ return 1;
44666+ }
44667+
44668+ return 0;
44669+}
44670+
44671+int
44672+gr_set_acls(const int type)
44673+{
44674+ struct task_struct *task, *task2;
44675+ struct acl_role_label *role = current->role;
44676+ __u16 acl_role_id = current->acl_role_id;
44677+ const struct cred *cred;
44678+ int ret;
44679+
44680+ rcu_read_lock();
44681+ read_lock(&tasklist_lock);
44682+ read_lock(&grsec_exec_file_lock);
44683+ do_each_thread(task2, task) {
44684+ /* check to see if we're called from the exit handler,
44685+ if so, only replace ACLs that have inherited the admin
44686+ ACL */
44687+
44688+ if (type && (task->role != role ||
44689+ task->acl_role_id != acl_role_id))
44690+ continue;
44691+
44692+ task->acl_role_id = 0;
44693+ task->acl_sp_role = 0;
44694+
44695+ if (task->exec_file) {
44696+ cred = __task_cred(task);
44697+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44698+ ret = gr_apply_subject_to_task(task);
44699+ if (ret) {
44700+ read_unlock(&grsec_exec_file_lock);
44701+ read_unlock(&tasklist_lock);
44702+ rcu_read_unlock();
44703+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44704+ return ret;
44705+ }
44706+ } else {
44707+ // it's a kernel process
44708+ task->role = kernel_role;
44709+ task->acl = kernel_role->root_label;
44710+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44711+ task->acl->mode &= ~GR_PROCFIND;
44712+#endif
44713+ }
44714+ } while_each_thread(task2, task);
44715+ read_unlock(&grsec_exec_file_lock);
44716+ read_unlock(&tasklist_lock);
44717+ rcu_read_unlock();
44718+
44719+ return 0;
44720+}
44721+
44722+void
44723+gr_learn_resource(const struct task_struct *task,
44724+ const int res, const unsigned long wanted, const int gt)
44725+{
44726+ struct acl_subject_label *acl;
44727+ const struct cred *cred;
44728+
44729+ if (unlikely((gr_status & GR_READY) &&
44730+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44731+ goto skip_reslog;
44732+
44733+#ifdef CONFIG_GRKERNSEC_RESLOG
44734+ gr_log_resource(task, res, wanted, gt);
44735+#endif
44736+ skip_reslog:
44737+
44738+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44739+ return;
44740+
44741+ acl = task->acl;
44742+
44743+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44744+ !(acl->resmask & (1 << (unsigned short) res))))
44745+ return;
44746+
44747+ if (wanted >= acl->res[res].rlim_cur) {
44748+ unsigned long res_add;
44749+
44750+ res_add = wanted;
44751+ switch (res) {
44752+ case RLIMIT_CPU:
44753+ res_add += GR_RLIM_CPU_BUMP;
44754+ break;
44755+ case RLIMIT_FSIZE:
44756+ res_add += GR_RLIM_FSIZE_BUMP;
44757+ break;
44758+ case RLIMIT_DATA:
44759+ res_add += GR_RLIM_DATA_BUMP;
44760+ break;
44761+ case RLIMIT_STACK:
44762+ res_add += GR_RLIM_STACK_BUMP;
44763+ break;
44764+ case RLIMIT_CORE:
44765+ res_add += GR_RLIM_CORE_BUMP;
44766+ break;
44767+ case RLIMIT_RSS:
44768+ res_add += GR_RLIM_RSS_BUMP;
44769+ break;
44770+ case RLIMIT_NPROC:
44771+ res_add += GR_RLIM_NPROC_BUMP;
44772+ break;
44773+ case RLIMIT_NOFILE:
44774+ res_add += GR_RLIM_NOFILE_BUMP;
44775+ break;
44776+ case RLIMIT_MEMLOCK:
44777+ res_add += GR_RLIM_MEMLOCK_BUMP;
44778+ break;
44779+ case RLIMIT_AS:
44780+ res_add += GR_RLIM_AS_BUMP;
44781+ break;
44782+ case RLIMIT_LOCKS:
44783+ res_add += GR_RLIM_LOCKS_BUMP;
44784+ break;
44785+ case RLIMIT_SIGPENDING:
44786+ res_add += GR_RLIM_SIGPENDING_BUMP;
44787+ break;
44788+ case RLIMIT_MSGQUEUE:
44789+ res_add += GR_RLIM_MSGQUEUE_BUMP;
44790+ break;
44791+ case RLIMIT_NICE:
44792+ res_add += GR_RLIM_NICE_BUMP;
44793+ break;
44794+ case RLIMIT_RTPRIO:
44795+ res_add += GR_RLIM_RTPRIO_BUMP;
44796+ break;
44797+ case RLIMIT_RTTIME:
44798+ res_add += GR_RLIM_RTTIME_BUMP;
44799+ break;
44800+ }
44801+
44802+ acl->res[res].rlim_cur = res_add;
44803+
44804+ if (wanted > acl->res[res].rlim_max)
44805+ acl->res[res].rlim_max = res_add;
44806+
44807+ /* only log the subject filename, since resource logging is supported for
44808+ single-subject learning only */
44809+ rcu_read_lock();
44810+ cred = __task_cred(task);
44811+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44812+ task->role->roletype, cred->uid, cred->gid, acl->filename,
44813+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44814+ "", (unsigned long) res, &task->signal->saved_ip);
44815+ rcu_read_unlock();
44816+ }
44817+
44818+ return;
44819+}
44820+
44821+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44822+void
44823+pax_set_initial_flags(struct linux_binprm *bprm)
44824+{
44825+ struct task_struct *task = current;
44826+ struct acl_subject_label *proc;
44827+ unsigned long flags;
44828+
44829+ if (unlikely(!(gr_status & GR_READY)))
44830+ return;
44831+
44832+ flags = pax_get_flags(task);
44833+
44834+ proc = task->acl;
44835+
44836+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44837+ flags &= ~MF_PAX_PAGEEXEC;
44838+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44839+ flags &= ~MF_PAX_SEGMEXEC;
44840+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44841+ flags &= ~MF_PAX_RANDMMAP;
44842+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44843+ flags &= ~MF_PAX_EMUTRAMP;
44844+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44845+ flags &= ~MF_PAX_MPROTECT;
44846+
44847+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44848+ flags |= MF_PAX_PAGEEXEC;
44849+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44850+ flags |= MF_PAX_SEGMEXEC;
44851+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44852+ flags |= MF_PAX_RANDMMAP;
44853+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44854+ flags |= MF_PAX_EMUTRAMP;
44855+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44856+ flags |= MF_PAX_MPROTECT;
44857+
44858+ pax_set_flags(task, flags);
44859+
44860+ return;
44861+}
44862+#endif
44863+
44864+#ifdef CONFIG_SYSCTL
44865+/* Eric Biederman likes breaking userland ABI and every inode-based security
44866+ system to save 35kb of memory */
44867+
44868+/* we modify the passed in filename, but adjust it back before returning */
44869+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44870+{
44871+ struct name_entry *nmatch;
44872+ char *p, *lastp = NULL;
44873+ struct acl_object_label *obj = NULL, *tmp;
44874+ struct acl_subject_label *tmpsubj;
44875+ char c = '\0';
44876+
44877+ read_lock(&gr_inode_lock);
44878+
44879+ p = name + len - 1;
44880+ do {
44881+ nmatch = lookup_name_entry(name);
44882+ if (lastp != NULL)
44883+ *lastp = c;
44884+
44885+ if (nmatch == NULL)
44886+ goto next_component;
44887+ tmpsubj = current->acl;
44888+ do {
44889+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44890+ if (obj != NULL) {
44891+ tmp = obj->globbed;
44892+ while (tmp) {
44893+ if (!glob_match(tmp->filename, name)) {
44894+ obj = tmp;
44895+ goto found_obj;
44896+ }
44897+ tmp = tmp->next;
44898+ }
44899+ goto found_obj;
44900+ }
44901+ } while ((tmpsubj = tmpsubj->parent_subject));
44902+next_component:
44903+ /* end case */
44904+ if (p == name)
44905+ break;
44906+
44907+ while (*p != '/')
44908+ p--;
44909+ if (p == name)
44910+ lastp = p + 1;
44911+ else {
44912+ lastp = p;
44913+ p--;
44914+ }
44915+ c = *lastp;
44916+ *lastp = '\0';
44917+ } while (1);
44918+found_obj:
44919+ read_unlock(&gr_inode_lock);
44920+ /* obj returned will always be non-null */
44921+ return obj;
44922+}
44923+
44924+/* returns 0 when allowing, non-zero on error
44925+ op of 0 is used for readdir, so we don't log the names of hidden files
44926+*/
44927+__u32
44928+gr_handle_sysctl(const struct ctl_table *table, const int op)
44929+{
44930+ struct ctl_table *tmp;
44931+ const char *proc_sys = "/proc/sys";
44932+ char *path;
44933+ struct acl_object_label *obj;
44934+ unsigned short len = 0, pos = 0, depth = 0, i;
44935+ __u32 err = 0;
44936+ __u32 mode = 0;
44937+
44938+ if (unlikely(!(gr_status & GR_READY)))
44939+ return 0;
44940+
44941+ /* for now, ignore operations on non-sysctl entries if it's not a
44942+ readdir*/
44943+ if (table->child != NULL && op != 0)
44944+ return 0;
44945+
44946+ mode |= GR_FIND;
44947+ /* it's only a read if it's an entry, read on dirs is for readdir */
44948+ if (op & MAY_READ)
44949+ mode |= GR_READ;
44950+ if (op & MAY_WRITE)
44951+ mode |= GR_WRITE;
44952+
44953+ preempt_disable();
44954+
44955+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
44956+
44957+ /* it's only a read/write if it's an actual entry, not a dir
44958+ (which are opened for readdir)
44959+ */
44960+
44961+ /* convert the requested sysctl entry into a pathname */
44962+
44963+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44964+ len += strlen(tmp->procname);
44965+ len++;
44966+ depth++;
44967+ }
44968+
44969+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
44970+ /* deny */
44971+ goto out;
44972+ }
44973+
44974+ memset(path, 0, PAGE_SIZE);
44975+
44976+ memcpy(path, proc_sys, strlen(proc_sys));
44977+
44978+ pos += strlen(proc_sys);
44979+
44980+ for (; depth > 0; depth--) {
44981+ path[pos] = '/';
44982+ pos++;
44983+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44984+ if (depth == i) {
44985+ memcpy(path + pos, tmp->procname,
44986+ strlen(tmp->procname));
44987+ pos += strlen(tmp->procname);
44988+ }
44989+ i++;
44990+ }
44991+ }
44992+
44993+ obj = gr_lookup_by_name(path, pos);
44994+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
44995+
44996+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
44997+ ((err & mode) != mode))) {
44998+ __u32 new_mode = mode;
44999+
45000+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45001+
45002+ err = 0;
45003+ gr_log_learn_sysctl(path, new_mode);
45004+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45005+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45006+ err = -ENOENT;
45007+ } else if (!(err & GR_FIND)) {
45008+ err = -ENOENT;
45009+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45010+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45011+ path, (mode & GR_READ) ? " reading" : "",
45012+ (mode & GR_WRITE) ? " writing" : "");
45013+ err = -EACCES;
45014+ } else if ((err & mode) != mode) {
45015+ err = -EACCES;
45016+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45017+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45018+ path, (mode & GR_READ) ? " reading" : "",
45019+ (mode & GR_WRITE) ? " writing" : "");
45020+ err = 0;
45021+ } else
45022+ err = 0;
45023+
45024+ out:
45025+ preempt_enable();
45026+
45027+ return err;
45028+}
45029+#endif
45030+
45031+int
45032+gr_handle_proc_ptrace(struct task_struct *task)
45033+{
45034+ struct file *filp;
45035+ struct task_struct *tmp = task;
45036+ struct task_struct *curtemp = current;
45037+ __u32 retmode;
45038+
45039+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45040+ if (unlikely(!(gr_status & GR_READY)))
45041+ return 0;
45042+#endif
45043+
45044+ read_lock(&tasklist_lock);
45045+ read_lock(&grsec_exec_file_lock);
45046+ filp = task->exec_file;
45047+
45048+ while (tmp->pid > 0) {
45049+ if (tmp == curtemp)
45050+ break;
45051+ tmp = tmp->real_parent;
45052+ }
45053+
45054+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45055+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45056+ read_unlock(&grsec_exec_file_lock);
45057+ read_unlock(&tasklist_lock);
45058+ return 1;
45059+ }
45060+
45061+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45062+ if (!(gr_status & GR_READY)) {
45063+ read_unlock(&grsec_exec_file_lock);
45064+ read_unlock(&tasklist_lock);
45065+ return 0;
45066+ }
45067+#endif
45068+
45069+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45070+ read_unlock(&grsec_exec_file_lock);
45071+ read_unlock(&tasklist_lock);
45072+
45073+ if (retmode & GR_NOPTRACE)
45074+ return 1;
45075+
45076+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45077+ && (current->acl != task->acl || (current->acl != current->role->root_label
45078+ && current->pid != task->pid)))
45079+ return 1;
45080+
45081+ return 0;
45082+}
45083+
45084+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45085+{
45086+ if (unlikely(!(gr_status & GR_READY)))
45087+ return;
45088+
45089+ if (!(current->role->roletype & GR_ROLE_GOD))
45090+ return;
45091+
45092+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45093+ p->role->rolename, gr_task_roletype_to_char(p),
45094+ p->acl->filename);
45095+}
45096+
45097+int
45098+gr_handle_ptrace(struct task_struct *task, const long request)
45099+{
45100+ struct task_struct *tmp = task;
45101+ struct task_struct *curtemp = current;
45102+ __u32 retmode;
45103+
45104+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45105+ if (unlikely(!(gr_status & GR_READY)))
45106+ return 0;
45107+#endif
45108+
45109+ read_lock(&tasklist_lock);
45110+ while (tmp->pid > 0) {
45111+ if (tmp == curtemp)
45112+ break;
45113+ tmp = tmp->real_parent;
45114+ }
45115+
45116+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45117+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45118+ read_unlock(&tasklist_lock);
45119+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45120+ return 1;
45121+ }
45122+ read_unlock(&tasklist_lock);
45123+
45124+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45125+ if (!(gr_status & GR_READY))
45126+ return 0;
45127+#endif
45128+
45129+ read_lock(&grsec_exec_file_lock);
45130+ if (unlikely(!task->exec_file)) {
45131+ read_unlock(&grsec_exec_file_lock);
45132+ return 0;
45133+ }
45134+
45135+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45136+ read_unlock(&grsec_exec_file_lock);
45137+
45138+ if (retmode & GR_NOPTRACE) {
45139+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45140+ return 1;
45141+ }
45142+
45143+ if (retmode & GR_PTRACERD) {
45144+ switch (request) {
45145+ case PTRACE_POKETEXT:
45146+ case PTRACE_POKEDATA:
45147+ case PTRACE_POKEUSR:
45148+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45149+ case PTRACE_SETREGS:
45150+ case PTRACE_SETFPREGS:
45151+#endif
45152+#ifdef CONFIG_X86
45153+ case PTRACE_SETFPXREGS:
45154+#endif
45155+#ifdef CONFIG_ALTIVEC
45156+ case PTRACE_SETVRREGS:
45157+#endif
45158+ return 1;
45159+ default:
45160+ return 0;
45161+ }
45162+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
45163+ !(current->role->roletype & GR_ROLE_GOD) &&
45164+ (current->acl != task->acl)) {
45165+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45166+ return 1;
45167+ }
45168+
45169+ return 0;
45170+}
45171+
45172+static int is_writable_mmap(const struct file *filp)
45173+{
45174+ struct task_struct *task = current;
45175+ struct acl_object_label *obj, *obj2;
45176+
45177+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45178+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45179+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45180+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45181+ task->role->root_label);
45182+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45183+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45184+ return 1;
45185+ }
45186+ }
45187+ return 0;
45188+}
45189+
45190+int
45191+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45192+{
45193+ __u32 mode;
45194+
45195+ if (unlikely(!file || !(prot & PROT_EXEC)))
45196+ return 1;
45197+
45198+ if (is_writable_mmap(file))
45199+ return 0;
45200+
45201+ mode =
45202+ gr_search_file(file->f_path.dentry,
45203+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45204+ file->f_path.mnt);
45205+
45206+ if (!gr_tpe_allow(file))
45207+ return 0;
45208+
45209+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45210+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45211+ return 0;
45212+ } else if (unlikely(!(mode & GR_EXEC))) {
45213+ return 0;
45214+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45215+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45216+ return 1;
45217+ }
45218+
45219+ return 1;
45220+}
45221+
45222+int
45223+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45224+{
45225+ __u32 mode;
45226+
45227+ if (unlikely(!file || !(prot & PROT_EXEC)))
45228+ return 1;
45229+
45230+ if (is_writable_mmap(file))
45231+ return 0;
45232+
45233+ mode =
45234+ gr_search_file(file->f_path.dentry,
45235+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45236+ file->f_path.mnt);
45237+
45238+ if (!gr_tpe_allow(file))
45239+ return 0;
45240+
45241+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45242+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45243+ return 0;
45244+ } else if (unlikely(!(mode & GR_EXEC))) {
45245+ return 0;
45246+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45247+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45248+ return 1;
45249+ }
45250+
45251+ return 1;
45252+}
45253+
45254+void
45255+gr_acl_handle_psacct(struct task_struct *task, const long code)
45256+{
45257+ unsigned long runtime;
45258+ unsigned long cputime;
45259+ unsigned int wday, cday;
45260+ __u8 whr, chr;
45261+ __u8 wmin, cmin;
45262+ __u8 wsec, csec;
45263+ struct timespec timeval;
45264+
45265+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45266+ !(task->acl->mode & GR_PROCACCT)))
45267+ return;
45268+
45269+ do_posix_clock_monotonic_gettime(&timeval);
45270+ runtime = timeval.tv_sec - task->start_time.tv_sec;
45271+ wday = runtime / (3600 * 24);
45272+ runtime -= wday * (3600 * 24);
45273+ whr = runtime / 3600;
45274+ runtime -= whr * 3600;
45275+ wmin = runtime / 60;
45276+ runtime -= wmin * 60;
45277+ wsec = runtime;
45278+
45279+ cputime = (task->utime + task->stime) / HZ;
45280+ cday = cputime / (3600 * 24);
45281+ cputime -= cday * (3600 * 24);
45282+ chr = cputime / 3600;
45283+ cputime -= chr * 3600;
45284+ cmin = cputime / 60;
45285+ cputime -= cmin * 60;
45286+ csec = cputime;
45287+
45288+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45289+
45290+ return;
45291+}
45292+
45293+void gr_set_kernel_label(struct task_struct *task)
45294+{
45295+ if (gr_status & GR_READY) {
45296+ task->role = kernel_role;
45297+ task->acl = kernel_role->root_label;
45298+ }
45299+ return;
45300+}
45301+
45302+#ifdef CONFIG_TASKSTATS
45303+int gr_is_taskstats_denied(int pid)
45304+{
45305+ struct task_struct *task;
45306+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45307+ const struct cred *cred;
45308+#endif
45309+ int ret = 0;
45310+
45311+ /* restrict taskstats viewing to un-chrooted root users
45312+ who have the 'view' subject flag if the RBAC system is enabled
45313+ */
45314+
45315+ rcu_read_lock();
45316+ read_lock(&tasklist_lock);
45317+ task = find_task_by_vpid(pid);
45318+ if (task) {
45319+#ifdef CONFIG_GRKERNSEC_CHROOT
45320+ if (proc_is_chrooted(task))
45321+ ret = -EACCES;
45322+#endif
45323+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45324+ cred = __task_cred(task);
45325+#ifdef CONFIG_GRKERNSEC_PROC_USER
45326+ if (cred->uid != 0)
45327+ ret = -EACCES;
45328+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45329+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45330+ ret = -EACCES;
45331+#endif
45332+#endif
45333+ if (gr_status & GR_READY) {
45334+ if (!(task->acl->mode & GR_VIEW))
45335+ ret = -EACCES;
45336+ }
45337+ } else
45338+ ret = -ENOENT;
45339+
45340+ read_unlock(&tasklist_lock);
45341+ rcu_read_unlock();
45342+
45343+ return ret;
45344+}
45345+#endif
45346+
45347+/* AUXV entries are filled via a descendant of search_binary_handler
45348+ after we've already applied the subject for the target
45349+*/
45350+int gr_acl_enable_at_secure(void)
45351+{
45352+ if (unlikely(!(gr_status & GR_READY)))
45353+ return 0;
45354+
45355+ if (current->acl->mode & GR_ATSECURE)
45356+ return 1;
45357+
45358+ return 0;
45359+}
45360+
45361+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45362+{
45363+ struct task_struct *task = current;
45364+ struct dentry *dentry = file->f_path.dentry;
45365+ struct vfsmount *mnt = file->f_path.mnt;
45366+ struct acl_object_label *obj, *tmp;
45367+ struct acl_subject_label *subj;
45368+ unsigned int bufsize;
45369+ int is_not_root;
45370+ char *path;
45371+ dev_t dev = __get_dev(dentry);
45372+
45373+ if (unlikely(!(gr_status & GR_READY)))
45374+ return 1;
45375+
45376+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45377+ return 1;
45378+
45379+ /* ignore Eric Biederman */
45380+ if (IS_PRIVATE(dentry->d_inode))
45381+ return 1;
45382+
45383+ subj = task->acl;
45384+ do {
45385+ obj = lookup_acl_obj_label(ino, dev, subj);
45386+ if (obj != NULL)
45387+ return (obj->mode & GR_FIND) ? 1 : 0;
45388+ } while ((subj = subj->parent_subject));
45389+
45390+ /* this is purely an optimization since we're looking for an object
45391+ for the directory we're doing a readdir on
45392+ if it's possible for any globbed object to match the entry we're
45393+ filling into the directory, then the object we find here will be
45394+ an anchor point with attached globbed objects
45395+ */
45396+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45397+ if (obj->globbed == NULL)
45398+ return (obj->mode & GR_FIND) ? 1 : 0;
45399+
45400+ is_not_root = ((obj->filename[0] == '/') &&
45401+ (obj->filename[1] == '\0')) ? 0 : 1;
45402+ bufsize = PAGE_SIZE - namelen - is_not_root;
45403+
45404+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
45405+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45406+ return 1;
45407+
45408+ preempt_disable();
45409+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45410+ bufsize);
45411+
45412+ bufsize = strlen(path);
45413+
45414+ /* if base is "/", don't append an additional slash */
45415+ if (is_not_root)
45416+ *(path + bufsize) = '/';
45417+ memcpy(path + bufsize + is_not_root, name, namelen);
45418+ *(path + bufsize + namelen + is_not_root) = '\0';
45419+
45420+ tmp = obj->globbed;
45421+ while (tmp) {
45422+ if (!glob_match(tmp->filename, path)) {
45423+ preempt_enable();
45424+ return (tmp->mode & GR_FIND) ? 1 : 0;
45425+ }
45426+ tmp = tmp->next;
45427+ }
45428+ preempt_enable();
45429+ return (obj->mode & GR_FIND) ? 1 : 0;
45430+}
45431+
45432+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45433+EXPORT_SYMBOL(gr_acl_is_enabled);
45434+#endif
45435+EXPORT_SYMBOL(gr_learn_resource);
45436+EXPORT_SYMBOL(gr_set_kernel_label);
45437+#ifdef CONFIG_SECURITY
45438+EXPORT_SYMBOL(gr_check_user_change);
45439+EXPORT_SYMBOL(gr_check_group_change);
45440+#endif
45441+
45442diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45443--- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45444+++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45445@@ -0,0 +1,139 @@
45446+#include <linux/kernel.h>
45447+#include <linux/module.h>
45448+#include <linux/sched.h>
45449+#include <linux/gracl.h>
45450+#include <linux/grsecurity.h>
45451+#include <linux/grinternal.h>
45452+
45453+static const char *captab_log[] = {
45454+ "CAP_CHOWN",
45455+ "CAP_DAC_OVERRIDE",
45456+ "CAP_DAC_READ_SEARCH",
45457+ "CAP_FOWNER",
45458+ "CAP_FSETID",
45459+ "CAP_KILL",
45460+ "CAP_SETGID",
45461+ "CAP_SETUID",
45462+ "CAP_SETPCAP",
45463+ "CAP_LINUX_IMMUTABLE",
45464+ "CAP_NET_BIND_SERVICE",
45465+ "CAP_NET_BROADCAST",
45466+ "CAP_NET_ADMIN",
45467+ "CAP_NET_RAW",
45468+ "CAP_IPC_LOCK",
45469+ "CAP_IPC_OWNER",
45470+ "CAP_SYS_MODULE",
45471+ "CAP_SYS_RAWIO",
45472+ "CAP_SYS_CHROOT",
45473+ "CAP_SYS_PTRACE",
45474+ "CAP_SYS_PACCT",
45475+ "CAP_SYS_ADMIN",
45476+ "CAP_SYS_BOOT",
45477+ "CAP_SYS_NICE",
45478+ "CAP_SYS_RESOURCE",
45479+ "CAP_SYS_TIME",
45480+ "CAP_SYS_TTY_CONFIG",
45481+ "CAP_MKNOD",
45482+ "CAP_LEASE",
45483+ "CAP_AUDIT_WRITE",
45484+ "CAP_AUDIT_CONTROL",
45485+ "CAP_SETFCAP",
45486+ "CAP_MAC_OVERRIDE",
45487+ "CAP_MAC_ADMIN",
45488+ "CAP_SYSLOG"
45489+};
45490+
45491+EXPORT_SYMBOL(gr_is_capable);
45492+EXPORT_SYMBOL(gr_is_capable_nolog);
45493+
45494+int
45495+gr_is_capable(const int cap)
45496+{
45497+ struct task_struct *task = current;
45498+ const struct cred *cred = current_cred();
45499+ struct acl_subject_label *curracl;
45500+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45501+ kernel_cap_t cap_audit = __cap_empty_set;
45502+
45503+ if (!gr_acl_is_enabled())
45504+ return 1;
45505+
45506+ curracl = task->acl;
45507+
45508+ cap_drop = curracl->cap_lower;
45509+ cap_mask = curracl->cap_mask;
45510+ cap_audit = curracl->cap_invert_audit;
45511+
45512+ while ((curracl = curracl->parent_subject)) {
45513+ /* if the cap isn't specified in the current computed mask but is specified in the
45514+ current level subject, and is lowered in the current level subject, then add
45515+ it to the set of dropped capabilities
45516+ otherwise, add the current level subject's mask to the current computed mask
45517+ */
45518+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45519+ cap_raise(cap_mask, cap);
45520+ if (cap_raised(curracl->cap_lower, cap))
45521+ cap_raise(cap_drop, cap);
45522+ if (cap_raised(curracl->cap_invert_audit, cap))
45523+ cap_raise(cap_audit, cap);
45524+ }
45525+ }
45526+
45527+ if (!cap_raised(cap_drop, cap)) {
45528+ if (cap_raised(cap_audit, cap))
45529+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45530+ return 1;
45531+ }
45532+
45533+ curracl = task->acl;
45534+
45535+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45536+ && cap_raised(cred->cap_effective, cap)) {
45537+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45538+ task->role->roletype, cred->uid,
45539+ cred->gid, task->exec_file ?
45540+ gr_to_filename(task->exec_file->f_path.dentry,
45541+ task->exec_file->f_path.mnt) : curracl->filename,
45542+ curracl->filename, 0UL,
45543+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45544+ return 1;
45545+ }
45546+
45547+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45548+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45549+ return 0;
45550+}
45551+
45552+int
45553+gr_is_capable_nolog(const int cap)
45554+{
45555+ struct acl_subject_label *curracl;
45556+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45557+
45558+ if (!gr_acl_is_enabled())
45559+ return 1;
45560+
45561+ curracl = current->acl;
45562+
45563+ cap_drop = curracl->cap_lower;
45564+ cap_mask = curracl->cap_mask;
45565+
45566+ while ((curracl = curracl->parent_subject)) {
45567+ /* if the cap isn't specified in the current computed mask but is specified in the
45568+ current level subject, and is lowered in the current level subject, then add
45569+ it to the set of dropped capabilities
45570+ otherwise, add the current level subject's mask to the current computed mask
45571+ */
45572+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45573+ cap_raise(cap_mask, cap);
45574+ if (cap_raised(curracl->cap_lower, cap))
45575+ cap_raise(cap_drop, cap);
45576+ }
45577+ }
45578+
45579+ if (!cap_raised(cap_drop, cap))
45580+ return 1;
45581+
45582+ return 0;
45583+}
45584+
45585diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45586--- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45587+++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45588@@ -0,0 +1,431 @@
45589+#include <linux/kernel.h>
45590+#include <linux/sched.h>
45591+#include <linux/types.h>
45592+#include <linux/fs.h>
45593+#include <linux/file.h>
45594+#include <linux/stat.h>
45595+#include <linux/grsecurity.h>
45596+#include <linux/grinternal.h>
45597+#include <linux/gracl.h>
45598+
45599+__u32
45600+gr_acl_handle_hidden_file(const struct dentry * dentry,
45601+ const struct vfsmount * mnt)
45602+{
45603+ __u32 mode;
45604+
45605+ if (unlikely(!dentry->d_inode))
45606+ return GR_FIND;
45607+
45608+ mode =
45609+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45610+
45611+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45612+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45613+ return mode;
45614+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45615+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45616+ return 0;
45617+ } else if (unlikely(!(mode & GR_FIND)))
45618+ return 0;
45619+
45620+ return GR_FIND;
45621+}
45622+
45623+__u32
45624+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45625+ const int fmode)
45626+{
45627+ __u32 reqmode = GR_FIND;
45628+ __u32 mode;
45629+
45630+ if (unlikely(!dentry->d_inode))
45631+ return reqmode;
45632+
45633+ if (unlikely(fmode & O_APPEND))
45634+ reqmode |= GR_APPEND;
45635+ else if (unlikely(fmode & FMODE_WRITE))
45636+ reqmode |= GR_WRITE;
45637+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45638+ reqmode |= GR_READ;
45639+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45640+ reqmode &= ~GR_READ;
45641+ mode =
45642+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45643+ mnt);
45644+
45645+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45646+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45647+ reqmode & GR_READ ? " reading" : "",
45648+ reqmode & GR_WRITE ? " writing" : reqmode &
45649+ GR_APPEND ? " appending" : "");
45650+ return reqmode;
45651+ } else
45652+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45653+ {
45654+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45655+ reqmode & GR_READ ? " reading" : "",
45656+ reqmode & GR_WRITE ? " writing" : reqmode &
45657+ GR_APPEND ? " appending" : "");
45658+ return 0;
45659+ } else if (unlikely((mode & reqmode) != reqmode))
45660+ return 0;
45661+
45662+ return reqmode;
45663+}
45664+
45665+__u32
45666+gr_acl_handle_creat(const struct dentry * dentry,
45667+ const struct dentry * p_dentry,
45668+ const struct vfsmount * p_mnt, const int fmode,
45669+ const int imode)
45670+{
45671+ __u32 reqmode = GR_WRITE | GR_CREATE;
45672+ __u32 mode;
45673+
45674+ if (unlikely(fmode & O_APPEND))
45675+ reqmode |= GR_APPEND;
45676+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45677+ reqmode |= GR_READ;
45678+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45679+ reqmode |= GR_SETID;
45680+
45681+ mode =
45682+ gr_check_create(dentry, p_dentry, p_mnt,
45683+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45684+
45685+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45686+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45687+ reqmode & GR_READ ? " reading" : "",
45688+ reqmode & GR_WRITE ? " writing" : reqmode &
45689+ GR_APPEND ? " appending" : "");
45690+ return reqmode;
45691+ } else
45692+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45693+ {
45694+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45695+ reqmode & GR_READ ? " reading" : "",
45696+ reqmode & GR_WRITE ? " writing" : reqmode &
45697+ GR_APPEND ? " appending" : "");
45698+ return 0;
45699+ } else if (unlikely((mode & reqmode) != reqmode))
45700+ return 0;
45701+
45702+ return reqmode;
45703+}
45704+
45705+__u32
45706+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45707+ const int fmode)
45708+{
45709+ __u32 mode, reqmode = GR_FIND;
45710+
45711+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45712+ reqmode |= GR_EXEC;
45713+ if (fmode & S_IWOTH)
45714+ reqmode |= GR_WRITE;
45715+ if (fmode & S_IROTH)
45716+ reqmode |= GR_READ;
45717+
45718+ mode =
45719+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45720+ mnt);
45721+
45722+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45723+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45724+ reqmode & GR_READ ? " reading" : "",
45725+ reqmode & GR_WRITE ? " writing" : "",
45726+ reqmode & GR_EXEC ? " executing" : "");
45727+ return reqmode;
45728+ } else
45729+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45730+ {
45731+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45732+ reqmode & GR_READ ? " reading" : "",
45733+ reqmode & GR_WRITE ? " writing" : "",
45734+ reqmode & GR_EXEC ? " executing" : "");
45735+ return 0;
45736+ } else if (unlikely((mode & reqmode) != reqmode))
45737+ return 0;
45738+
45739+ return reqmode;
45740+}
45741+
45742+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45743+{
45744+ __u32 mode;
45745+
45746+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45747+
45748+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45749+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45750+ return mode;
45751+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45752+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45753+ return 0;
45754+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45755+ return 0;
45756+
45757+ return (reqmode);
45758+}
45759+
45760+__u32
45761+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45762+{
45763+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45764+}
45765+
45766+__u32
45767+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45768+{
45769+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45770+}
45771+
45772+__u32
45773+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45774+{
45775+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45776+}
45777+
45778+__u32
45779+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45780+{
45781+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45782+}
45783+
45784+__u32
45785+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45786+ mode_t mode)
45787+{
45788+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45789+ return 1;
45790+
45791+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45792+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45793+ GR_FCHMOD_ACL_MSG);
45794+ } else {
45795+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45796+ }
45797+}
45798+
45799+__u32
45800+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45801+ mode_t mode)
45802+{
45803+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45804+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45805+ GR_CHMOD_ACL_MSG);
45806+ } else {
45807+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45808+ }
45809+}
45810+
45811+__u32
45812+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45813+{
45814+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45815+}
45816+
45817+__u32
45818+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45819+{
45820+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45821+}
45822+
45823+__u32
45824+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45825+{
45826+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45827+}
45828+
45829+__u32
45830+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45831+{
45832+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45833+ GR_UNIXCONNECT_ACL_MSG);
45834+}
45835+
45836+/* hardlinks require at minimum create permission,
45837+ any additional privilege required is based on the
45838+ privilege of the file being linked to
45839+*/
45840+__u32
45841+gr_acl_handle_link(const struct dentry * new_dentry,
45842+ const struct dentry * parent_dentry,
45843+ const struct vfsmount * parent_mnt,
45844+ const struct dentry * old_dentry,
45845+ const struct vfsmount * old_mnt, const char *to)
45846+{
45847+ __u32 mode;
45848+ __u32 needmode = GR_CREATE | GR_LINK;
45849+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45850+
45851+ mode =
45852+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45853+ old_mnt);
45854+
45855+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45856+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45857+ return mode;
45858+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45859+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45860+ return 0;
45861+ } else if (unlikely((mode & needmode) != needmode))
45862+ return 0;
45863+
45864+ return 1;
45865+}
45866+
45867+__u32
45868+gr_acl_handle_symlink(const struct dentry * new_dentry,
45869+ const struct dentry * parent_dentry,
45870+ const struct vfsmount * parent_mnt, const char *from)
45871+{
45872+ __u32 needmode = GR_WRITE | GR_CREATE;
45873+ __u32 mode;
45874+
45875+ mode =
45876+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
45877+ GR_CREATE | GR_AUDIT_CREATE |
45878+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45879+
45880+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45881+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45882+ return mode;
45883+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45884+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45885+ return 0;
45886+ } else if (unlikely((mode & needmode) != needmode))
45887+ return 0;
45888+
45889+ return (GR_WRITE | GR_CREATE);
45890+}
45891+
45892+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45893+{
45894+ __u32 mode;
45895+
45896+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45897+
45898+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45899+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45900+ return mode;
45901+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45902+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45903+ return 0;
45904+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45905+ return 0;
45906+
45907+ return (reqmode);
45908+}
45909+
45910+__u32
45911+gr_acl_handle_mknod(const struct dentry * new_dentry,
45912+ const struct dentry * parent_dentry,
45913+ const struct vfsmount * parent_mnt,
45914+ const int mode)
45915+{
45916+ __u32 reqmode = GR_WRITE | GR_CREATE;
45917+ if (unlikely(mode & (S_ISUID | S_ISGID)))
45918+ reqmode |= GR_SETID;
45919+
45920+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45921+ reqmode, GR_MKNOD_ACL_MSG);
45922+}
45923+
45924+__u32
45925+gr_acl_handle_mkdir(const struct dentry *new_dentry,
45926+ const struct dentry *parent_dentry,
45927+ const struct vfsmount *parent_mnt)
45928+{
45929+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45930+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45931+}
45932+
45933+#define RENAME_CHECK_SUCCESS(old, new) \
45934+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45935+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45936+
45937+int
45938+gr_acl_handle_rename(struct dentry *new_dentry,
45939+ struct dentry *parent_dentry,
45940+ const struct vfsmount *parent_mnt,
45941+ struct dentry *old_dentry,
45942+ struct inode *old_parent_inode,
45943+ struct vfsmount *old_mnt, const char *newname)
45944+{
45945+ __u32 comp1, comp2;
45946+ int error = 0;
45947+
45948+ if (unlikely(!gr_acl_is_enabled()))
45949+ return 0;
45950+
45951+ if (!new_dentry->d_inode) {
45952+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
45953+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
45954+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
45955+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
45956+ GR_DELETE | GR_AUDIT_DELETE |
45957+ GR_AUDIT_READ | GR_AUDIT_WRITE |
45958+ GR_SUPPRESS, old_mnt);
45959+ } else {
45960+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
45961+ GR_CREATE | GR_DELETE |
45962+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
45963+ GR_AUDIT_READ | GR_AUDIT_WRITE |
45964+ GR_SUPPRESS, parent_mnt);
45965+ comp2 =
45966+ gr_search_file(old_dentry,
45967+ GR_READ | GR_WRITE | GR_AUDIT_READ |
45968+ GR_DELETE | GR_AUDIT_DELETE |
45969+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
45970+ }
45971+
45972+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
45973+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
45974+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45975+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
45976+ && !(comp2 & GR_SUPPRESS)) {
45977+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45978+ error = -EACCES;
45979+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
45980+ error = -EACCES;
45981+
45982+ return error;
45983+}
45984+
45985+void
45986+gr_acl_handle_exit(void)
45987+{
45988+ u16 id;
45989+ char *rolename;
45990+ struct file *exec_file;
45991+
45992+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
45993+ !(current->role->roletype & GR_ROLE_PERSIST))) {
45994+ id = current->acl_role_id;
45995+ rolename = current->role->rolename;
45996+ gr_set_acls(1);
45997+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
45998+ }
45999+
46000+ write_lock(&grsec_exec_file_lock);
46001+ exec_file = current->exec_file;
46002+ current->exec_file = NULL;
46003+ write_unlock(&grsec_exec_file_lock);
46004+
46005+ if (exec_file)
46006+ fput(exec_file);
46007+}
46008+
46009+int
46010+gr_acl_handle_procpidmem(const struct task_struct *task)
46011+{
46012+ if (unlikely(!gr_acl_is_enabled()))
46013+ return 0;
46014+
46015+ if (task != current && task->acl->mode & GR_PROTPROCFD)
46016+ return -EACCES;
46017+
46018+ return 0;
46019+}
46020diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46021--- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46022+++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46023@@ -0,0 +1,381 @@
46024+#include <linux/kernel.h>
46025+#include <asm/uaccess.h>
46026+#include <asm/errno.h>
46027+#include <net/sock.h>
46028+#include <linux/file.h>
46029+#include <linux/fs.h>
46030+#include <linux/net.h>
46031+#include <linux/in.h>
46032+#include <linux/skbuff.h>
46033+#include <linux/ip.h>
46034+#include <linux/udp.h>
46035+#include <linux/types.h>
46036+#include <linux/sched.h>
46037+#include <linux/netdevice.h>
46038+#include <linux/inetdevice.h>
46039+#include <linux/gracl.h>
46040+#include <linux/grsecurity.h>
46041+#include <linux/grinternal.h>
46042+
46043+#define GR_BIND 0x01
46044+#define GR_CONNECT 0x02
46045+#define GR_INVERT 0x04
46046+#define GR_BINDOVERRIDE 0x08
46047+#define GR_CONNECTOVERRIDE 0x10
46048+#define GR_SOCK_FAMILY 0x20
46049+
46050+static const char * gr_protocols[IPPROTO_MAX] = {
46051+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46052+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46053+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46054+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46055+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46056+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46057+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46058+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46059+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46060+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46061+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46062+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46063+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46064+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46065+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46066+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46067+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46068+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46069+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46070+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46071+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46072+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46073+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46074+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46075+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46076+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46077+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46078+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46079+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46080+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46081+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46082+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46083+ };
46084+
46085+static const char * gr_socktypes[SOCK_MAX] = {
46086+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46087+ "unknown:7", "unknown:8", "unknown:9", "packet"
46088+ };
46089+
46090+static const char * gr_sockfamilies[AF_MAX+1] = {
46091+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46092+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46093+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46094+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46095+ };
46096+
46097+const char *
46098+gr_proto_to_name(unsigned char proto)
46099+{
46100+ return gr_protocols[proto];
46101+}
46102+
46103+const char *
46104+gr_socktype_to_name(unsigned char type)
46105+{
46106+ return gr_socktypes[type];
46107+}
46108+
46109+const char *
46110+gr_sockfamily_to_name(unsigned char family)
46111+{
46112+ return gr_sockfamilies[family];
46113+}
46114+
46115+int
46116+gr_search_socket(const int domain, const int type, const int protocol)
46117+{
46118+ struct acl_subject_label *curr;
46119+ const struct cred *cred = current_cred();
46120+
46121+ if (unlikely(!gr_acl_is_enabled()))
46122+ goto exit;
46123+
46124+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
46125+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46126+ goto exit; // let the kernel handle it
46127+
46128+ curr = current->acl;
46129+
46130+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46131+ /* the family is allowed, if this is PF_INET allow it only if
46132+ the extra sock type/protocol checks pass */
46133+ if (domain == PF_INET)
46134+ goto inet_check;
46135+ goto exit;
46136+ } else {
46137+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46138+ __u32 fakeip = 0;
46139+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46140+ current->role->roletype, cred->uid,
46141+ cred->gid, current->exec_file ?
46142+ gr_to_filename(current->exec_file->f_path.dentry,
46143+ current->exec_file->f_path.mnt) :
46144+ curr->filename, curr->filename,
46145+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46146+ &current->signal->saved_ip);
46147+ goto exit;
46148+ }
46149+ goto exit_fail;
46150+ }
46151+
46152+inet_check:
46153+ /* the rest of this checking is for IPv4 only */
46154+ if (!curr->ips)
46155+ goto exit;
46156+
46157+ if ((curr->ip_type & (1 << type)) &&
46158+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46159+ goto exit;
46160+
46161+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46162+ /* we don't place acls on raw sockets , and sometimes
46163+ dgram/ip sockets are opened for ioctl and not
46164+ bind/connect, so we'll fake a bind learn log */
46165+ if (type == SOCK_RAW || type == SOCK_PACKET) {
46166+ __u32 fakeip = 0;
46167+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46168+ current->role->roletype, cred->uid,
46169+ cred->gid, current->exec_file ?
46170+ gr_to_filename(current->exec_file->f_path.dentry,
46171+ current->exec_file->f_path.mnt) :
46172+ curr->filename, curr->filename,
46173+ &fakeip, 0, type,
46174+ protocol, GR_CONNECT, &current->signal->saved_ip);
46175+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46176+ __u32 fakeip = 0;
46177+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46178+ current->role->roletype, cred->uid,
46179+ cred->gid, current->exec_file ?
46180+ gr_to_filename(current->exec_file->f_path.dentry,
46181+ current->exec_file->f_path.mnt) :
46182+ curr->filename, curr->filename,
46183+ &fakeip, 0, type,
46184+ protocol, GR_BIND, &current->signal->saved_ip);
46185+ }
46186+ /* we'll log when they use connect or bind */
46187+ goto exit;
46188+ }
46189+
46190+exit_fail:
46191+ if (domain == PF_INET)
46192+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46193+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
46194+ else
46195+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46196+ gr_socktype_to_name(type), protocol);
46197+
46198+ return 0;
46199+exit:
46200+ return 1;
46201+}
46202+
46203+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46204+{
46205+ if ((ip->mode & mode) &&
46206+ (ip_port >= ip->low) &&
46207+ (ip_port <= ip->high) &&
46208+ ((ntohl(ip_addr) & our_netmask) ==
46209+ (ntohl(our_addr) & our_netmask))
46210+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46211+ && (ip->type & (1 << type))) {
46212+ if (ip->mode & GR_INVERT)
46213+ return 2; // specifically denied
46214+ else
46215+ return 1; // allowed
46216+ }
46217+
46218+ return 0; // not specifically allowed, may continue parsing
46219+}
46220+
46221+static int
46222+gr_search_connectbind(const int full_mode, struct sock *sk,
46223+ struct sockaddr_in *addr, const int type)
46224+{
46225+ char iface[IFNAMSIZ] = {0};
46226+ struct acl_subject_label *curr;
46227+ struct acl_ip_label *ip;
46228+ struct inet_sock *isk;
46229+ struct net_device *dev;
46230+ struct in_device *idev;
46231+ unsigned long i;
46232+ int ret;
46233+ int mode = full_mode & (GR_BIND | GR_CONNECT);
46234+ __u32 ip_addr = 0;
46235+ __u32 our_addr;
46236+ __u32 our_netmask;
46237+ char *p;
46238+ __u16 ip_port = 0;
46239+ const struct cred *cred = current_cred();
46240+
46241+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46242+ return 0;
46243+
46244+ curr = current->acl;
46245+ isk = inet_sk(sk);
46246+
46247+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46248+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46249+ addr->sin_addr.s_addr = curr->inaddr_any_override;
46250+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46251+ struct sockaddr_in saddr;
46252+ int err;
46253+
46254+ saddr.sin_family = AF_INET;
46255+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
46256+ saddr.sin_port = isk->inet_sport;
46257+
46258+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46259+ if (err)
46260+ return err;
46261+
46262+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46263+ if (err)
46264+ return err;
46265+ }
46266+
46267+ if (!curr->ips)
46268+ return 0;
46269+
46270+ ip_addr = addr->sin_addr.s_addr;
46271+ ip_port = ntohs(addr->sin_port);
46272+
46273+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46274+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46275+ current->role->roletype, cred->uid,
46276+ cred->gid, current->exec_file ?
46277+ gr_to_filename(current->exec_file->f_path.dentry,
46278+ current->exec_file->f_path.mnt) :
46279+ curr->filename, curr->filename,
46280+ &ip_addr, ip_port, type,
46281+ sk->sk_protocol, mode, &current->signal->saved_ip);
46282+ return 0;
46283+ }
46284+
46285+ for (i = 0; i < curr->ip_num; i++) {
46286+ ip = *(curr->ips + i);
46287+ if (ip->iface != NULL) {
46288+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
46289+ p = strchr(iface, ':');
46290+ if (p != NULL)
46291+ *p = '\0';
46292+ dev = dev_get_by_name(sock_net(sk), iface);
46293+ if (dev == NULL)
46294+ continue;
46295+ idev = in_dev_get(dev);
46296+ if (idev == NULL) {
46297+ dev_put(dev);
46298+ continue;
46299+ }
46300+ rcu_read_lock();
46301+ for_ifa(idev) {
46302+ if (!strcmp(ip->iface, ifa->ifa_label)) {
46303+ our_addr = ifa->ifa_address;
46304+ our_netmask = 0xffffffff;
46305+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46306+ if (ret == 1) {
46307+ rcu_read_unlock();
46308+ in_dev_put(idev);
46309+ dev_put(dev);
46310+ return 0;
46311+ } else if (ret == 2) {
46312+ rcu_read_unlock();
46313+ in_dev_put(idev);
46314+ dev_put(dev);
46315+ goto denied;
46316+ }
46317+ }
46318+ } endfor_ifa(idev);
46319+ rcu_read_unlock();
46320+ in_dev_put(idev);
46321+ dev_put(dev);
46322+ } else {
46323+ our_addr = ip->addr;
46324+ our_netmask = ip->netmask;
46325+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46326+ if (ret == 1)
46327+ return 0;
46328+ else if (ret == 2)
46329+ goto denied;
46330+ }
46331+ }
46332+
46333+denied:
46334+ if (mode == GR_BIND)
46335+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46336+ else if (mode == GR_CONNECT)
46337+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46338+
46339+ return -EACCES;
46340+}
46341+
46342+int
46343+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46344+{
46345+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46346+}
46347+
46348+int
46349+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46350+{
46351+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46352+}
46353+
46354+int gr_search_listen(struct socket *sock)
46355+{
46356+ struct sock *sk = sock->sk;
46357+ struct sockaddr_in addr;
46358+
46359+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46360+ addr.sin_port = inet_sk(sk)->inet_sport;
46361+
46362+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46363+}
46364+
46365+int gr_search_accept(struct socket *sock)
46366+{
46367+ struct sock *sk = sock->sk;
46368+ struct sockaddr_in addr;
46369+
46370+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46371+ addr.sin_port = inet_sk(sk)->inet_sport;
46372+
46373+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46374+}
46375+
46376+int
46377+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46378+{
46379+ if (addr)
46380+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46381+ else {
46382+ struct sockaddr_in sin;
46383+ const struct inet_sock *inet = inet_sk(sk);
46384+
46385+ sin.sin_addr.s_addr = inet->inet_daddr;
46386+ sin.sin_port = inet->inet_dport;
46387+
46388+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46389+ }
46390+}
46391+
46392+int
46393+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46394+{
46395+ struct sockaddr_in sin;
46396+
46397+ if (unlikely(skb->len < sizeof (struct udphdr)))
46398+ return 0; // skip this packet
46399+
46400+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46401+ sin.sin_port = udp_hdr(skb)->source;
46402+
46403+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46404+}
46405diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46406--- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46407+++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46408@@ -0,0 +1,207 @@
46409+#include <linux/kernel.h>
46410+#include <linux/mm.h>
46411+#include <linux/sched.h>
46412+#include <linux/poll.h>
46413+#include <linux/string.h>
46414+#include <linux/file.h>
46415+#include <linux/types.h>
46416+#include <linux/vmalloc.h>
46417+#include <linux/grinternal.h>
46418+
46419+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46420+ size_t count, loff_t *ppos);
46421+extern int gr_acl_is_enabled(void);
46422+
46423+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46424+static int gr_learn_attached;
46425+
46426+/* use a 512k buffer */
46427+#define LEARN_BUFFER_SIZE (512 * 1024)
46428+
46429+static DEFINE_SPINLOCK(gr_learn_lock);
46430+static DEFINE_MUTEX(gr_learn_user_mutex);
46431+
46432+/* we need to maintain two buffers, so that the kernel context of grlearn
46433+ uses a semaphore around the userspace copying, and the other kernel contexts
46434+ use a spinlock when copying into the buffer, since they cannot sleep
46435+*/
46436+static char *learn_buffer;
46437+static char *learn_buffer_user;
46438+static int learn_buffer_len;
46439+static int learn_buffer_user_len;
46440+
46441+static ssize_t
46442+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46443+{
46444+ DECLARE_WAITQUEUE(wait, current);
46445+ ssize_t retval = 0;
46446+
46447+ add_wait_queue(&learn_wait, &wait);
46448+ set_current_state(TASK_INTERRUPTIBLE);
46449+ do {
46450+ mutex_lock(&gr_learn_user_mutex);
46451+ spin_lock(&gr_learn_lock);
46452+ if (learn_buffer_len)
46453+ break;
46454+ spin_unlock(&gr_learn_lock);
46455+ mutex_unlock(&gr_learn_user_mutex);
46456+ if (file->f_flags & O_NONBLOCK) {
46457+ retval = -EAGAIN;
46458+ goto out;
46459+ }
46460+ if (signal_pending(current)) {
46461+ retval = -ERESTARTSYS;
46462+ goto out;
46463+ }
46464+
46465+ schedule();
46466+ } while (1);
46467+
46468+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46469+ learn_buffer_user_len = learn_buffer_len;
46470+ retval = learn_buffer_len;
46471+ learn_buffer_len = 0;
46472+
46473+ spin_unlock(&gr_learn_lock);
46474+
46475+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46476+ retval = -EFAULT;
46477+
46478+ mutex_unlock(&gr_learn_user_mutex);
46479+out:
46480+ set_current_state(TASK_RUNNING);
46481+ remove_wait_queue(&learn_wait, &wait);
46482+ return retval;
46483+}
46484+
46485+static unsigned int
46486+poll_learn(struct file * file, poll_table * wait)
46487+{
46488+ poll_wait(file, &learn_wait, wait);
46489+
46490+ if (learn_buffer_len)
46491+ return (POLLIN | POLLRDNORM);
46492+
46493+ return 0;
46494+}
46495+
46496+void
46497+gr_clear_learn_entries(void)
46498+{
46499+ char *tmp;
46500+
46501+ mutex_lock(&gr_learn_user_mutex);
46502+ spin_lock(&gr_learn_lock);
46503+ tmp = learn_buffer;
46504+ learn_buffer = NULL;
46505+ spin_unlock(&gr_learn_lock);
46506+ if (tmp)
46507+ vfree(tmp);
46508+ if (learn_buffer_user != NULL) {
46509+ vfree(learn_buffer_user);
46510+ learn_buffer_user = NULL;
46511+ }
46512+ learn_buffer_len = 0;
46513+ mutex_unlock(&gr_learn_user_mutex);
46514+
46515+ return;
46516+}
46517+
46518+void
46519+gr_add_learn_entry(const char *fmt, ...)
46520+{
46521+ va_list args;
46522+ unsigned int len;
46523+
46524+ if (!gr_learn_attached)
46525+ return;
46526+
46527+ spin_lock(&gr_learn_lock);
46528+
46529+ /* leave a gap at the end so we know when it's "full" but don't have to
46530+ compute the exact length of the string we're trying to append
46531+ */
46532+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46533+ spin_unlock(&gr_learn_lock);
46534+ wake_up_interruptible(&learn_wait);
46535+ return;
46536+ }
46537+ if (learn_buffer == NULL) {
46538+ spin_unlock(&gr_learn_lock);
46539+ return;
46540+ }
46541+
46542+ va_start(args, fmt);
46543+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46544+ va_end(args);
46545+
46546+ learn_buffer_len += len + 1;
46547+
46548+ spin_unlock(&gr_learn_lock);
46549+ wake_up_interruptible(&learn_wait);
46550+
46551+ return;
46552+}
46553+
46554+static int
46555+open_learn(struct inode *inode, struct file *file)
46556+{
46557+ if (file->f_mode & FMODE_READ && gr_learn_attached)
46558+ return -EBUSY;
46559+ if (file->f_mode & FMODE_READ) {
46560+ int retval = 0;
46561+ mutex_lock(&gr_learn_user_mutex);
46562+ if (learn_buffer == NULL)
46563+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46564+ if (learn_buffer_user == NULL)
46565+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46566+ if (learn_buffer == NULL) {
46567+ retval = -ENOMEM;
46568+ goto out_error;
46569+ }
46570+ if (learn_buffer_user == NULL) {
46571+ retval = -ENOMEM;
46572+ goto out_error;
46573+ }
46574+ learn_buffer_len = 0;
46575+ learn_buffer_user_len = 0;
46576+ gr_learn_attached = 1;
46577+out_error:
46578+ mutex_unlock(&gr_learn_user_mutex);
46579+ return retval;
46580+ }
46581+ return 0;
46582+}
46583+
46584+static int
46585+close_learn(struct inode *inode, struct file *file)
46586+{
46587+ if (file->f_mode & FMODE_READ) {
46588+ char *tmp = NULL;
46589+ mutex_lock(&gr_learn_user_mutex);
46590+ spin_lock(&gr_learn_lock);
46591+ tmp = learn_buffer;
46592+ learn_buffer = NULL;
46593+ spin_unlock(&gr_learn_lock);
46594+ if (tmp)
46595+ vfree(tmp);
46596+ if (learn_buffer_user != NULL) {
46597+ vfree(learn_buffer_user);
46598+ learn_buffer_user = NULL;
46599+ }
46600+ learn_buffer_len = 0;
46601+ learn_buffer_user_len = 0;
46602+ gr_learn_attached = 0;
46603+ mutex_unlock(&gr_learn_user_mutex);
46604+ }
46605+
46606+ return 0;
46607+}
46608+
46609+const struct file_operations grsec_fops = {
46610+ .read = read_learn,
46611+ .write = write_grsec_handler,
46612+ .open = open_learn,
46613+ .release = close_learn,
46614+ .poll = poll_learn,
46615+};
46616diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46617--- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46618+++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46619@@ -0,0 +1,68 @@
46620+#include <linux/kernel.h>
46621+#include <linux/sched.h>
46622+#include <linux/gracl.h>
46623+#include <linux/grinternal.h>
46624+
46625+static const char *restab_log[] = {
46626+ [RLIMIT_CPU] = "RLIMIT_CPU",
46627+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46628+ [RLIMIT_DATA] = "RLIMIT_DATA",
46629+ [RLIMIT_STACK] = "RLIMIT_STACK",
46630+ [RLIMIT_CORE] = "RLIMIT_CORE",
46631+ [RLIMIT_RSS] = "RLIMIT_RSS",
46632+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
46633+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46634+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46635+ [RLIMIT_AS] = "RLIMIT_AS",
46636+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46637+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46638+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46639+ [RLIMIT_NICE] = "RLIMIT_NICE",
46640+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46641+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46642+ [GR_CRASH_RES] = "RLIMIT_CRASH"
46643+};
46644+
46645+void
46646+gr_log_resource(const struct task_struct *task,
46647+ const int res, const unsigned long wanted, const int gt)
46648+{
46649+ const struct cred *cred;
46650+ unsigned long rlim;
46651+
46652+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
46653+ return;
46654+
46655+ // not yet supported resource
46656+ if (unlikely(!restab_log[res]))
46657+ return;
46658+
46659+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46660+ rlim = task_rlimit_max(task, res);
46661+ else
46662+ rlim = task_rlimit(task, res);
46663+
46664+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46665+ return;
46666+
46667+ rcu_read_lock();
46668+ cred = __task_cred(task);
46669+
46670+ if (res == RLIMIT_NPROC &&
46671+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46672+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46673+ goto out_rcu_unlock;
46674+ else if (res == RLIMIT_MEMLOCK &&
46675+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46676+ goto out_rcu_unlock;
46677+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46678+ goto out_rcu_unlock;
46679+ rcu_read_unlock();
46680+
46681+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46682+
46683+ return;
46684+out_rcu_unlock:
46685+ rcu_read_unlock();
46686+ return;
46687+}
46688diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46689--- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46690+++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46691@@ -0,0 +1,299 @@
46692+#include <linux/kernel.h>
46693+#include <linux/mm.h>
46694+#include <asm/uaccess.h>
46695+#include <asm/errno.h>
46696+#include <asm/mman.h>
46697+#include <net/sock.h>
46698+#include <linux/file.h>
46699+#include <linux/fs.h>
46700+#include <linux/net.h>
46701+#include <linux/in.h>
46702+#include <linux/slab.h>
46703+#include <linux/types.h>
46704+#include <linux/sched.h>
46705+#include <linux/timer.h>
46706+#include <linux/gracl.h>
46707+#include <linux/grsecurity.h>
46708+#include <linux/grinternal.h>
46709+
46710+static struct crash_uid *uid_set;
46711+static unsigned short uid_used;
46712+static DEFINE_SPINLOCK(gr_uid_lock);
46713+extern rwlock_t gr_inode_lock;
46714+extern struct acl_subject_label *
46715+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46716+ struct acl_role_label *role);
46717+
46718+#ifdef CONFIG_BTRFS_FS
46719+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46720+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46721+#endif
46722+
46723+static inline dev_t __get_dev(const struct dentry *dentry)
46724+{
46725+#ifdef CONFIG_BTRFS_FS
46726+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46727+ return get_btrfs_dev_from_inode(dentry->d_inode);
46728+ else
46729+#endif
46730+ return dentry->d_inode->i_sb->s_dev;
46731+}
46732+
46733+int
46734+gr_init_uidset(void)
46735+{
46736+ uid_set =
46737+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46738+ uid_used = 0;
46739+
46740+ return uid_set ? 1 : 0;
46741+}
46742+
46743+void
46744+gr_free_uidset(void)
46745+{
46746+ if (uid_set)
46747+ kfree(uid_set);
46748+
46749+ return;
46750+}
46751+
46752+int
46753+gr_find_uid(const uid_t uid)
46754+{
46755+ struct crash_uid *tmp = uid_set;
46756+ uid_t buid;
46757+ int low = 0, high = uid_used - 1, mid;
46758+
46759+ while (high >= low) {
46760+ mid = (low + high) >> 1;
46761+ buid = tmp[mid].uid;
46762+ if (buid == uid)
46763+ return mid;
46764+ if (buid > uid)
46765+ high = mid - 1;
46766+ if (buid < uid)
46767+ low = mid + 1;
46768+ }
46769+
46770+ return -1;
46771+}
46772+
46773+static __inline__ void
46774+gr_insertsort(void)
46775+{
46776+ unsigned short i, j;
46777+ struct crash_uid index;
46778+
46779+ for (i = 1; i < uid_used; i++) {
46780+ index = uid_set[i];
46781+ j = i;
46782+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46783+ uid_set[j] = uid_set[j - 1];
46784+ j--;
46785+ }
46786+ uid_set[j] = index;
46787+ }
46788+
46789+ return;
46790+}
46791+
46792+static __inline__ void
46793+gr_insert_uid(const uid_t uid, const unsigned long expires)
46794+{
46795+ int loc;
46796+
46797+ if (uid_used == GR_UIDTABLE_MAX)
46798+ return;
46799+
46800+ loc = gr_find_uid(uid);
46801+
46802+ if (loc >= 0) {
46803+ uid_set[loc].expires = expires;
46804+ return;
46805+ }
46806+
46807+ uid_set[uid_used].uid = uid;
46808+ uid_set[uid_used].expires = expires;
46809+ uid_used++;
46810+
46811+ gr_insertsort();
46812+
46813+ return;
46814+}
46815+
46816+void
46817+gr_remove_uid(const unsigned short loc)
46818+{
46819+ unsigned short i;
46820+
46821+ for (i = loc + 1; i < uid_used; i++)
46822+ uid_set[i - 1] = uid_set[i];
46823+
46824+ uid_used--;
46825+
46826+ return;
46827+}
46828+
46829+int
46830+gr_check_crash_uid(const uid_t uid)
46831+{
46832+ int loc;
46833+ int ret = 0;
46834+
46835+ if (unlikely(!gr_acl_is_enabled()))
46836+ return 0;
46837+
46838+ spin_lock(&gr_uid_lock);
46839+ loc = gr_find_uid(uid);
46840+
46841+ if (loc < 0)
46842+ goto out_unlock;
46843+
46844+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
46845+ gr_remove_uid(loc);
46846+ else
46847+ ret = 1;
46848+
46849+out_unlock:
46850+ spin_unlock(&gr_uid_lock);
46851+ return ret;
46852+}
46853+
46854+static __inline__ int
46855+proc_is_setxid(const struct cred *cred)
46856+{
46857+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
46858+ cred->uid != cred->fsuid)
46859+ return 1;
46860+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46861+ cred->gid != cred->fsgid)
46862+ return 1;
46863+
46864+ return 0;
46865+}
46866+
46867+extern int gr_fake_force_sig(int sig, struct task_struct *t);
46868+
46869+void
46870+gr_handle_crash(struct task_struct *task, const int sig)
46871+{
46872+ struct acl_subject_label *curr;
46873+ struct acl_subject_label *curr2;
46874+ struct task_struct *tsk, *tsk2;
46875+ const struct cred *cred;
46876+ const struct cred *cred2;
46877+
46878+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46879+ return;
46880+
46881+ if (unlikely(!gr_acl_is_enabled()))
46882+ return;
46883+
46884+ curr = task->acl;
46885+
46886+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
46887+ return;
46888+
46889+ if (time_before_eq(curr->expires, get_seconds())) {
46890+ curr->expires = 0;
46891+ curr->crashes = 0;
46892+ }
46893+
46894+ curr->crashes++;
46895+
46896+ if (!curr->expires)
46897+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46898+
46899+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46900+ time_after(curr->expires, get_seconds())) {
46901+ rcu_read_lock();
46902+ cred = __task_cred(task);
46903+ if (cred->uid && proc_is_setxid(cred)) {
46904+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46905+ spin_lock(&gr_uid_lock);
46906+ gr_insert_uid(cred->uid, curr->expires);
46907+ spin_unlock(&gr_uid_lock);
46908+ curr->expires = 0;
46909+ curr->crashes = 0;
46910+ read_lock(&tasklist_lock);
46911+ do_each_thread(tsk2, tsk) {
46912+ cred2 = __task_cred(tsk);
46913+ if (tsk != task && cred2->uid == cred->uid)
46914+ gr_fake_force_sig(SIGKILL, tsk);
46915+ } while_each_thread(tsk2, tsk);
46916+ read_unlock(&tasklist_lock);
46917+ } else {
46918+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46919+ read_lock(&tasklist_lock);
46920+ do_each_thread(tsk2, tsk) {
46921+ if (likely(tsk != task)) {
46922+ curr2 = tsk->acl;
46923+
46924+ if (curr2->device == curr->device &&
46925+ curr2->inode == curr->inode)
46926+ gr_fake_force_sig(SIGKILL, tsk);
46927+ }
46928+ } while_each_thread(tsk2, tsk);
46929+ read_unlock(&tasklist_lock);
46930+ }
46931+ rcu_read_unlock();
46932+ }
46933+
46934+ return;
46935+}
46936+
46937+int
46938+gr_check_crash_exec(const struct file *filp)
46939+{
46940+ struct acl_subject_label *curr;
46941+
46942+ if (unlikely(!gr_acl_is_enabled()))
46943+ return 0;
46944+
46945+ read_lock(&gr_inode_lock);
46946+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
46947+ __get_dev(filp->f_path.dentry),
46948+ current->role);
46949+ read_unlock(&gr_inode_lock);
46950+
46951+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
46952+ (!curr->crashes && !curr->expires))
46953+ return 0;
46954+
46955+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46956+ time_after(curr->expires, get_seconds()))
46957+ return 1;
46958+ else if (time_before_eq(curr->expires, get_seconds())) {
46959+ curr->crashes = 0;
46960+ curr->expires = 0;
46961+ }
46962+
46963+ return 0;
46964+}
46965+
46966+void
46967+gr_handle_alertkill(struct task_struct *task)
46968+{
46969+ struct acl_subject_label *curracl;
46970+ __u32 curr_ip;
46971+ struct task_struct *p, *p2;
46972+
46973+ if (unlikely(!gr_acl_is_enabled()))
46974+ return;
46975+
46976+ curracl = task->acl;
46977+ curr_ip = task->signal->curr_ip;
46978+
46979+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
46980+ read_lock(&tasklist_lock);
46981+ do_each_thread(p2, p) {
46982+ if (p->signal->curr_ip == curr_ip)
46983+ gr_fake_force_sig(SIGKILL, p);
46984+ } while_each_thread(p2, p);
46985+ read_unlock(&tasklist_lock);
46986+ } else if (curracl->mode & GR_KILLPROC)
46987+ gr_fake_force_sig(SIGKILL, task);
46988+
46989+ return;
46990+}
46991diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
46992--- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
46993+++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
46994@@ -0,0 +1,40 @@
46995+#include <linux/kernel.h>
46996+#include <linux/mm.h>
46997+#include <linux/sched.h>
46998+#include <linux/file.h>
46999+#include <linux/ipc.h>
47000+#include <linux/gracl.h>
47001+#include <linux/grsecurity.h>
47002+#include <linux/grinternal.h>
47003+
47004+int
47005+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47006+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47007+{
47008+ struct task_struct *task;
47009+
47010+ if (!gr_acl_is_enabled())
47011+ return 1;
47012+
47013+ rcu_read_lock();
47014+ read_lock(&tasklist_lock);
47015+
47016+ task = find_task_by_vpid(shm_cprid);
47017+
47018+ if (unlikely(!task))
47019+ task = find_task_by_vpid(shm_lapid);
47020+
47021+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47022+ (task->pid == shm_lapid)) &&
47023+ (task->acl->mode & GR_PROTSHM) &&
47024+ (task->acl != current->acl))) {
47025+ read_unlock(&tasklist_lock);
47026+ rcu_read_unlock();
47027+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47028+ return 0;
47029+ }
47030+ read_unlock(&tasklist_lock);
47031+ rcu_read_unlock();
47032+
47033+ return 1;
47034+}
47035diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47036--- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47037+++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47038@@ -0,0 +1,19 @@
47039+#include <linux/kernel.h>
47040+#include <linux/sched.h>
47041+#include <linux/fs.h>
47042+#include <linux/file.h>
47043+#include <linux/grsecurity.h>
47044+#include <linux/grinternal.h>
47045+
47046+void
47047+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47048+{
47049+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47050+ if ((grsec_enable_chdir && grsec_enable_group &&
47051+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47052+ !grsec_enable_group)) {
47053+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47054+ }
47055+#endif
47056+ return;
47057+}
47058diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47059--- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47060+++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47061@@ -0,0 +1,349 @@
47062+#include <linux/kernel.h>
47063+#include <linux/module.h>
47064+#include <linux/sched.h>
47065+#include <linux/file.h>
47066+#include <linux/fs.h>
47067+#include <linux/mount.h>
47068+#include <linux/types.h>
47069+#include <linux/pid_namespace.h>
47070+#include <linux/grsecurity.h>
47071+#include <linux/grinternal.h>
47072+
47073+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47074+{
47075+#ifdef CONFIG_GRKERNSEC
47076+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47077+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47078+ task->gr_is_chrooted = 1;
47079+ else
47080+ task->gr_is_chrooted = 0;
47081+
47082+ task->gr_chroot_dentry = path->dentry;
47083+#endif
47084+ return;
47085+}
47086+
47087+void gr_clear_chroot_entries(struct task_struct *task)
47088+{
47089+#ifdef CONFIG_GRKERNSEC
47090+ task->gr_is_chrooted = 0;
47091+ task->gr_chroot_dentry = NULL;
47092+#endif
47093+ return;
47094+}
47095+
47096+int
47097+gr_handle_chroot_unix(const pid_t pid)
47098+{
47099+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47100+ struct task_struct *p;
47101+
47102+ if (unlikely(!grsec_enable_chroot_unix))
47103+ return 1;
47104+
47105+ if (likely(!proc_is_chrooted(current)))
47106+ return 1;
47107+
47108+ rcu_read_lock();
47109+ read_lock(&tasklist_lock);
47110+ p = find_task_by_vpid_unrestricted(pid);
47111+ if (unlikely(p && !have_same_root(current, p))) {
47112+ read_unlock(&tasklist_lock);
47113+ rcu_read_unlock();
47114+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47115+ return 0;
47116+ }
47117+ read_unlock(&tasklist_lock);
47118+ rcu_read_unlock();
47119+#endif
47120+ return 1;
47121+}
47122+
47123+int
47124+gr_handle_chroot_nice(void)
47125+{
47126+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47127+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47128+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47129+ return -EPERM;
47130+ }
47131+#endif
47132+ return 0;
47133+}
47134+
47135+int
47136+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47137+{
47138+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47139+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47140+ && proc_is_chrooted(current)) {
47141+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47142+ return -EACCES;
47143+ }
47144+#endif
47145+ return 0;
47146+}
47147+
47148+int
47149+gr_handle_chroot_rawio(const struct inode *inode)
47150+{
47151+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47152+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47153+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47154+ return 1;
47155+#endif
47156+ return 0;
47157+}
47158+
47159+int
47160+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47161+{
47162+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47163+ struct task_struct *p;
47164+ int ret = 0;
47165+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47166+ return ret;
47167+
47168+ read_lock(&tasklist_lock);
47169+ do_each_pid_task(pid, type, p) {
47170+ if (!have_same_root(current, p)) {
47171+ ret = 1;
47172+ goto out;
47173+ }
47174+ } while_each_pid_task(pid, type, p);
47175+out:
47176+ read_unlock(&tasklist_lock);
47177+ return ret;
47178+#endif
47179+ return 0;
47180+}
47181+
47182+int
47183+gr_pid_is_chrooted(struct task_struct *p)
47184+{
47185+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47186+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47187+ return 0;
47188+
47189+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47190+ !have_same_root(current, p)) {
47191+ return 1;
47192+ }
47193+#endif
47194+ return 0;
47195+}
47196+
47197+EXPORT_SYMBOL(gr_pid_is_chrooted);
47198+
47199+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47200+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47201+{
47202+ struct path path, currentroot;
47203+ int ret = 0;
47204+
47205+ path.dentry = (struct dentry *)u_dentry;
47206+ path.mnt = (struct vfsmount *)u_mnt;
47207+ get_fs_root(current->fs, &currentroot);
47208+ if (path_is_under(&path, &currentroot))
47209+ ret = 1;
47210+ path_put(&currentroot);
47211+
47212+ return ret;
47213+}
47214+#endif
47215+
47216+int
47217+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47218+{
47219+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47220+ if (!grsec_enable_chroot_fchdir)
47221+ return 1;
47222+
47223+ if (!proc_is_chrooted(current))
47224+ return 1;
47225+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47226+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47227+ return 0;
47228+ }
47229+#endif
47230+ return 1;
47231+}
47232+
47233+int
47234+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47235+ const time_t shm_createtime)
47236+{
47237+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47238+ struct task_struct *p;
47239+ time_t starttime;
47240+
47241+ if (unlikely(!grsec_enable_chroot_shmat))
47242+ return 1;
47243+
47244+ if (likely(!proc_is_chrooted(current)))
47245+ return 1;
47246+
47247+ rcu_read_lock();
47248+ read_lock(&tasklist_lock);
47249+
47250+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47251+ starttime = p->start_time.tv_sec;
47252+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47253+ if (have_same_root(current, p)) {
47254+ goto allow;
47255+ } else {
47256+ read_unlock(&tasklist_lock);
47257+ rcu_read_unlock();
47258+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47259+ return 0;
47260+ }
47261+ }
47262+ /* creator exited, pid reuse, fall through to next check */
47263+ }
47264+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47265+ if (unlikely(!have_same_root(current, p))) {
47266+ read_unlock(&tasklist_lock);
47267+ rcu_read_unlock();
47268+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47269+ return 0;
47270+ }
47271+ }
47272+
47273+allow:
47274+ read_unlock(&tasklist_lock);
47275+ rcu_read_unlock();
47276+#endif
47277+ return 1;
47278+}
47279+
47280+void
47281+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47282+{
47283+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47284+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47285+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47286+#endif
47287+ return;
47288+}
47289+
47290+int
47291+gr_handle_chroot_mknod(const struct dentry *dentry,
47292+ const struct vfsmount *mnt, const int mode)
47293+{
47294+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47295+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47296+ proc_is_chrooted(current)) {
47297+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47298+ return -EPERM;
47299+ }
47300+#endif
47301+ return 0;
47302+}
47303+
47304+int
47305+gr_handle_chroot_mount(const struct dentry *dentry,
47306+ const struct vfsmount *mnt, const char *dev_name)
47307+{
47308+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47309+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47310+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47311+ return -EPERM;
47312+ }
47313+#endif
47314+ return 0;
47315+}
47316+
47317+int
47318+gr_handle_chroot_pivot(void)
47319+{
47320+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47321+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47322+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47323+ return -EPERM;
47324+ }
47325+#endif
47326+ return 0;
47327+}
47328+
47329+int
47330+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47331+{
47332+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47333+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47334+ !gr_is_outside_chroot(dentry, mnt)) {
47335+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47336+ return -EPERM;
47337+ }
47338+#endif
47339+ return 0;
47340+}
47341+
47342+int
47343+gr_handle_chroot_caps(struct path *path)
47344+{
47345+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47346+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47347+ (init_task.fs->root.dentry != path->dentry) &&
47348+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47349+
47350+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47351+ const struct cred *old = current_cred();
47352+ struct cred *new = prepare_creds();
47353+ if (new == NULL)
47354+ return 1;
47355+
47356+ new->cap_permitted = cap_drop(old->cap_permitted,
47357+ chroot_caps);
47358+ new->cap_inheritable = cap_drop(old->cap_inheritable,
47359+ chroot_caps);
47360+ new->cap_effective = cap_drop(old->cap_effective,
47361+ chroot_caps);
47362+
47363+ commit_creds(new);
47364+
47365+ return 0;
47366+ }
47367+#endif
47368+ return 0;
47369+}
47370+
47371+int
47372+gr_handle_chroot_sysctl(const int op)
47373+{
47374+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47375+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47376+ proc_is_chrooted(current))
47377+ return -EACCES;
47378+#endif
47379+ return 0;
47380+}
47381+
47382+void
47383+gr_handle_chroot_chdir(struct path *path)
47384+{
47385+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47386+ if (grsec_enable_chroot_chdir)
47387+ set_fs_pwd(current->fs, path);
47388+#endif
47389+ return;
47390+}
47391+
47392+int
47393+gr_handle_chroot_chmod(const struct dentry *dentry,
47394+ const struct vfsmount *mnt, const int mode)
47395+{
47396+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47397+ /* allow chmod +s on directories, but not files */
47398+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47399+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47400+ proc_is_chrooted(current)) {
47401+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47402+ return -EPERM;
47403+ }
47404+#endif
47405+ return 0;
47406+}
47407+
47408+#ifdef CONFIG_SECURITY
47409+EXPORT_SYMBOL(gr_handle_chroot_caps);
47410+#endif
47411diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47412--- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47413+++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47414@@ -0,0 +1,447 @@
47415+#include <linux/kernel.h>
47416+#include <linux/module.h>
47417+#include <linux/sched.h>
47418+#include <linux/file.h>
47419+#include <linux/fs.h>
47420+#include <linux/kdev_t.h>
47421+#include <linux/net.h>
47422+#include <linux/in.h>
47423+#include <linux/ip.h>
47424+#include <linux/skbuff.h>
47425+#include <linux/sysctl.h>
47426+
47427+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47428+void
47429+pax_set_initial_flags(struct linux_binprm *bprm)
47430+{
47431+ return;
47432+}
47433+#endif
47434+
47435+#ifdef CONFIG_SYSCTL
47436+__u32
47437+gr_handle_sysctl(const struct ctl_table * table, const int op)
47438+{
47439+ return 0;
47440+}
47441+#endif
47442+
47443+#ifdef CONFIG_TASKSTATS
47444+int gr_is_taskstats_denied(int pid)
47445+{
47446+ return 0;
47447+}
47448+#endif
47449+
47450+int
47451+gr_acl_is_enabled(void)
47452+{
47453+ return 0;
47454+}
47455+
47456+int
47457+gr_handle_rawio(const struct inode *inode)
47458+{
47459+ return 0;
47460+}
47461+
47462+void
47463+gr_acl_handle_psacct(struct task_struct *task, const long code)
47464+{
47465+ return;
47466+}
47467+
47468+int
47469+gr_handle_ptrace(struct task_struct *task, const long request)
47470+{
47471+ return 0;
47472+}
47473+
47474+int
47475+gr_handle_proc_ptrace(struct task_struct *task)
47476+{
47477+ return 0;
47478+}
47479+
47480+void
47481+gr_learn_resource(const struct task_struct *task,
47482+ const int res, const unsigned long wanted, const int gt)
47483+{
47484+ return;
47485+}
47486+
47487+int
47488+gr_set_acls(const int type)
47489+{
47490+ return 0;
47491+}
47492+
47493+int
47494+gr_check_hidden_task(const struct task_struct *tsk)
47495+{
47496+ return 0;
47497+}
47498+
47499+int
47500+gr_check_protected_task(const struct task_struct *task)
47501+{
47502+ return 0;
47503+}
47504+
47505+int
47506+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47507+{
47508+ return 0;
47509+}
47510+
47511+void
47512+gr_copy_label(struct task_struct *tsk)
47513+{
47514+ return;
47515+}
47516+
47517+void
47518+gr_set_pax_flags(struct task_struct *task)
47519+{
47520+ return;
47521+}
47522+
47523+int
47524+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47525+ const int unsafe_share)
47526+{
47527+ return 0;
47528+}
47529+
47530+void
47531+gr_handle_delete(const ino_t ino, const dev_t dev)
47532+{
47533+ return;
47534+}
47535+
47536+void
47537+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47538+{
47539+ return;
47540+}
47541+
47542+void
47543+gr_handle_crash(struct task_struct *task, const int sig)
47544+{
47545+ return;
47546+}
47547+
47548+int
47549+gr_check_crash_exec(const struct file *filp)
47550+{
47551+ return 0;
47552+}
47553+
47554+int
47555+gr_check_crash_uid(const uid_t uid)
47556+{
47557+ return 0;
47558+}
47559+
47560+void
47561+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47562+ struct dentry *old_dentry,
47563+ struct dentry *new_dentry,
47564+ struct vfsmount *mnt, const __u8 replace)
47565+{
47566+ return;
47567+}
47568+
47569+int
47570+gr_search_socket(const int family, const int type, const int protocol)
47571+{
47572+ return 1;
47573+}
47574+
47575+int
47576+gr_search_connectbind(const int mode, const struct socket *sock,
47577+ const struct sockaddr_in *addr)
47578+{
47579+ return 0;
47580+}
47581+
47582+int
47583+gr_is_capable(const int cap)
47584+{
47585+ return 1;
47586+}
47587+
47588+int
47589+gr_is_capable_nolog(const int cap)
47590+{
47591+ return 1;
47592+}
47593+
47594+void
47595+gr_handle_alertkill(struct task_struct *task)
47596+{
47597+ return;
47598+}
47599+
47600+__u32
47601+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47602+{
47603+ return 1;
47604+}
47605+
47606+__u32
47607+gr_acl_handle_hidden_file(const struct dentry * dentry,
47608+ const struct vfsmount * mnt)
47609+{
47610+ return 1;
47611+}
47612+
47613+__u32
47614+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47615+ const int fmode)
47616+{
47617+ return 1;
47618+}
47619+
47620+__u32
47621+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47622+{
47623+ return 1;
47624+}
47625+
47626+__u32
47627+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47628+{
47629+ return 1;
47630+}
47631+
47632+int
47633+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47634+ unsigned int *vm_flags)
47635+{
47636+ return 1;
47637+}
47638+
47639+__u32
47640+gr_acl_handle_truncate(const struct dentry * dentry,
47641+ const struct vfsmount * mnt)
47642+{
47643+ return 1;
47644+}
47645+
47646+__u32
47647+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47648+{
47649+ return 1;
47650+}
47651+
47652+__u32
47653+gr_acl_handle_access(const struct dentry * dentry,
47654+ const struct vfsmount * mnt, const int fmode)
47655+{
47656+ return 1;
47657+}
47658+
47659+__u32
47660+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47661+ mode_t mode)
47662+{
47663+ return 1;
47664+}
47665+
47666+__u32
47667+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47668+ mode_t mode)
47669+{
47670+ return 1;
47671+}
47672+
47673+__u32
47674+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47675+{
47676+ return 1;
47677+}
47678+
47679+__u32
47680+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47681+{
47682+ return 1;
47683+}
47684+
47685+void
47686+grsecurity_init(void)
47687+{
47688+ return;
47689+}
47690+
47691+__u32
47692+gr_acl_handle_mknod(const struct dentry * new_dentry,
47693+ const struct dentry * parent_dentry,
47694+ const struct vfsmount * parent_mnt,
47695+ const int mode)
47696+{
47697+ return 1;
47698+}
47699+
47700+__u32
47701+gr_acl_handle_mkdir(const struct dentry * new_dentry,
47702+ const struct dentry * parent_dentry,
47703+ const struct vfsmount * parent_mnt)
47704+{
47705+ return 1;
47706+}
47707+
47708+__u32
47709+gr_acl_handle_symlink(const struct dentry * new_dentry,
47710+ const struct dentry * parent_dentry,
47711+ const struct vfsmount * parent_mnt, const char *from)
47712+{
47713+ return 1;
47714+}
47715+
47716+__u32
47717+gr_acl_handle_link(const struct dentry * new_dentry,
47718+ const struct dentry * parent_dentry,
47719+ const struct vfsmount * parent_mnt,
47720+ const struct dentry * old_dentry,
47721+ const struct vfsmount * old_mnt, const char *to)
47722+{
47723+ return 1;
47724+}
47725+
47726+int
47727+gr_acl_handle_rename(const struct dentry *new_dentry,
47728+ const struct dentry *parent_dentry,
47729+ const struct vfsmount *parent_mnt,
47730+ const struct dentry *old_dentry,
47731+ const struct inode *old_parent_inode,
47732+ const struct vfsmount *old_mnt, const char *newname)
47733+{
47734+ return 0;
47735+}
47736+
47737+int
47738+gr_acl_handle_filldir(const struct file *file, const char *name,
47739+ const int namelen, const ino_t ino)
47740+{
47741+ return 1;
47742+}
47743+
47744+int
47745+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47746+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47747+{
47748+ return 1;
47749+}
47750+
47751+int
47752+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47753+{
47754+ return 0;
47755+}
47756+
47757+int
47758+gr_search_accept(const struct socket *sock)
47759+{
47760+ return 0;
47761+}
47762+
47763+int
47764+gr_search_listen(const struct socket *sock)
47765+{
47766+ return 0;
47767+}
47768+
47769+int
47770+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47771+{
47772+ return 0;
47773+}
47774+
47775+__u32
47776+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47777+{
47778+ return 1;
47779+}
47780+
47781+__u32
47782+gr_acl_handle_creat(const struct dentry * dentry,
47783+ const struct dentry * p_dentry,
47784+ const struct vfsmount * p_mnt, const int fmode,
47785+ const int imode)
47786+{
47787+ return 1;
47788+}
47789+
47790+void
47791+gr_acl_handle_exit(void)
47792+{
47793+ return;
47794+}
47795+
47796+int
47797+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47798+{
47799+ return 1;
47800+}
47801+
47802+void
47803+gr_set_role_label(const uid_t uid, const gid_t gid)
47804+{
47805+ return;
47806+}
47807+
47808+int
47809+gr_acl_handle_procpidmem(const struct task_struct *task)
47810+{
47811+ return 0;
47812+}
47813+
47814+int
47815+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47816+{
47817+ return 0;
47818+}
47819+
47820+int
47821+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47822+{
47823+ return 0;
47824+}
47825+
47826+void
47827+gr_set_kernel_label(struct task_struct *task)
47828+{
47829+ return;
47830+}
47831+
47832+int
47833+gr_check_user_change(int real, int effective, int fs)
47834+{
47835+ return 0;
47836+}
47837+
47838+int
47839+gr_check_group_change(int real, int effective, int fs)
47840+{
47841+ return 0;
47842+}
47843+
47844+int gr_acl_enable_at_secure(void)
47845+{
47846+ return 0;
47847+}
47848+
47849+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47850+{
47851+ return dentry->d_inode->i_sb->s_dev;
47852+}
47853+
47854+EXPORT_SYMBOL(gr_is_capable);
47855+EXPORT_SYMBOL(gr_is_capable_nolog);
47856+EXPORT_SYMBOL(gr_learn_resource);
47857+EXPORT_SYMBOL(gr_set_kernel_label);
47858+#ifdef CONFIG_SECURITY
47859+EXPORT_SYMBOL(gr_check_user_change);
47860+EXPORT_SYMBOL(gr_check_group_change);
47861+#endif
47862diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
47863--- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47864+++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
47865@@ -0,0 +1,146 @@
47866+#include <linux/kernel.h>
47867+#include <linux/sched.h>
47868+#include <linux/file.h>
47869+#include <linux/binfmts.h>
47870+#include <linux/fs.h>
47871+#include <linux/types.h>
47872+#include <linux/grdefs.h>
47873+#include <linux/grinternal.h>
47874+#include <linux/capability.h>
47875+#include <linux/compat.h>
47876+
47877+#include <asm/uaccess.h>
47878+
47879+#ifdef CONFIG_GRKERNSEC_EXECLOG
47880+static char gr_exec_arg_buf[132];
47881+static DEFINE_MUTEX(gr_exec_arg_mutex);
47882+#endif
47883+
47884+int
47885+gr_handle_nproc(void)
47886+{
47887+#ifdef CONFIG_GRKERNSEC_EXECVE
47888+ const struct cred *cred = current_cred();
47889+ if (grsec_enable_execve && cred->user &&
47890+ (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
47891+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
47892+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
47893+ return -EAGAIN;
47894+ }
47895+#endif
47896+ return 0;
47897+}
47898+
47899+void
47900+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
47901+{
47902+#ifdef CONFIG_GRKERNSEC_EXECLOG
47903+ char *grarg = gr_exec_arg_buf;
47904+ unsigned int i, x, execlen = 0;
47905+ char c;
47906+
47907+ if (!((grsec_enable_execlog && grsec_enable_group &&
47908+ in_group_p(grsec_audit_gid))
47909+ || (grsec_enable_execlog && !grsec_enable_group)))
47910+ return;
47911+
47912+ mutex_lock(&gr_exec_arg_mutex);
47913+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
47914+
47915+ if (unlikely(argv == NULL))
47916+ goto log;
47917+
47918+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
47919+ const char __user *p;
47920+ unsigned int len;
47921+
47922+ if (copy_from_user(&p, argv + i, sizeof(p)))
47923+ goto log;
47924+ if (!p)
47925+ goto log;
47926+ len = strnlen_user(p, 128 - execlen);
47927+ if (len > 128 - execlen)
47928+ len = 128 - execlen;
47929+ else if (len > 0)
47930+ len--;
47931+ if (copy_from_user(grarg + execlen, p, len))
47932+ goto log;
47933+
47934+ /* rewrite unprintable characters */
47935+ for (x = 0; x < len; x++) {
47936+ c = *(grarg + execlen + x);
47937+ if (c < 32 || c > 126)
47938+ *(grarg + execlen + x) = ' ';
47939+ }
47940+
47941+ execlen += len;
47942+ *(grarg + execlen) = ' ';
47943+ *(grarg + execlen + 1) = '\0';
47944+ execlen++;
47945+ }
47946+
47947+ log:
47948+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47949+ bprm->file->f_path.mnt, grarg);
47950+ mutex_unlock(&gr_exec_arg_mutex);
47951+#endif
47952+ return;
47953+}
47954+
47955+#ifdef CONFIG_COMPAT
47956+void
47957+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
47958+{
47959+#ifdef CONFIG_GRKERNSEC_EXECLOG
47960+ char *grarg = gr_exec_arg_buf;
47961+ unsigned int i, x, execlen = 0;
47962+ char c;
47963+
47964+ if (!((grsec_enable_execlog && grsec_enable_group &&
47965+ in_group_p(grsec_audit_gid))
47966+ || (grsec_enable_execlog && !grsec_enable_group)))
47967+ return;
47968+
47969+ mutex_lock(&gr_exec_arg_mutex);
47970+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
47971+
47972+ if (unlikely(argv == NULL))
47973+ goto log;
47974+
47975+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
47976+ compat_uptr_t p;
47977+ unsigned int len;
47978+
47979+ if (get_user(p, argv + i))
47980+ goto log;
47981+ len = strnlen_user(compat_ptr(p), 128 - execlen);
47982+ if (len > 128 - execlen)
47983+ len = 128 - execlen;
47984+ else if (len > 0)
47985+ len--;
47986+ else
47987+ goto log;
47988+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
47989+ goto log;
47990+
47991+ /* rewrite unprintable characters */
47992+ for (x = 0; x < len; x++) {
47993+ c = *(grarg + execlen + x);
47994+ if (c < 32 || c > 126)
47995+ *(grarg + execlen + x) = ' ';
47996+ }
47997+
47998+ execlen += len;
47999+ *(grarg + execlen) = ' ';
48000+ *(grarg + execlen + 1) = '\0';
48001+ execlen++;
48002+ }
48003+
48004+ log:
48005+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48006+ bprm->file->f_path.mnt, grarg);
48007+ mutex_unlock(&gr_exec_arg_mutex);
48008+#endif
48009+ return;
48010+}
48011+#endif
48012diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48013--- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48014+++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48015@@ -0,0 +1,24 @@
48016+#include <linux/kernel.h>
48017+#include <linux/sched.h>
48018+#include <linux/fs.h>
48019+#include <linux/file.h>
48020+#include <linux/grinternal.h>
48021+
48022+int
48023+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48024+ const struct dentry *dir, const int flag, const int acc_mode)
48025+{
48026+#ifdef CONFIG_GRKERNSEC_FIFO
48027+ const struct cred *cred = current_cred();
48028+
48029+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48030+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48031+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48032+ (cred->fsuid != dentry->d_inode->i_uid)) {
48033+ if (!inode_permission(dentry->d_inode, acc_mode))
48034+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48035+ return -EACCES;
48036+ }
48037+#endif
48038+ return 0;
48039+}
48040diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48041--- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48042+++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48043@@ -0,0 +1,23 @@
48044+#include <linux/kernel.h>
48045+#include <linux/sched.h>
48046+#include <linux/grsecurity.h>
48047+#include <linux/grinternal.h>
48048+#include <linux/errno.h>
48049+
48050+void
48051+gr_log_forkfail(const int retval)
48052+{
48053+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48054+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48055+ switch (retval) {
48056+ case -EAGAIN:
48057+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48058+ break;
48059+ case -ENOMEM:
48060+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48061+ break;
48062+ }
48063+ }
48064+#endif
48065+ return;
48066+}
48067diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48068--- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48069+++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48070@@ -0,0 +1,273 @@
48071+#include <linux/kernel.h>
48072+#include <linux/sched.h>
48073+#include <linux/mm.h>
48074+#include <linux/gracl.h>
48075+#include <linux/slab.h>
48076+#include <linux/vmalloc.h>
48077+#include <linux/percpu.h>
48078+#include <linux/module.h>
48079+
48080+int grsec_enable_brute;
48081+int grsec_enable_link;
48082+int grsec_enable_dmesg;
48083+int grsec_enable_harden_ptrace;
48084+int grsec_enable_fifo;
48085+int grsec_enable_execve;
48086+int grsec_enable_execlog;
48087+int grsec_enable_signal;
48088+int grsec_enable_forkfail;
48089+int grsec_enable_audit_ptrace;
48090+int grsec_enable_time;
48091+int grsec_enable_audit_textrel;
48092+int grsec_enable_group;
48093+int grsec_audit_gid;
48094+int grsec_enable_chdir;
48095+int grsec_enable_mount;
48096+int grsec_enable_rofs;
48097+int grsec_enable_chroot_findtask;
48098+int grsec_enable_chroot_mount;
48099+int grsec_enable_chroot_shmat;
48100+int grsec_enable_chroot_fchdir;
48101+int grsec_enable_chroot_double;
48102+int grsec_enable_chroot_pivot;
48103+int grsec_enable_chroot_chdir;
48104+int grsec_enable_chroot_chmod;
48105+int grsec_enable_chroot_mknod;
48106+int grsec_enable_chroot_nice;
48107+int grsec_enable_chroot_execlog;
48108+int grsec_enable_chroot_caps;
48109+int grsec_enable_chroot_sysctl;
48110+int grsec_enable_chroot_unix;
48111+int grsec_enable_tpe;
48112+int grsec_tpe_gid;
48113+int grsec_enable_blackhole;
48114+#ifdef CONFIG_IPV6_MODULE
48115+EXPORT_SYMBOL(grsec_enable_blackhole);
48116+#endif
48117+int grsec_lastack_retries;
48118+int grsec_enable_tpe_all;
48119+int grsec_enable_tpe_invert;
48120+int grsec_enable_socket_all;
48121+int grsec_socket_all_gid;
48122+int grsec_enable_socket_client;
48123+int grsec_socket_client_gid;
48124+int grsec_enable_socket_server;
48125+int grsec_socket_server_gid;
48126+int grsec_resource_logging;
48127+int grsec_disable_privio;
48128+int grsec_enable_log_rwxmaps;
48129+int grsec_lock;
48130+
48131+DEFINE_SPINLOCK(grsec_alert_lock);
48132+unsigned long grsec_alert_wtime = 0;
48133+unsigned long grsec_alert_fyet = 0;
48134+
48135+DEFINE_SPINLOCK(grsec_audit_lock);
48136+
48137+DEFINE_RWLOCK(grsec_exec_file_lock);
48138+
48139+char *gr_shared_page[4];
48140+
48141+char *gr_alert_log_fmt;
48142+char *gr_audit_log_fmt;
48143+char *gr_alert_log_buf;
48144+char *gr_audit_log_buf;
48145+
48146+extern struct gr_arg *gr_usermode;
48147+extern unsigned char *gr_system_salt;
48148+extern unsigned char *gr_system_sum;
48149+
48150+void __init
48151+grsecurity_init(void)
48152+{
48153+ int j;
48154+ /* create the per-cpu shared pages */
48155+
48156+#ifdef CONFIG_X86
48157+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48158+#endif
48159+
48160+ for (j = 0; j < 4; j++) {
48161+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48162+ if (gr_shared_page[j] == NULL) {
48163+ panic("Unable to allocate grsecurity shared page");
48164+ return;
48165+ }
48166+ }
48167+
48168+ /* allocate log buffers */
48169+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48170+ if (!gr_alert_log_fmt) {
48171+ panic("Unable to allocate grsecurity alert log format buffer");
48172+ return;
48173+ }
48174+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48175+ if (!gr_audit_log_fmt) {
48176+ panic("Unable to allocate grsecurity audit log format buffer");
48177+ return;
48178+ }
48179+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48180+ if (!gr_alert_log_buf) {
48181+ panic("Unable to allocate grsecurity alert log buffer");
48182+ return;
48183+ }
48184+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48185+ if (!gr_audit_log_buf) {
48186+ panic("Unable to allocate grsecurity audit log buffer");
48187+ return;
48188+ }
48189+
48190+ /* allocate memory for authentication structure */
48191+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48192+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48193+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48194+
48195+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48196+ panic("Unable to allocate grsecurity authentication structure");
48197+ return;
48198+ }
48199+
48200+
48201+#ifdef CONFIG_GRKERNSEC_IO
48202+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48203+ grsec_disable_privio = 1;
48204+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48205+ grsec_disable_privio = 1;
48206+#else
48207+ grsec_disable_privio = 0;
48208+#endif
48209+#endif
48210+
48211+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48212+ /* for backward compatibility, tpe_invert always defaults to on if
48213+ enabled in the kernel
48214+ */
48215+ grsec_enable_tpe_invert = 1;
48216+#endif
48217+
48218+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48219+#ifndef CONFIG_GRKERNSEC_SYSCTL
48220+ grsec_lock = 1;
48221+#endif
48222+
48223+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48224+ grsec_enable_audit_textrel = 1;
48225+#endif
48226+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48227+ grsec_enable_log_rwxmaps = 1;
48228+#endif
48229+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48230+ grsec_enable_group = 1;
48231+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48232+#endif
48233+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48234+ grsec_enable_chdir = 1;
48235+#endif
48236+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48237+ grsec_enable_harden_ptrace = 1;
48238+#endif
48239+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48240+ grsec_enable_mount = 1;
48241+#endif
48242+#ifdef CONFIG_GRKERNSEC_LINK
48243+ grsec_enable_link = 1;
48244+#endif
48245+#ifdef CONFIG_GRKERNSEC_BRUTE
48246+ grsec_enable_brute = 1;
48247+#endif
48248+#ifdef CONFIG_GRKERNSEC_DMESG
48249+ grsec_enable_dmesg = 1;
48250+#endif
48251+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48252+ grsec_enable_blackhole = 1;
48253+ grsec_lastack_retries = 4;
48254+#endif
48255+#ifdef CONFIG_GRKERNSEC_FIFO
48256+ grsec_enable_fifo = 1;
48257+#endif
48258+#ifdef CONFIG_GRKERNSEC_EXECVE
48259+ grsec_enable_execve = 1;
48260+#endif
48261+#ifdef CONFIG_GRKERNSEC_EXECLOG
48262+ grsec_enable_execlog = 1;
48263+#endif
48264+#ifdef CONFIG_GRKERNSEC_SIGNAL
48265+ grsec_enable_signal = 1;
48266+#endif
48267+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48268+ grsec_enable_forkfail = 1;
48269+#endif
48270+#ifdef CONFIG_GRKERNSEC_TIME
48271+ grsec_enable_time = 1;
48272+#endif
48273+#ifdef CONFIG_GRKERNSEC_RESLOG
48274+ grsec_resource_logging = 1;
48275+#endif
48276+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48277+ grsec_enable_chroot_findtask = 1;
48278+#endif
48279+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48280+ grsec_enable_chroot_unix = 1;
48281+#endif
48282+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48283+ grsec_enable_chroot_mount = 1;
48284+#endif
48285+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48286+ grsec_enable_chroot_fchdir = 1;
48287+#endif
48288+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48289+ grsec_enable_chroot_shmat = 1;
48290+#endif
48291+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48292+ grsec_enable_audit_ptrace = 1;
48293+#endif
48294+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48295+ grsec_enable_chroot_double = 1;
48296+#endif
48297+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48298+ grsec_enable_chroot_pivot = 1;
48299+#endif
48300+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48301+ grsec_enable_chroot_chdir = 1;
48302+#endif
48303+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48304+ grsec_enable_chroot_chmod = 1;
48305+#endif
48306+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48307+ grsec_enable_chroot_mknod = 1;
48308+#endif
48309+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48310+ grsec_enable_chroot_nice = 1;
48311+#endif
48312+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48313+ grsec_enable_chroot_execlog = 1;
48314+#endif
48315+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48316+ grsec_enable_chroot_caps = 1;
48317+#endif
48318+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48319+ grsec_enable_chroot_sysctl = 1;
48320+#endif
48321+#ifdef CONFIG_GRKERNSEC_TPE
48322+ grsec_enable_tpe = 1;
48323+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48324+#ifdef CONFIG_GRKERNSEC_TPE_ALL
48325+ grsec_enable_tpe_all = 1;
48326+#endif
48327+#endif
48328+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48329+ grsec_enable_socket_all = 1;
48330+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48331+#endif
48332+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48333+ grsec_enable_socket_client = 1;
48334+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48335+#endif
48336+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48337+ grsec_enable_socket_server = 1;
48338+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48339+#endif
48340+#endif
48341+
48342+ return;
48343+}
48344diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48345--- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48346+++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48347@@ -0,0 +1,43 @@
48348+#include <linux/kernel.h>
48349+#include <linux/sched.h>
48350+#include <linux/fs.h>
48351+#include <linux/file.h>
48352+#include <linux/grinternal.h>
48353+
48354+int
48355+gr_handle_follow_link(const struct inode *parent,
48356+ const struct inode *inode,
48357+ const struct dentry *dentry, const struct vfsmount *mnt)
48358+{
48359+#ifdef CONFIG_GRKERNSEC_LINK
48360+ const struct cred *cred = current_cred();
48361+
48362+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48363+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48364+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48365+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48366+ return -EACCES;
48367+ }
48368+#endif
48369+ return 0;
48370+}
48371+
48372+int
48373+gr_handle_hardlink(const struct dentry *dentry,
48374+ const struct vfsmount *mnt,
48375+ struct inode *inode, const int mode, const char *to)
48376+{
48377+#ifdef CONFIG_GRKERNSEC_LINK
48378+ const struct cred *cred = current_cred();
48379+
48380+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48381+ (!S_ISREG(mode) || (mode & S_ISUID) ||
48382+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48383+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48384+ !capable(CAP_FOWNER) && cred->uid) {
48385+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48386+ return -EPERM;
48387+ }
48388+#endif
48389+ return 0;
48390+}
48391diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48392--- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48393+++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48394@@ -0,0 +1,310 @@
48395+#include <linux/kernel.h>
48396+#include <linux/sched.h>
48397+#include <linux/file.h>
48398+#include <linux/tty.h>
48399+#include <linux/fs.h>
48400+#include <linux/grinternal.h>
48401+
48402+#ifdef CONFIG_TREE_PREEMPT_RCU
48403+#define DISABLE_PREEMPT() preempt_disable()
48404+#define ENABLE_PREEMPT() preempt_enable()
48405+#else
48406+#define DISABLE_PREEMPT()
48407+#define ENABLE_PREEMPT()
48408+#endif
48409+
48410+#define BEGIN_LOCKS(x) \
48411+ DISABLE_PREEMPT(); \
48412+ rcu_read_lock(); \
48413+ read_lock(&tasklist_lock); \
48414+ read_lock(&grsec_exec_file_lock); \
48415+ if (x != GR_DO_AUDIT) \
48416+ spin_lock(&grsec_alert_lock); \
48417+ else \
48418+ spin_lock(&grsec_audit_lock)
48419+
48420+#define END_LOCKS(x) \
48421+ if (x != GR_DO_AUDIT) \
48422+ spin_unlock(&grsec_alert_lock); \
48423+ else \
48424+ spin_unlock(&grsec_audit_lock); \
48425+ read_unlock(&grsec_exec_file_lock); \
48426+ read_unlock(&tasklist_lock); \
48427+ rcu_read_unlock(); \
48428+ ENABLE_PREEMPT(); \
48429+ if (x == GR_DONT_AUDIT) \
48430+ gr_handle_alertkill(current)
48431+
48432+enum {
48433+ FLOODING,
48434+ NO_FLOODING
48435+};
48436+
48437+extern char *gr_alert_log_fmt;
48438+extern char *gr_audit_log_fmt;
48439+extern char *gr_alert_log_buf;
48440+extern char *gr_audit_log_buf;
48441+
48442+static int gr_log_start(int audit)
48443+{
48444+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48445+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48446+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48447+
48448+ if (audit == GR_DO_AUDIT)
48449+ goto set_fmt;
48450+
48451+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48452+ grsec_alert_wtime = jiffies;
48453+ grsec_alert_fyet = 0;
48454+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48455+ grsec_alert_fyet++;
48456+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48457+ grsec_alert_wtime = jiffies;
48458+ grsec_alert_fyet++;
48459+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48460+ return FLOODING;
48461+ } else return FLOODING;
48462+
48463+set_fmt:
48464+ memset(buf, 0, PAGE_SIZE);
48465+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
48466+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48467+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48468+ } else if (current->signal->curr_ip) {
48469+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48470+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48471+ } else if (gr_acl_is_enabled()) {
48472+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48473+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48474+ } else {
48475+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
48476+ strcpy(buf, fmt);
48477+ }
48478+
48479+ return NO_FLOODING;
48480+}
48481+
48482+static void gr_log_middle(int audit, const char *msg, va_list ap)
48483+ __attribute__ ((format (printf, 2, 0)));
48484+
48485+static void gr_log_middle(int audit, const char *msg, va_list ap)
48486+{
48487+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48488+ unsigned int len = strlen(buf);
48489+
48490+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48491+
48492+ return;
48493+}
48494+
48495+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48496+ __attribute__ ((format (printf, 2, 3)));
48497+
48498+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48499+{
48500+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48501+ unsigned int len = strlen(buf);
48502+ va_list ap;
48503+
48504+ va_start(ap, msg);
48505+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48506+ va_end(ap);
48507+
48508+ return;
48509+}
48510+
48511+static void gr_log_end(int audit)
48512+{
48513+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48514+ unsigned int len = strlen(buf);
48515+
48516+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48517+ printk("%s\n", buf);
48518+
48519+ return;
48520+}
48521+
48522+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48523+{
48524+ int logtype;
48525+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48526+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48527+ void *voidptr = NULL;
48528+ int num1 = 0, num2 = 0;
48529+ unsigned long ulong1 = 0, ulong2 = 0;
48530+ struct dentry *dentry = NULL;
48531+ struct vfsmount *mnt = NULL;
48532+ struct file *file = NULL;
48533+ struct task_struct *task = NULL;
48534+ const struct cred *cred, *pcred;
48535+ va_list ap;
48536+
48537+ BEGIN_LOCKS(audit);
48538+ logtype = gr_log_start(audit);
48539+ if (logtype == FLOODING) {
48540+ END_LOCKS(audit);
48541+ return;
48542+ }
48543+ va_start(ap, argtypes);
48544+ switch (argtypes) {
48545+ case GR_TTYSNIFF:
48546+ task = va_arg(ap, struct task_struct *);
48547+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48548+ break;
48549+ case GR_SYSCTL_HIDDEN:
48550+ str1 = va_arg(ap, char *);
48551+ gr_log_middle_varargs(audit, msg, result, str1);
48552+ break;
48553+ case GR_RBAC:
48554+ dentry = va_arg(ap, struct dentry *);
48555+ mnt = va_arg(ap, struct vfsmount *);
48556+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48557+ break;
48558+ case GR_RBAC_STR:
48559+ dentry = va_arg(ap, struct dentry *);
48560+ mnt = va_arg(ap, struct vfsmount *);
48561+ str1 = va_arg(ap, char *);
48562+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48563+ break;
48564+ case GR_STR_RBAC:
48565+ str1 = va_arg(ap, char *);
48566+ dentry = va_arg(ap, struct dentry *);
48567+ mnt = va_arg(ap, struct vfsmount *);
48568+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48569+ break;
48570+ case GR_RBAC_MODE2:
48571+ dentry = va_arg(ap, struct dentry *);
48572+ mnt = va_arg(ap, struct vfsmount *);
48573+ str1 = va_arg(ap, char *);
48574+ str2 = va_arg(ap, char *);
48575+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48576+ break;
48577+ case GR_RBAC_MODE3:
48578+ dentry = va_arg(ap, struct dentry *);
48579+ mnt = va_arg(ap, struct vfsmount *);
48580+ str1 = va_arg(ap, char *);
48581+ str2 = va_arg(ap, char *);
48582+ str3 = va_arg(ap, char *);
48583+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48584+ break;
48585+ case GR_FILENAME:
48586+ dentry = va_arg(ap, struct dentry *);
48587+ mnt = va_arg(ap, struct vfsmount *);
48588+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48589+ break;
48590+ case GR_STR_FILENAME:
48591+ str1 = va_arg(ap, char *);
48592+ dentry = va_arg(ap, struct dentry *);
48593+ mnt = va_arg(ap, struct vfsmount *);
48594+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48595+ break;
48596+ case GR_FILENAME_STR:
48597+ dentry = va_arg(ap, struct dentry *);
48598+ mnt = va_arg(ap, struct vfsmount *);
48599+ str1 = va_arg(ap, char *);
48600+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48601+ break;
48602+ case GR_FILENAME_TWO_INT:
48603+ dentry = va_arg(ap, struct dentry *);
48604+ mnt = va_arg(ap, struct vfsmount *);
48605+ num1 = va_arg(ap, int);
48606+ num2 = va_arg(ap, int);
48607+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48608+ break;
48609+ case GR_FILENAME_TWO_INT_STR:
48610+ dentry = va_arg(ap, struct dentry *);
48611+ mnt = va_arg(ap, struct vfsmount *);
48612+ num1 = va_arg(ap, int);
48613+ num2 = va_arg(ap, int);
48614+ str1 = va_arg(ap, char *);
48615+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48616+ break;
48617+ case GR_TEXTREL:
48618+ file = va_arg(ap, struct file *);
48619+ ulong1 = va_arg(ap, unsigned long);
48620+ ulong2 = va_arg(ap, unsigned long);
48621+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48622+ break;
48623+ case GR_PTRACE:
48624+ task = va_arg(ap, struct task_struct *);
48625+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48626+ break;
48627+ case GR_RESOURCE:
48628+ task = va_arg(ap, struct task_struct *);
48629+ cred = __task_cred(task);
48630+ pcred = __task_cred(task->real_parent);
48631+ ulong1 = va_arg(ap, unsigned long);
48632+ str1 = va_arg(ap, char *);
48633+ ulong2 = va_arg(ap, unsigned long);
48634+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48635+ break;
48636+ case GR_CAP:
48637+ task = va_arg(ap, struct task_struct *);
48638+ cred = __task_cred(task);
48639+ pcred = __task_cred(task->real_parent);
48640+ str1 = va_arg(ap, char *);
48641+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48642+ break;
48643+ case GR_SIG:
48644+ str1 = va_arg(ap, char *);
48645+ voidptr = va_arg(ap, void *);
48646+ gr_log_middle_varargs(audit, msg, str1, voidptr);
48647+ break;
48648+ case GR_SIG2:
48649+ task = va_arg(ap, struct task_struct *);
48650+ cred = __task_cred(task);
48651+ pcred = __task_cred(task->real_parent);
48652+ num1 = va_arg(ap, int);
48653+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48654+ break;
48655+ case GR_CRASH1:
48656+ task = va_arg(ap, struct task_struct *);
48657+ cred = __task_cred(task);
48658+ pcred = __task_cred(task->real_parent);
48659+ ulong1 = va_arg(ap, unsigned long);
48660+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48661+ break;
48662+ case GR_CRASH2:
48663+ task = va_arg(ap, struct task_struct *);
48664+ cred = __task_cred(task);
48665+ pcred = __task_cred(task->real_parent);
48666+ ulong1 = va_arg(ap, unsigned long);
48667+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48668+ break;
48669+ case GR_RWXMAP:
48670+ file = va_arg(ap, struct file *);
48671+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48672+ break;
48673+ case GR_PSACCT:
48674+ {
48675+ unsigned int wday, cday;
48676+ __u8 whr, chr;
48677+ __u8 wmin, cmin;
48678+ __u8 wsec, csec;
48679+ char cur_tty[64] = { 0 };
48680+ char parent_tty[64] = { 0 };
48681+
48682+ task = va_arg(ap, struct task_struct *);
48683+ wday = va_arg(ap, unsigned int);
48684+ cday = va_arg(ap, unsigned int);
48685+ whr = va_arg(ap, int);
48686+ chr = va_arg(ap, int);
48687+ wmin = va_arg(ap, int);
48688+ cmin = va_arg(ap, int);
48689+ wsec = va_arg(ap, int);
48690+ csec = va_arg(ap, int);
48691+ ulong1 = va_arg(ap, unsigned long);
48692+ cred = __task_cred(task);
48693+ pcred = __task_cred(task->real_parent);
48694+
48695+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48696+ }
48697+ break;
48698+ default:
48699+ gr_log_middle(audit, msg, ap);
48700+ }
48701+ va_end(ap);
48702+ gr_log_end(audit);
48703+ END_LOCKS(audit);
48704+}
48705diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48706--- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48707+++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48708@@ -0,0 +1,33 @@
48709+#include <linux/kernel.h>
48710+#include <linux/sched.h>
48711+#include <linux/mm.h>
48712+#include <linux/mman.h>
48713+#include <linux/grinternal.h>
48714+
48715+void
48716+gr_handle_ioperm(void)
48717+{
48718+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48719+ return;
48720+}
48721+
48722+void
48723+gr_handle_iopl(void)
48724+{
48725+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48726+ return;
48727+}
48728+
48729+void
48730+gr_handle_mem_readwrite(u64 from, u64 to)
48731+{
48732+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48733+ return;
48734+}
48735+
48736+void
48737+gr_handle_vm86(void)
48738+{
48739+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48740+ return;
48741+}
48742diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48743--- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48744+++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48745@@ -0,0 +1,62 @@
48746+#include <linux/kernel.h>
48747+#include <linux/sched.h>
48748+#include <linux/mount.h>
48749+#include <linux/grsecurity.h>
48750+#include <linux/grinternal.h>
48751+
48752+void
48753+gr_log_remount(const char *devname, const int retval)
48754+{
48755+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48756+ if (grsec_enable_mount && (retval >= 0))
48757+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48758+#endif
48759+ return;
48760+}
48761+
48762+void
48763+gr_log_unmount(const char *devname, const int retval)
48764+{
48765+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48766+ if (grsec_enable_mount && (retval >= 0))
48767+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48768+#endif
48769+ return;
48770+}
48771+
48772+void
48773+gr_log_mount(const char *from, const char *to, const int retval)
48774+{
48775+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48776+ if (grsec_enable_mount && (retval >= 0))
48777+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48778+#endif
48779+ return;
48780+}
48781+
48782+int
48783+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48784+{
48785+#ifdef CONFIG_GRKERNSEC_ROFS
48786+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48787+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48788+ return -EPERM;
48789+ } else
48790+ return 0;
48791+#endif
48792+ return 0;
48793+}
48794+
48795+int
48796+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48797+{
48798+#ifdef CONFIG_GRKERNSEC_ROFS
48799+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48800+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48801+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48802+ return -EPERM;
48803+ } else
48804+ return 0;
48805+#endif
48806+ return 0;
48807+}
48808diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48809--- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48810+++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48811@@ -0,0 +1,36 @@
48812+#include <linux/kernel.h>
48813+#include <linux/sched.h>
48814+#include <linux/mm.h>
48815+#include <linux/file.h>
48816+#include <linux/grinternal.h>
48817+#include <linux/grsecurity.h>
48818+
48819+void
48820+gr_log_textrel(struct vm_area_struct * vma)
48821+{
48822+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48823+ if (grsec_enable_audit_textrel)
48824+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48825+#endif
48826+ return;
48827+}
48828+
48829+void
48830+gr_log_rwxmmap(struct file *file)
48831+{
48832+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48833+ if (grsec_enable_log_rwxmaps)
48834+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48835+#endif
48836+ return;
48837+}
48838+
48839+void
48840+gr_log_rwxmprotect(struct file *file)
48841+{
48842+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48843+ if (grsec_enable_log_rwxmaps)
48844+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48845+#endif
48846+ return;
48847+}
48848diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
48849--- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48850+++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
48851@@ -0,0 +1,14 @@
48852+#include <linux/kernel.h>
48853+#include <linux/sched.h>
48854+#include <linux/grinternal.h>
48855+#include <linux/grsecurity.h>
48856+
48857+void
48858+gr_audit_ptrace(struct task_struct *task)
48859+{
48860+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48861+ if (grsec_enable_audit_ptrace)
48862+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48863+#endif
48864+ return;
48865+}
48866diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
48867--- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48868+++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
48869@@ -0,0 +1,206 @@
48870+#include <linux/kernel.h>
48871+#include <linux/sched.h>
48872+#include <linux/delay.h>
48873+#include <linux/grsecurity.h>
48874+#include <linux/grinternal.h>
48875+#include <linux/hardirq.h>
48876+
48877+char *signames[] = {
48878+ [SIGSEGV] = "Segmentation fault",
48879+ [SIGILL] = "Illegal instruction",
48880+ [SIGABRT] = "Abort",
48881+ [SIGBUS] = "Invalid alignment/Bus error"
48882+};
48883+
48884+void
48885+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48886+{
48887+#ifdef CONFIG_GRKERNSEC_SIGNAL
48888+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48889+ (sig == SIGABRT) || (sig == SIGBUS))) {
48890+ if (t->pid == current->pid) {
48891+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48892+ } else {
48893+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48894+ }
48895+ }
48896+#endif
48897+ return;
48898+}
48899+
48900+int
48901+gr_handle_signal(const struct task_struct *p, const int sig)
48902+{
48903+#ifdef CONFIG_GRKERNSEC
48904+ if (current->pid > 1 && gr_check_protected_task(p)) {
48905+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48906+ return -EPERM;
48907+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48908+ return -EPERM;
48909+ }
48910+#endif
48911+ return 0;
48912+}
48913+
48914+#ifdef CONFIG_GRKERNSEC
48915+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48916+
48917+int gr_fake_force_sig(int sig, struct task_struct *t)
48918+{
48919+ unsigned long int flags;
48920+ int ret, blocked, ignored;
48921+ struct k_sigaction *action;
48922+
48923+ spin_lock_irqsave(&t->sighand->siglock, flags);
48924+ action = &t->sighand->action[sig-1];
48925+ ignored = action->sa.sa_handler == SIG_IGN;
48926+ blocked = sigismember(&t->blocked, sig);
48927+ if (blocked || ignored) {
48928+ action->sa.sa_handler = SIG_DFL;
48929+ if (blocked) {
48930+ sigdelset(&t->blocked, sig);
48931+ recalc_sigpending_and_wake(t);
48932+ }
48933+ }
48934+ if (action->sa.sa_handler == SIG_DFL)
48935+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
48936+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48937+
48938+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
48939+
48940+ return ret;
48941+}
48942+#endif
48943+
48944+#ifdef CONFIG_GRKERNSEC_BRUTE
48945+#define GR_USER_BAN_TIME (15 * 60)
48946+
48947+static int __get_dumpable(unsigned long mm_flags)
48948+{
48949+ int ret;
48950+
48951+ ret = mm_flags & MMF_DUMPABLE_MASK;
48952+ return (ret >= 2) ? 2 : ret;
48953+}
48954+#endif
48955+
48956+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48957+{
48958+#ifdef CONFIG_GRKERNSEC_BRUTE
48959+ uid_t uid = 0;
48960+
48961+ if (!grsec_enable_brute)
48962+ return;
48963+
48964+ rcu_read_lock();
48965+ read_lock(&tasklist_lock);
48966+ read_lock(&grsec_exec_file_lock);
48967+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48968+ p->real_parent->brute = 1;
48969+ else {
48970+ const struct cred *cred = __task_cred(p), *cred2;
48971+ struct task_struct *tsk, *tsk2;
48972+
48973+ if (!__get_dumpable(mm_flags) && cred->uid) {
48974+ struct user_struct *user;
48975+
48976+ uid = cred->uid;
48977+
48978+ /* this is put upon execution past expiration */
48979+ user = find_user(uid);
48980+ if (user == NULL)
48981+ goto unlock;
48982+ user->banned = 1;
48983+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48984+ if (user->ban_expires == ~0UL)
48985+ user->ban_expires--;
48986+
48987+ do_each_thread(tsk2, tsk) {
48988+ cred2 = __task_cred(tsk);
48989+ if (tsk != p && cred2->uid == uid)
48990+ gr_fake_force_sig(SIGKILL, tsk);
48991+ } while_each_thread(tsk2, tsk);
48992+ }
48993+ }
48994+unlock:
48995+ read_unlock(&grsec_exec_file_lock);
48996+ read_unlock(&tasklist_lock);
48997+ rcu_read_unlock();
48998+
48999+ if (uid)
49000+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49001+
49002+#endif
49003+ return;
49004+}
49005+
49006+void gr_handle_brute_check(void)
49007+{
49008+#ifdef CONFIG_GRKERNSEC_BRUTE
49009+ if (current->brute)
49010+ msleep(30 * 1000);
49011+#endif
49012+ return;
49013+}
49014+
49015+void gr_handle_kernel_exploit(void)
49016+{
49017+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49018+ const struct cred *cred;
49019+ struct task_struct *tsk, *tsk2;
49020+ struct user_struct *user;
49021+ uid_t uid;
49022+
49023+ if (in_irq() || in_serving_softirq() || in_nmi())
49024+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49025+
49026+ uid = current_uid();
49027+
49028+ if (uid == 0)
49029+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
49030+ else {
49031+ /* kill all the processes of this user, hold a reference
49032+ to their creds struct, and prevent them from creating
49033+ another process until system reset
49034+ */
49035+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49036+ /* we intentionally leak this ref */
49037+ user = get_uid(current->cred->user);
49038+ if (user) {
49039+ user->banned = 1;
49040+ user->ban_expires = ~0UL;
49041+ }
49042+
49043+ read_lock(&tasklist_lock);
49044+ do_each_thread(tsk2, tsk) {
49045+ cred = __task_cred(tsk);
49046+ if (cred->uid == uid)
49047+ gr_fake_force_sig(SIGKILL, tsk);
49048+ } while_each_thread(tsk2, tsk);
49049+ read_unlock(&tasklist_lock);
49050+ }
49051+#endif
49052+}
49053+
49054+int __gr_process_user_ban(struct user_struct *user)
49055+{
49056+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49057+ if (unlikely(user->banned)) {
49058+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49059+ user->banned = 0;
49060+ user->ban_expires = 0;
49061+ free_uid(user);
49062+ } else
49063+ return -EPERM;
49064+ }
49065+#endif
49066+ return 0;
49067+}
49068+
49069+int gr_process_user_ban(void)
49070+{
49071+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49072+ return __gr_process_user_ban(current->cred->user);
49073+#endif
49074+ return 0;
49075+}
49076diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49077--- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49078+++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49079@@ -0,0 +1,244 @@
49080+#include <linux/kernel.h>
49081+#include <linux/module.h>
49082+#include <linux/sched.h>
49083+#include <linux/file.h>
49084+#include <linux/net.h>
49085+#include <linux/in.h>
49086+#include <linux/ip.h>
49087+#include <net/sock.h>
49088+#include <net/inet_sock.h>
49089+#include <linux/grsecurity.h>
49090+#include <linux/grinternal.h>
49091+#include <linux/gracl.h>
49092+
49093+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49094+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49095+
49096+EXPORT_SYMBOL(gr_search_udp_recvmsg);
49097+EXPORT_SYMBOL(gr_search_udp_sendmsg);
49098+
49099+#ifdef CONFIG_UNIX_MODULE
49100+EXPORT_SYMBOL(gr_acl_handle_unix);
49101+EXPORT_SYMBOL(gr_acl_handle_mknod);
49102+EXPORT_SYMBOL(gr_handle_chroot_unix);
49103+EXPORT_SYMBOL(gr_handle_create);
49104+#endif
49105+
49106+#ifdef CONFIG_GRKERNSEC
49107+#define gr_conn_table_size 32749
49108+struct conn_table_entry {
49109+ struct conn_table_entry *next;
49110+ struct signal_struct *sig;
49111+};
49112+
49113+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49114+DEFINE_SPINLOCK(gr_conn_table_lock);
49115+
49116+extern const char * gr_socktype_to_name(unsigned char type);
49117+extern const char * gr_proto_to_name(unsigned char proto);
49118+extern const char * gr_sockfamily_to_name(unsigned char family);
49119+
49120+static __inline__ int
49121+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49122+{
49123+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49124+}
49125+
49126+static __inline__ int
49127+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49128+ __u16 sport, __u16 dport)
49129+{
49130+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49131+ sig->gr_sport == sport && sig->gr_dport == dport))
49132+ return 1;
49133+ else
49134+ return 0;
49135+}
49136+
49137+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49138+{
49139+ struct conn_table_entry **match;
49140+ unsigned int index;
49141+
49142+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49143+ sig->gr_sport, sig->gr_dport,
49144+ gr_conn_table_size);
49145+
49146+ newent->sig = sig;
49147+
49148+ match = &gr_conn_table[index];
49149+ newent->next = *match;
49150+ *match = newent;
49151+
49152+ return;
49153+}
49154+
49155+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49156+{
49157+ struct conn_table_entry *match, *last = NULL;
49158+ unsigned int index;
49159+
49160+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49161+ sig->gr_sport, sig->gr_dport,
49162+ gr_conn_table_size);
49163+
49164+ match = gr_conn_table[index];
49165+ while (match && !conn_match(match->sig,
49166+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49167+ sig->gr_dport)) {
49168+ last = match;
49169+ match = match->next;
49170+ }
49171+
49172+ if (match) {
49173+ if (last)
49174+ last->next = match->next;
49175+ else
49176+ gr_conn_table[index] = NULL;
49177+ kfree(match);
49178+ }
49179+
49180+ return;
49181+}
49182+
49183+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49184+ __u16 sport, __u16 dport)
49185+{
49186+ struct conn_table_entry *match;
49187+ unsigned int index;
49188+
49189+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49190+
49191+ match = gr_conn_table[index];
49192+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49193+ match = match->next;
49194+
49195+ if (match)
49196+ return match->sig;
49197+ else
49198+ return NULL;
49199+}
49200+
49201+#endif
49202+
49203+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49204+{
49205+#ifdef CONFIG_GRKERNSEC
49206+ struct signal_struct *sig = task->signal;
49207+ struct conn_table_entry *newent;
49208+
49209+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49210+ if (newent == NULL)
49211+ return;
49212+ /* no bh lock needed since we are called with bh disabled */
49213+ spin_lock(&gr_conn_table_lock);
49214+ gr_del_task_from_ip_table_nolock(sig);
49215+ sig->gr_saddr = inet->inet_rcv_saddr;
49216+ sig->gr_daddr = inet->inet_daddr;
49217+ sig->gr_sport = inet->inet_sport;
49218+ sig->gr_dport = inet->inet_dport;
49219+ gr_add_to_task_ip_table_nolock(sig, newent);
49220+ spin_unlock(&gr_conn_table_lock);
49221+#endif
49222+ return;
49223+}
49224+
49225+void gr_del_task_from_ip_table(struct task_struct *task)
49226+{
49227+#ifdef CONFIG_GRKERNSEC
49228+ spin_lock_bh(&gr_conn_table_lock);
49229+ gr_del_task_from_ip_table_nolock(task->signal);
49230+ spin_unlock_bh(&gr_conn_table_lock);
49231+#endif
49232+ return;
49233+}
49234+
49235+void
49236+gr_attach_curr_ip(const struct sock *sk)
49237+{
49238+#ifdef CONFIG_GRKERNSEC
49239+ struct signal_struct *p, *set;
49240+ const struct inet_sock *inet = inet_sk(sk);
49241+
49242+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49243+ return;
49244+
49245+ set = current->signal;
49246+
49247+ spin_lock_bh(&gr_conn_table_lock);
49248+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49249+ inet->inet_dport, inet->inet_sport);
49250+ if (unlikely(p != NULL)) {
49251+ set->curr_ip = p->curr_ip;
49252+ set->used_accept = 1;
49253+ gr_del_task_from_ip_table_nolock(p);
49254+ spin_unlock_bh(&gr_conn_table_lock);
49255+ return;
49256+ }
49257+ spin_unlock_bh(&gr_conn_table_lock);
49258+
49259+ set->curr_ip = inet->inet_daddr;
49260+ set->used_accept = 1;
49261+#endif
49262+ return;
49263+}
49264+
49265+int
49266+gr_handle_sock_all(const int family, const int type, const int protocol)
49267+{
49268+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49269+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49270+ (family != AF_UNIX)) {
49271+ if (family == AF_INET)
49272+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49273+ else
49274+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49275+ return -EACCES;
49276+ }
49277+#endif
49278+ return 0;
49279+}
49280+
49281+int
49282+gr_handle_sock_server(const struct sockaddr *sck)
49283+{
49284+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49285+ if (grsec_enable_socket_server &&
49286+ in_group_p(grsec_socket_server_gid) &&
49287+ sck && (sck->sa_family != AF_UNIX) &&
49288+ (sck->sa_family != AF_LOCAL)) {
49289+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49290+ return -EACCES;
49291+ }
49292+#endif
49293+ return 0;
49294+}
49295+
49296+int
49297+gr_handle_sock_server_other(const struct sock *sck)
49298+{
49299+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49300+ if (grsec_enable_socket_server &&
49301+ in_group_p(grsec_socket_server_gid) &&
49302+ sck && (sck->sk_family != AF_UNIX) &&
49303+ (sck->sk_family != AF_LOCAL)) {
49304+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49305+ return -EACCES;
49306+ }
49307+#endif
49308+ return 0;
49309+}
49310+
49311+int
49312+gr_handle_sock_client(const struct sockaddr *sck)
49313+{
49314+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49315+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49316+ sck && (sck->sa_family != AF_UNIX) &&
49317+ (sck->sa_family != AF_LOCAL)) {
49318+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49319+ return -EACCES;
49320+ }
49321+#endif
49322+ return 0;
49323+}
49324diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49325--- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49326+++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49327@@ -0,0 +1,442 @@
49328+#include <linux/kernel.h>
49329+#include <linux/sched.h>
49330+#include <linux/sysctl.h>
49331+#include <linux/grsecurity.h>
49332+#include <linux/grinternal.h>
49333+
49334+int
49335+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49336+{
49337+#ifdef CONFIG_GRKERNSEC_SYSCTL
49338+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49339+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49340+ return -EACCES;
49341+ }
49342+#endif
49343+ return 0;
49344+}
49345+
49346+#ifdef CONFIG_GRKERNSEC_ROFS
49347+static int __maybe_unused one = 1;
49348+#endif
49349+
49350+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49351+struct ctl_table grsecurity_table[] = {
49352+#ifdef CONFIG_GRKERNSEC_SYSCTL
49353+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49354+#ifdef CONFIG_GRKERNSEC_IO
49355+ {
49356+ .procname = "disable_priv_io",
49357+ .data = &grsec_disable_privio,
49358+ .maxlen = sizeof(int),
49359+ .mode = 0600,
49360+ .proc_handler = &proc_dointvec,
49361+ },
49362+#endif
49363+#endif
49364+#ifdef CONFIG_GRKERNSEC_LINK
49365+ {
49366+ .procname = "linking_restrictions",
49367+ .data = &grsec_enable_link,
49368+ .maxlen = sizeof(int),
49369+ .mode = 0600,
49370+ .proc_handler = &proc_dointvec,
49371+ },
49372+#endif
49373+#ifdef CONFIG_GRKERNSEC_BRUTE
49374+ {
49375+ .procname = "deter_bruteforce",
49376+ .data = &grsec_enable_brute,
49377+ .maxlen = sizeof(int),
49378+ .mode = 0600,
49379+ .proc_handler = &proc_dointvec,
49380+ },
49381+#endif
49382+#ifdef CONFIG_GRKERNSEC_FIFO
49383+ {
49384+ .procname = "fifo_restrictions",
49385+ .data = &grsec_enable_fifo,
49386+ .maxlen = sizeof(int),
49387+ .mode = 0600,
49388+ .proc_handler = &proc_dointvec,
49389+ },
49390+#endif
49391+#ifdef CONFIG_GRKERNSEC_EXECVE
49392+ {
49393+ .procname = "execve_limiting",
49394+ .data = &grsec_enable_execve,
49395+ .maxlen = sizeof(int),
49396+ .mode = 0600,
49397+ .proc_handler = &proc_dointvec,
49398+ },
49399+#endif
49400+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49401+ {
49402+ .procname = "ip_blackhole",
49403+ .data = &grsec_enable_blackhole,
49404+ .maxlen = sizeof(int),
49405+ .mode = 0600,
49406+ .proc_handler = &proc_dointvec,
49407+ },
49408+ {
49409+ .procname = "lastack_retries",
49410+ .data = &grsec_lastack_retries,
49411+ .maxlen = sizeof(int),
49412+ .mode = 0600,
49413+ .proc_handler = &proc_dointvec,
49414+ },
49415+#endif
49416+#ifdef CONFIG_GRKERNSEC_EXECLOG
49417+ {
49418+ .procname = "exec_logging",
49419+ .data = &grsec_enable_execlog,
49420+ .maxlen = sizeof(int),
49421+ .mode = 0600,
49422+ .proc_handler = &proc_dointvec,
49423+ },
49424+#endif
49425+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49426+ {
49427+ .procname = "rwxmap_logging",
49428+ .data = &grsec_enable_log_rwxmaps,
49429+ .maxlen = sizeof(int),
49430+ .mode = 0600,
49431+ .proc_handler = &proc_dointvec,
49432+ },
49433+#endif
49434+#ifdef CONFIG_GRKERNSEC_SIGNAL
49435+ {
49436+ .procname = "signal_logging",
49437+ .data = &grsec_enable_signal,
49438+ .maxlen = sizeof(int),
49439+ .mode = 0600,
49440+ .proc_handler = &proc_dointvec,
49441+ },
49442+#endif
49443+#ifdef CONFIG_GRKERNSEC_FORKFAIL
49444+ {
49445+ .procname = "forkfail_logging",
49446+ .data = &grsec_enable_forkfail,
49447+ .maxlen = sizeof(int),
49448+ .mode = 0600,
49449+ .proc_handler = &proc_dointvec,
49450+ },
49451+#endif
49452+#ifdef CONFIG_GRKERNSEC_TIME
49453+ {
49454+ .procname = "timechange_logging",
49455+ .data = &grsec_enable_time,
49456+ .maxlen = sizeof(int),
49457+ .mode = 0600,
49458+ .proc_handler = &proc_dointvec,
49459+ },
49460+#endif
49461+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49462+ {
49463+ .procname = "chroot_deny_shmat",
49464+ .data = &grsec_enable_chroot_shmat,
49465+ .maxlen = sizeof(int),
49466+ .mode = 0600,
49467+ .proc_handler = &proc_dointvec,
49468+ },
49469+#endif
49470+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49471+ {
49472+ .procname = "chroot_deny_unix",
49473+ .data = &grsec_enable_chroot_unix,
49474+ .maxlen = sizeof(int),
49475+ .mode = 0600,
49476+ .proc_handler = &proc_dointvec,
49477+ },
49478+#endif
49479+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49480+ {
49481+ .procname = "chroot_deny_mount",
49482+ .data = &grsec_enable_chroot_mount,
49483+ .maxlen = sizeof(int),
49484+ .mode = 0600,
49485+ .proc_handler = &proc_dointvec,
49486+ },
49487+#endif
49488+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49489+ {
49490+ .procname = "chroot_deny_fchdir",
49491+ .data = &grsec_enable_chroot_fchdir,
49492+ .maxlen = sizeof(int),
49493+ .mode = 0600,
49494+ .proc_handler = &proc_dointvec,
49495+ },
49496+#endif
49497+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49498+ {
49499+ .procname = "chroot_deny_chroot",
49500+ .data = &grsec_enable_chroot_double,
49501+ .maxlen = sizeof(int),
49502+ .mode = 0600,
49503+ .proc_handler = &proc_dointvec,
49504+ },
49505+#endif
49506+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49507+ {
49508+ .procname = "chroot_deny_pivot",
49509+ .data = &grsec_enable_chroot_pivot,
49510+ .maxlen = sizeof(int),
49511+ .mode = 0600,
49512+ .proc_handler = &proc_dointvec,
49513+ },
49514+#endif
49515+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49516+ {
49517+ .procname = "chroot_enforce_chdir",
49518+ .data = &grsec_enable_chroot_chdir,
49519+ .maxlen = sizeof(int),
49520+ .mode = 0600,
49521+ .proc_handler = &proc_dointvec,
49522+ },
49523+#endif
49524+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49525+ {
49526+ .procname = "chroot_deny_chmod",
49527+ .data = &grsec_enable_chroot_chmod,
49528+ .maxlen = sizeof(int),
49529+ .mode = 0600,
49530+ .proc_handler = &proc_dointvec,
49531+ },
49532+#endif
49533+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49534+ {
49535+ .procname = "chroot_deny_mknod",
49536+ .data = &grsec_enable_chroot_mknod,
49537+ .maxlen = sizeof(int),
49538+ .mode = 0600,
49539+ .proc_handler = &proc_dointvec,
49540+ },
49541+#endif
49542+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49543+ {
49544+ .procname = "chroot_restrict_nice",
49545+ .data = &grsec_enable_chroot_nice,
49546+ .maxlen = sizeof(int),
49547+ .mode = 0600,
49548+ .proc_handler = &proc_dointvec,
49549+ },
49550+#endif
49551+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49552+ {
49553+ .procname = "chroot_execlog",
49554+ .data = &grsec_enable_chroot_execlog,
49555+ .maxlen = sizeof(int),
49556+ .mode = 0600,
49557+ .proc_handler = &proc_dointvec,
49558+ },
49559+#endif
49560+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49561+ {
49562+ .procname = "chroot_caps",
49563+ .data = &grsec_enable_chroot_caps,
49564+ .maxlen = sizeof(int),
49565+ .mode = 0600,
49566+ .proc_handler = &proc_dointvec,
49567+ },
49568+#endif
49569+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49570+ {
49571+ .procname = "chroot_deny_sysctl",
49572+ .data = &grsec_enable_chroot_sysctl,
49573+ .maxlen = sizeof(int),
49574+ .mode = 0600,
49575+ .proc_handler = &proc_dointvec,
49576+ },
49577+#endif
49578+#ifdef CONFIG_GRKERNSEC_TPE
49579+ {
49580+ .procname = "tpe",
49581+ .data = &grsec_enable_tpe,
49582+ .maxlen = sizeof(int),
49583+ .mode = 0600,
49584+ .proc_handler = &proc_dointvec,
49585+ },
49586+ {
49587+ .procname = "tpe_gid",
49588+ .data = &grsec_tpe_gid,
49589+ .maxlen = sizeof(int),
49590+ .mode = 0600,
49591+ .proc_handler = &proc_dointvec,
49592+ },
49593+#endif
49594+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49595+ {
49596+ .procname = "tpe_invert",
49597+ .data = &grsec_enable_tpe_invert,
49598+ .maxlen = sizeof(int),
49599+ .mode = 0600,
49600+ .proc_handler = &proc_dointvec,
49601+ },
49602+#endif
49603+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49604+ {
49605+ .procname = "tpe_restrict_all",
49606+ .data = &grsec_enable_tpe_all,
49607+ .maxlen = sizeof(int),
49608+ .mode = 0600,
49609+ .proc_handler = &proc_dointvec,
49610+ },
49611+#endif
49612+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49613+ {
49614+ .procname = "socket_all",
49615+ .data = &grsec_enable_socket_all,
49616+ .maxlen = sizeof(int),
49617+ .mode = 0600,
49618+ .proc_handler = &proc_dointvec,
49619+ },
49620+ {
49621+ .procname = "socket_all_gid",
49622+ .data = &grsec_socket_all_gid,
49623+ .maxlen = sizeof(int),
49624+ .mode = 0600,
49625+ .proc_handler = &proc_dointvec,
49626+ },
49627+#endif
49628+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49629+ {
49630+ .procname = "socket_client",
49631+ .data = &grsec_enable_socket_client,
49632+ .maxlen = sizeof(int),
49633+ .mode = 0600,
49634+ .proc_handler = &proc_dointvec,
49635+ },
49636+ {
49637+ .procname = "socket_client_gid",
49638+ .data = &grsec_socket_client_gid,
49639+ .maxlen = sizeof(int),
49640+ .mode = 0600,
49641+ .proc_handler = &proc_dointvec,
49642+ },
49643+#endif
49644+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49645+ {
49646+ .procname = "socket_server",
49647+ .data = &grsec_enable_socket_server,
49648+ .maxlen = sizeof(int),
49649+ .mode = 0600,
49650+ .proc_handler = &proc_dointvec,
49651+ },
49652+ {
49653+ .procname = "socket_server_gid",
49654+ .data = &grsec_socket_server_gid,
49655+ .maxlen = sizeof(int),
49656+ .mode = 0600,
49657+ .proc_handler = &proc_dointvec,
49658+ },
49659+#endif
49660+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49661+ {
49662+ .procname = "audit_group",
49663+ .data = &grsec_enable_group,
49664+ .maxlen = sizeof(int),
49665+ .mode = 0600,
49666+ .proc_handler = &proc_dointvec,
49667+ },
49668+ {
49669+ .procname = "audit_gid",
49670+ .data = &grsec_audit_gid,
49671+ .maxlen = sizeof(int),
49672+ .mode = 0600,
49673+ .proc_handler = &proc_dointvec,
49674+ },
49675+#endif
49676+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49677+ {
49678+ .procname = "audit_chdir",
49679+ .data = &grsec_enable_chdir,
49680+ .maxlen = sizeof(int),
49681+ .mode = 0600,
49682+ .proc_handler = &proc_dointvec,
49683+ },
49684+#endif
49685+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49686+ {
49687+ .procname = "audit_mount",
49688+ .data = &grsec_enable_mount,
49689+ .maxlen = sizeof(int),
49690+ .mode = 0600,
49691+ .proc_handler = &proc_dointvec,
49692+ },
49693+#endif
49694+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49695+ {
49696+ .procname = "audit_textrel",
49697+ .data = &grsec_enable_audit_textrel,
49698+ .maxlen = sizeof(int),
49699+ .mode = 0600,
49700+ .proc_handler = &proc_dointvec,
49701+ },
49702+#endif
49703+#ifdef CONFIG_GRKERNSEC_DMESG
49704+ {
49705+ .procname = "dmesg",
49706+ .data = &grsec_enable_dmesg,
49707+ .maxlen = sizeof(int),
49708+ .mode = 0600,
49709+ .proc_handler = &proc_dointvec,
49710+ },
49711+#endif
49712+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49713+ {
49714+ .procname = "chroot_findtask",
49715+ .data = &grsec_enable_chroot_findtask,
49716+ .maxlen = sizeof(int),
49717+ .mode = 0600,
49718+ .proc_handler = &proc_dointvec,
49719+ },
49720+#endif
49721+#ifdef CONFIG_GRKERNSEC_RESLOG
49722+ {
49723+ .procname = "resource_logging",
49724+ .data = &grsec_resource_logging,
49725+ .maxlen = sizeof(int),
49726+ .mode = 0600,
49727+ .proc_handler = &proc_dointvec,
49728+ },
49729+#endif
49730+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49731+ {
49732+ .procname = "audit_ptrace",
49733+ .data = &grsec_enable_audit_ptrace,
49734+ .maxlen = sizeof(int),
49735+ .mode = 0600,
49736+ .proc_handler = &proc_dointvec,
49737+ },
49738+#endif
49739+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49740+ {
49741+ .procname = "harden_ptrace",
49742+ .data = &grsec_enable_harden_ptrace,
49743+ .maxlen = sizeof(int),
49744+ .mode = 0600,
49745+ .proc_handler = &proc_dointvec,
49746+ },
49747+#endif
49748+ {
49749+ .procname = "grsec_lock",
49750+ .data = &grsec_lock,
49751+ .maxlen = sizeof(int),
49752+ .mode = 0600,
49753+ .proc_handler = &proc_dointvec,
49754+ },
49755+#endif
49756+#ifdef CONFIG_GRKERNSEC_ROFS
49757+ {
49758+ .procname = "romount_protect",
49759+ .data = &grsec_enable_rofs,
49760+ .maxlen = sizeof(int),
49761+ .mode = 0600,
49762+ .proc_handler = &proc_dointvec_minmax,
49763+ .extra1 = &one,
49764+ .extra2 = &one,
49765+ },
49766+#endif
49767+ { }
49768+};
49769+#endif
49770diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49771--- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49772+++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49773@@ -0,0 +1,16 @@
49774+#include <linux/kernel.h>
49775+#include <linux/sched.h>
49776+#include <linux/grinternal.h>
49777+#include <linux/module.h>
49778+
49779+void
49780+gr_log_timechange(void)
49781+{
49782+#ifdef CONFIG_GRKERNSEC_TIME
49783+ if (grsec_enable_time)
49784+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49785+#endif
49786+ return;
49787+}
49788+
49789+EXPORT_SYMBOL(gr_log_timechange);
49790diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49791--- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49792+++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49793@@ -0,0 +1,39 @@
49794+#include <linux/kernel.h>
49795+#include <linux/sched.h>
49796+#include <linux/file.h>
49797+#include <linux/fs.h>
49798+#include <linux/grinternal.h>
49799+
49800+extern int gr_acl_tpe_check(void);
49801+
49802+int
49803+gr_tpe_allow(const struct file *file)
49804+{
49805+#ifdef CONFIG_GRKERNSEC
49806+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49807+ const struct cred *cred = current_cred();
49808+
49809+ if (cred->uid && ((grsec_enable_tpe &&
49810+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49811+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49812+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49813+#else
49814+ in_group_p(grsec_tpe_gid)
49815+#endif
49816+ ) || gr_acl_tpe_check()) &&
49817+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49818+ (inode->i_mode & S_IWOTH))))) {
49819+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49820+ return 0;
49821+ }
49822+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49823+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49824+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49825+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49826+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49827+ return 0;
49828+ }
49829+#endif
49830+#endif
49831+ return 1;
49832+}
49833diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
49834--- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49835+++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
49836@@ -0,0 +1,61 @@
49837+#include <linux/err.h>
49838+#include <linux/kernel.h>
49839+#include <linux/sched.h>
49840+#include <linux/mm.h>
49841+#include <linux/scatterlist.h>
49842+#include <linux/crypto.h>
49843+#include <linux/gracl.h>
49844+
49845+
49846+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49847+#error "crypto and sha256 must be built into the kernel"
49848+#endif
49849+
49850+int
49851+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49852+{
49853+ char *p;
49854+ struct crypto_hash *tfm;
49855+ struct hash_desc desc;
49856+ struct scatterlist sg;
49857+ unsigned char temp_sum[GR_SHA_LEN];
49858+ volatile int retval = 0;
49859+ volatile int dummy = 0;
49860+ unsigned int i;
49861+
49862+ sg_init_table(&sg, 1);
49863+
49864+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49865+ if (IS_ERR(tfm)) {
49866+ /* should never happen, since sha256 should be built in */
49867+ return 1;
49868+ }
49869+
49870+ desc.tfm = tfm;
49871+ desc.flags = 0;
49872+
49873+ crypto_hash_init(&desc);
49874+
49875+ p = salt;
49876+ sg_set_buf(&sg, p, GR_SALT_LEN);
49877+ crypto_hash_update(&desc, &sg, sg.length);
49878+
49879+ p = entry->pw;
49880+ sg_set_buf(&sg, p, strlen(p));
49881+
49882+ crypto_hash_update(&desc, &sg, sg.length);
49883+
49884+ crypto_hash_final(&desc, temp_sum);
49885+
49886+ memset(entry->pw, 0, GR_PW_LEN);
49887+
49888+ for (i = 0; i < GR_SHA_LEN; i++)
49889+ if (sum[i] != temp_sum[i])
49890+ retval = 1;
49891+ else
49892+ dummy = 1; // waste a cycle
49893+
49894+ crypto_free_hash(tfm);
49895+
49896+ return retval;
49897+}
49898diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
49899--- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49900+++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-05 19:44:37.000000000 -0400
49901@@ -0,0 +1,1048 @@
49902+#
49903+# grsecurity configuration
49904+#
49905+
49906+menu "Grsecurity"
49907+
49908+config GRKERNSEC
49909+ bool "Grsecurity"
49910+ select CRYPTO
49911+ select CRYPTO_SHA256
49912+ help
49913+ If you say Y here, you will be able to configure many features
49914+ that will enhance the security of your system. It is highly
49915+ recommended that you say Y here and read through the help
49916+ for each option so that you fully understand the features and
49917+ can evaluate their usefulness for your machine.
49918+
49919+choice
49920+ prompt "Security Level"
49921+ depends on GRKERNSEC
49922+ default GRKERNSEC_CUSTOM
49923+
49924+config GRKERNSEC_LOW
49925+ bool "Low"
49926+ select GRKERNSEC_LINK
49927+ select GRKERNSEC_FIFO
49928+ select GRKERNSEC_EXECVE
49929+ select GRKERNSEC_RANDNET
49930+ select GRKERNSEC_DMESG
49931+ select GRKERNSEC_CHROOT
49932+ select GRKERNSEC_CHROOT_CHDIR
49933+
49934+ help
49935+ If you choose this option, several of the grsecurity options will
49936+ be enabled that will give you greater protection against a number
49937+ of attacks, while assuring that none of your software will have any
49938+ conflicts with the additional security measures. If you run a lot
49939+ of unusual software, or you are having problems with the higher
49940+ security levels, you should say Y here. With this option, the
49941+ following features are enabled:
49942+
49943+ - Linking restrictions
49944+ - FIFO restrictions
49945+ - Enforcing RLIMIT_NPROC on execve
49946+ - Restricted dmesg
49947+ - Enforced chdir("/") on chroot
49948+ - Runtime module disabling
49949+
49950+config GRKERNSEC_MEDIUM
49951+ bool "Medium"
49952+ select PAX
49953+ select PAX_EI_PAX
49954+ select PAX_PT_PAX_FLAGS
49955+ select PAX_HAVE_ACL_FLAGS
49956+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49957+ select GRKERNSEC_CHROOT
49958+ select GRKERNSEC_CHROOT_SYSCTL
49959+ select GRKERNSEC_LINK
49960+ select GRKERNSEC_FIFO
49961+ select GRKERNSEC_EXECVE
49962+ select GRKERNSEC_DMESG
49963+ select GRKERNSEC_RANDNET
49964+ select GRKERNSEC_FORKFAIL
49965+ select GRKERNSEC_TIME
49966+ select GRKERNSEC_SIGNAL
49967+ select GRKERNSEC_CHROOT
49968+ select GRKERNSEC_CHROOT_UNIX
49969+ select GRKERNSEC_CHROOT_MOUNT
49970+ select GRKERNSEC_CHROOT_PIVOT
49971+ select GRKERNSEC_CHROOT_DOUBLE
49972+ select GRKERNSEC_CHROOT_CHDIR
49973+ select GRKERNSEC_CHROOT_MKNOD
49974+ select GRKERNSEC_PROC
49975+ select GRKERNSEC_PROC_USERGROUP
49976+ select PAX_RANDUSTACK
49977+ select PAX_ASLR
49978+ select PAX_RANDMMAP
49979+ select PAX_REFCOUNT if (X86 || SPARC64)
49980+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49981+
49982+ help
49983+ If you say Y here, several features in addition to those included
49984+ in the low additional security level will be enabled. These
49985+ features provide even more security to your system, though in rare
49986+ cases they may be incompatible with very old or poorly written
49987+ software. If you enable this option, make sure that your auth
49988+ service (identd) is running as gid 1001. With this option,
49989+ the following features (in addition to those provided in the
49990+ low additional security level) will be enabled:
49991+
49992+ - Failed fork logging
49993+ - Time change logging
49994+ - Signal logging
49995+ - Deny mounts in chroot
49996+ - Deny double chrooting
49997+ - Deny sysctl writes in chroot
49998+ - Deny mknod in chroot
49999+ - Deny access to abstract AF_UNIX sockets out of chroot
50000+ - Deny pivot_root in chroot
50001+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50002+ - /proc restrictions with special GID set to 10 (usually wheel)
50003+ - Address Space Layout Randomization (ASLR)
50004+ - Prevent exploitation of most refcount overflows
50005+ - Bounds checking of copying between the kernel and userland
50006+
50007+config GRKERNSEC_HIGH
50008+ bool "High"
50009+ select GRKERNSEC_LINK
50010+ select GRKERNSEC_FIFO
50011+ select GRKERNSEC_EXECVE
50012+ select GRKERNSEC_DMESG
50013+ select GRKERNSEC_FORKFAIL
50014+ select GRKERNSEC_TIME
50015+ select GRKERNSEC_SIGNAL
50016+ select GRKERNSEC_CHROOT
50017+ select GRKERNSEC_CHROOT_SHMAT
50018+ select GRKERNSEC_CHROOT_UNIX
50019+ select GRKERNSEC_CHROOT_MOUNT
50020+ select GRKERNSEC_CHROOT_FCHDIR
50021+ select GRKERNSEC_CHROOT_PIVOT
50022+ select GRKERNSEC_CHROOT_DOUBLE
50023+ select GRKERNSEC_CHROOT_CHDIR
50024+ select GRKERNSEC_CHROOT_MKNOD
50025+ select GRKERNSEC_CHROOT_CAPS
50026+ select GRKERNSEC_CHROOT_SYSCTL
50027+ select GRKERNSEC_CHROOT_FINDTASK
50028+ select GRKERNSEC_SYSFS_RESTRICT
50029+ select GRKERNSEC_PROC
50030+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50031+ select GRKERNSEC_HIDESYM
50032+ select GRKERNSEC_BRUTE
50033+ select GRKERNSEC_PROC_USERGROUP
50034+ select GRKERNSEC_KMEM
50035+ select GRKERNSEC_RESLOG
50036+ select GRKERNSEC_RANDNET
50037+ select GRKERNSEC_PROC_ADD
50038+ select GRKERNSEC_CHROOT_CHMOD
50039+ select GRKERNSEC_CHROOT_NICE
50040+ select GRKERNSEC_AUDIT_MOUNT
50041+ select GRKERNSEC_MODHARDEN if (MODULES)
50042+ select GRKERNSEC_HARDEN_PTRACE
50043+ select GRKERNSEC_VM86 if (X86_32)
50044+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50045+ select PAX
50046+ select PAX_RANDUSTACK
50047+ select PAX_ASLR
50048+ select PAX_RANDMMAP
50049+ select PAX_NOEXEC
50050+ select PAX_MPROTECT
50051+ select PAX_EI_PAX
50052+ select PAX_PT_PAX_FLAGS
50053+ select PAX_HAVE_ACL_FLAGS
50054+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50055+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50056+ select PAX_RANDKSTACK if (X86_TSC && X86)
50057+ select PAX_SEGMEXEC if (X86_32)
50058+ select PAX_PAGEEXEC
50059+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50060+ select PAX_EMUTRAMP if (PARISC)
50061+ select PAX_EMUSIGRT if (PARISC)
50062+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50063+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50064+ select PAX_REFCOUNT if (X86 || SPARC64)
50065+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50066+ help
50067+ If you say Y here, many of the features of grsecurity will be
50068+ enabled, which will protect you against many kinds of attacks
50069+ against your system. The heightened security comes at a cost
50070+ of an increased chance of incompatibilities with rare software
50071+ on your machine. Since this security level enables PaX, you should
50072+ view <http://pax.grsecurity.net> and read about the PaX
50073+ project. While you are there, download chpax and run it on
50074+ binaries that cause problems with PaX. Also remember that
50075+ since the /proc restrictions are enabled, you must run your
50076+ identd as gid 1001. This security level enables the following
50077+ features in addition to those listed in the low and medium
50078+ security levels:
50079+
50080+ - Additional /proc restrictions
50081+ - Chmod restrictions in chroot
50082+ - No signals, ptrace, or viewing of processes outside of chroot
50083+ - Capability restrictions in chroot
50084+ - Deny fchdir out of chroot
50085+ - Priority restrictions in chroot
50086+ - Segmentation-based implementation of PaX
50087+ - Mprotect restrictions
50088+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50089+ - Kernel stack randomization
50090+ - Mount/unmount/remount logging
50091+ - Kernel symbol hiding
50092+ - Prevention of memory exhaustion-based exploits
50093+ - Hardening of module auto-loading
50094+ - Ptrace restrictions
50095+ - Restricted vm86 mode
50096+ - Restricted sysfs/debugfs
50097+ - Active kernel exploit response
50098+
50099+config GRKERNSEC_CUSTOM
50100+ bool "Custom"
50101+ help
50102+ If you say Y here, you will be able to configure every grsecurity
50103+ option, which allows you to enable many more features that aren't
50104+ covered in the basic security levels. These additional features
50105+ include TPE, socket restrictions, and the sysctl system for
50106+ grsecurity. It is advised that you read through the help for
50107+ each option to determine its usefulness in your situation.
50108+
50109+endchoice
50110+
50111+menu "Address Space Protection"
50112+depends on GRKERNSEC
50113+
50114+config GRKERNSEC_KMEM
50115+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50116+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50117+ help
50118+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50119+ be written to via mmap or otherwise to modify the running kernel.
50120+ /dev/port will also not be allowed to be opened. If you have module
50121+ support disabled, enabling this will close up four ways that are
50122+ currently used to insert malicious code into the running kernel.
50123+ Even with all these features enabled, we still highly recommend that
50124+ you use the RBAC system, as it is still possible for an attacker to
50125+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50126+ If you are not using XFree86, you may be able to stop this additional
50127+ case by enabling the 'Disable privileged I/O' option. Though nothing
50128+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50129+ but only to video memory, which is the only writing we allow in this
50130+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50131+ not be allowed to mprotect it with PROT_WRITE later.
50132+ It is highly recommended that you say Y here if you meet all the
50133+ conditions above.
50134+
50135+config GRKERNSEC_VM86
50136+ bool "Restrict VM86 mode"
50137+ depends on X86_32
50138+
50139+ help
50140+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50141+ make use of a special execution mode on 32bit x86 processors called
50142+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50143+ video cards and will still work with this option enabled. The purpose
50144+ of the option is to prevent exploitation of emulation errors in
50145+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50146+ Nearly all users should be able to enable this option.
50147+
50148+config GRKERNSEC_IO
50149+ bool "Disable privileged I/O"
50150+ depends on X86
50151+ select RTC_CLASS
50152+ select RTC_INTF_DEV
50153+ select RTC_DRV_CMOS
50154+
50155+ help
50156+ If you say Y here, all ioperm and iopl calls will return an error.
50157+ Ioperm and iopl can be used to modify the running kernel.
50158+ Unfortunately, some programs need this access to operate properly,
50159+ the most notable of which are XFree86 and hwclock. hwclock can be
50160+ remedied by having RTC support in the kernel, so real-time
50161+ clock support is enabled if this option is enabled, to ensure
50162+ that hwclock operates correctly. XFree86 still will not
50163+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50164+ IF YOU USE XFree86. If you use XFree86 and you still want to
50165+ protect your kernel against modification, use the RBAC system.
50166+
50167+config GRKERNSEC_PROC_MEMMAP
50168+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50169+ default y if (PAX_NOEXEC || PAX_ASLR)
50170+ depends on PAX_NOEXEC || PAX_ASLR
50171+ help
50172+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50173+ give no information about the addresses of its mappings if
50174+ PaX features that rely on random addresses are enabled on the task.
50175+ If you use PaX it is greatly recommended that you say Y here as it
50176+ closes up a hole that makes the full ASLR useless for suid
50177+ binaries.
50178+
50179+config GRKERNSEC_BRUTE
50180+ bool "Deter exploit bruteforcing"
50181+ help
50182+ If you say Y here, attempts to bruteforce exploits against forking
50183+ daemons such as apache or sshd, as well as against suid/sgid binaries
50184+ will be deterred. When a child of a forking daemon is killed by PaX
50185+ or crashes due to an illegal instruction or other suspicious signal,
50186+ the parent process will be delayed 30 seconds upon every subsequent
50187+ fork until the administrator is able to assess the situation and
50188+ restart the daemon.
50189+ In the suid/sgid case, the attempt is logged, the user has all their
50190+ processes terminated, and they are prevented from executing any further
50191+ processes for 15 minutes.
50192+ It is recommended that you also enable signal logging in the auditing
50193+ section so that logs are generated when a process triggers a suspicious
50194+ signal.
50195+ If the sysctl option is enabled, a sysctl option with name
50196+ "deter_bruteforce" is created.
50197+
50198+
50199+config GRKERNSEC_MODHARDEN
50200+ bool "Harden module auto-loading"
50201+ depends on MODULES
50202+ help
50203+ If you say Y here, module auto-loading in response to use of some
50204+ feature implemented by an unloaded module will be restricted to
50205+ root users. Enabling this option helps defend against attacks
50206+ by unprivileged users who abuse the auto-loading behavior to
50207+ cause a vulnerable module to load that is then exploited.
50208+
50209+ If this option prevents a legitimate use of auto-loading for a
50210+ non-root user, the administrator can execute modprobe manually
50211+ with the exact name of the module mentioned in the alert log.
50212+ Alternatively, the administrator can add the module to the list
50213+ of modules loaded at boot by modifying init scripts.
50214+
50215+ Modification of init scripts will most likely be needed on
50216+ Ubuntu servers with encrypted home directory support enabled,
50217+ as the first non-root user logging in will cause the ecb(aes),
50218+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50219+
50220+config GRKERNSEC_HIDESYM
50221+ bool "Hide kernel symbols"
50222+ help
50223+ If you say Y here, getting information on loaded modules, and
50224+ displaying all kernel symbols through a syscall will be restricted
50225+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50226+ /proc/kallsyms will be restricted to the root user. The RBAC
50227+ system can hide that entry even from root.
50228+
50229+ This option also prevents leaking of kernel addresses through
50230+ several /proc entries.
50231+
50232+ Note that this option is only effective provided the following
50233+ conditions are met:
50234+ 1) The kernel using grsecurity is not precompiled by some distribution
50235+ 2) You have also enabled GRKERNSEC_DMESG
50236+ 3) You are using the RBAC system and hiding other files such as your
50237+ kernel image and System.map. Alternatively, enabling this option
50238+ causes the permissions on /boot, /lib/modules, and the kernel
50239+ source directory to change at compile time to prevent
50240+ reading by non-root users.
50241+ If the above conditions are met, this option will aid in providing a
50242+ useful protection against local kernel exploitation of overflows
50243+ and arbitrary read/write vulnerabilities.
50244+
50245+config GRKERNSEC_KERN_LOCKOUT
50246+ bool "Active kernel exploit response"
50247+ depends on X86 || ARM || PPC || SPARC
50248+ help
50249+ If you say Y here, when a PaX alert is triggered due to suspicious
50250+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50251+ or an OOPs occurs due to bad memory accesses, instead of just
50252+ terminating the offending process (and potentially allowing
50253+ a subsequent exploit from the same user), we will take one of two
50254+ actions:
50255+ If the user was root, we will panic the system
50256+ If the user was non-root, we will log the attempt, terminate
50257+ all processes owned by the user, then prevent them from creating
50258+ any new processes until the system is restarted
50259+ This deters repeated kernel exploitation/bruteforcing attempts
50260+ and is useful for later forensics.
50261+
50262+endmenu
50263+menu "Role Based Access Control Options"
50264+depends on GRKERNSEC
50265+
50266+config GRKERNSEC_RBAC_DEBUG
50267+ bool
50268+
50269+config GRKERNSEC_NO_RBAC
50270+ bool "Disable RBAC system"
50271+ help
50272+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50273+ preventing the RBAC system from being enabled. You should only say Y
50274+ here if you have no intention of using the RBAC system, so as to prevent
50275+ an attacker with root access from misusing the RBAC system to hide files
50276+ and processes when loadable module support and /dev/[k]mem have been
50277+ locked down.
50278+
50279+config GRKERNSEC_ACL_HIDEKERN
50280+ bool "Hide kernel processes"
50281+ help
50282+ If you say Y here, all kernel threads will be hidden to all
50283+ processes but those whose subject has the "view hidden processes"
50284+ flag.
50285+
50286+config GRKERNSEC_ACL_MAXTRIES
50287+ int "Maximum tries before password lockout"
50288+ default 3
50289+ help
50290+ This option enforces the maximum number of times a user can attempt
50291+ to authorize themselves with the grsecurity RBAC system before being
50292+ denied the ability to attempt authorization again for a specified time.
50293+ The lower the number, the harder it will be to brute-force a password.
50294+
50295+config GRKERNSEC_ACL_TIMEOUT
50296+ int "Time to wait after max password tries, in seconds"
50297+ default 30
50298+ help
50299+ This option specifies the time the user must wait after attempting to
50300+ authorize to the RBAC system with the maximum number of invalid
50301+ passwords. The higher the number, the harder it will be to brute-force
50302+ a password.
50303+
50304+endmenu
50305+menu "Filesystem Protections"
50306+depends on GRKERNSEC
50307+
50308+config GRKERNSEC_PROC
50309+ bool "Proc restrictions"
50310+ help
50311+ If you say Y here, the permissions of the /proc filesystem
50312+ will be altered to enhance system security and privacy. You MUST
50313+ choose either a user only restriction or a user and group restriction.
50314+ Depending upon the option you choose, you can either restrict users to
50315+ see only the processes they themselves run, or choose a group that can
50316+ view all processes and files normally restricted to root if you choose
50317+ the "restrict to user only" option. NOTE: If you're running identd as
50318+ a non-root user, you will have to run it as the group you specify here.
50319+
50320+config GRKERNSEC_PROC_USER
50321+ bool "Restrict /proc to user only"
50322+ depends on GRKERNSEC_PROC
50323+ help
50324+ If you say Y here, non-root users will only be able to view their own
50325+ processes, and restricts them from viewing network-related information,
50326+ and viewing kernel symbol and module information.
50327+
50328+config GRKERNSEC_PROC_USERGROUP
50329+ bool "Allow special group"
50330+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50331+ help
50332+ If you say Y here, you will be able to select a group that will be
50333+ able to view all processes and network-related information. If you've
50334+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50335+ remain hidden. This option is useful if you want to run identd as
50336+ a non-root user.
50337+
50338+config GRKERNSEC_PROC_GID
50339+ int "GID for special group"
50340+ depends on GRKERNSEC_PROC_USERGROUP
50341+ default 1001
50342+
50343+config GRKERNSEC_PROC_ADD
50344+ bool "Additional restrictions"
50345+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50346+ help
50347+ If you say Y here, additional restrictions will be placed on
50348+ /proc that keep normal users from viewing device information and
50349+ slabinfo information that could be useful for exploits.
50350+
50351+config GRKERNSEC_LINK
50352+ bool "Linking restrictions"
50353+ help
50354+ If you say Y here, /tmp race exploits will be prevented, since users
50355+ will no longer be able to follow symlinks owned by other users in
50356+ world-writable +t directories (e.g. /tmp), unless the owner of the
50357+ symlink is the owner of the directory. Users will also not be
50358+ able to hardlink to files they do not own. If the sysctl option is
50359+ enabled, a sysctl option with name "linking_restrictions" is created.
50360+
50361+config GRKERNSEC_FIFO
50362+ bool "FIFO restrictions"
50363+ help
50364+ If you say Y here, users will not be able to write to FIFOs they don't
50365+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50366+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
50367+ option is enabled, a sysctl option with name "fifo_restrictions" is
50368+ created.
50369+
50370+config GRKERNSEC_SYSFS_RESTRICT
50371+ bool "Sysfs/debugfs restriction"
50372+ depends on SYSFS
50373+ help
50374+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50375+ any filesystem normally mounted under it (e.g. debugfs) will only
50376+ be accessible by root. These filesystems generally provide access
50377+ to hardware and debug information that isn't appropriate for unprivileged
50378+ users of the system. Sysfs and debugfs have also become a large source
50379+ of new vulnerabilities, ranging from infoleaks to local compromise.
50380+ There has been very little oversight with an eye toward security involved
50381+ in adding new exporters of information to these filesystems, so their
50382+ use is discouraged.
50383+ This option is equivalent to a chmod 0700 of the mount paths.
50384+
50385+config GRKERNSEC_ROFS
50386+ bool "Runtime read-only mount protection"
50387+ help
50388+ If you say Y here, a sysctl option with name "romount_protect" will
50389+ be created. By setting this option to 1 at runtime, filesystems
50390+ will be protected in the following ways:
50391+ * No new writable mounts will be allowed
50392+ * Existing read-only mounts won't be able to be remounted read/write
50393+ * Write operations will be denied on all block devices
50394+ This option acts independently of grsec_lock: once it is set to 1,
50395+ it cannot be turned off. Therefore, please be mindful of the resulting
50396+ behavior if this option is enabled in an init script on a read-only
50397+ filesystem. This feature is mainly intended for secure embedded systems.
50398+
50399+config GRKERNSEC_CHROOT
50400+ bool "Chroot jail restrictions"
50401+ help
50402+ If you say Y here, you will be able to choose several options that will
50403+ make breaking out of a chrooted jail much more difficult. If you
50404+ encounter no software incompatibilities with the following options, it
50405+ is recommended that you enable each one.
50406+
50407+config GRKERNSEC_CHROOT_MOUNT
50408+ bool "Deny mounts"
50409+ depends on GRKERNSEC_CHROOT
50410+ help
50411+ If you say Y here, processes inside a chroot will not be able to
50412+ mount or remount filesystems. If the sysctl option is enabled, a
50413+ sysctl option with name "chroot_deny_mount" is created.
50414+
50415+config GRKERNSEC_CHROOT_DOUBLE
50416+ bool "Deny double-chroots"
50417+ depends on GRKERNSEC_CHROOT
50418+ help
50419+ If you say Y here, processes inside a chroot will not be able to chroot
50420+ again outside the chroot. This is a widely used method of breaking
50421+ out of a chroot jail and should not be allowed. If the sysctl
50422+ option is enabled, a sysctl option with name
50423+ "chroot_deny_chroot" is created.
50424+
50425+config GRKERNSEC_CHROOT_PIVOT
50426+ bool "Deny pivot_root in chroot"
50427+ depends on GRKERNSEC_CHROOT
50428+ help
50429+ If you say Y here, processes inside a chroot will not be able to use
50430+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50431+ works similarly to chroot in that it changes the root filesystem. This
50432+ function could be misused in a chrooted process to attempt to break out
50433+ of the chroot, and therefore should not be allowed. If the sysctl
50434+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50435+ created.
50436+
50437+config GRKERNSEC_CHROOT_CHDIR
50438+ bool "Enforce chdir(\"/\") on all chroots"
50439+ depends on GRKERNSEC_CHROOT
50440+ help
50441+ If you say Y here, the current working directory of all newly-chrooted
50442+ applications will be set to the root directory of the chroot.
50443+ The man page on chroot(2) states:
50444+ Note that this call does not change the current working
50445+ directory, so that `.' can be outside the tree rooted at
50446+ `/'. In particular, the super-user can escape from a
50447+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50448+
50449+ It is recommended that you say Y here, since it's not known to break
50450+ any software. If the sysctl option is enabled, a sysctl option with
50451+ name "chroot_enforce_chdir" is created.
50452+
50453+config GRKERNSEC_CHROOT_CHMOD
50454+ bool "Deny (f)chmod +s"
50455+ depends on GRKERNSEC_CHROOT
50456+ help
50457+ If you say Y here, processes inside a chroot will not be able to chmod
50458+ or fchmod files to make them have suid or sgid bits. This protects
50459+ against another published method of breaking a chroot. If the sysctl
50460+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50461+ created.
50462+
50463+config GRKERNSEC_CHROOT_FCHDIR
50464+ bool "Deny fchdir out of chroot"
50465+ depends on GRKERNSEC_CHROOT
50466+ help
50467+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50468+ to a file descriptor of the chrooting process that points to a directory
50469+ outside the filesystem will be stopped. If the sysctl option
50470+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50471+
50472+config GRKERNSEC_CHROOT_MKNOD
50473+ bool "Deny mknod"
50474+ depends on GRKERNSEC_CHROOT
50475+ help
50476+ If you say Y here, processes inside a chroot will not be allowed to
50477+ mknod. The problem with using mknod inside a chroot is that it
50478+ would allow an attacker to create a device entry that is the same
50479+ as one on the physical root of your system, which could range from
50480+ anything from the console device to a device for your harddrive (which
50481+ they could then use to wipe the drive or steal data). It is recommended
50482+ that you say Y here, unless you run into software incompatibilities.
50483+ If the sysctl option is enabled, a sysctl option with name
50484+ "chroot_deny_mknod" is created.
50485+
50486+config GRKERNSEC_CHROOT_SHMAT
50487+ bool "Deny shmat() out of chroot"
50488+ depends on GRKERNSEC_CHROOT
50489+ help
50490+ If you say Y here, processes inside a chroot will not be able to attach
50491+ to shared memory segments that were created outside of the chroot jail.
50492+ It is recommended that you say Y here. If the sysctl option is enabled,
50493+ a sysctl option with name "chroot_deny_shmat" is created.
50494+
50495+config GRKERNSEC_CHROOT_UNIX
50496+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50497+ depends on GRKERNSEC_CHROOT
50498+ help
50499+ If you say Y here, processes inside a chroot will not be able to
50500+ connect to abstract (meaning not belonging to a filesystem) Unix
50501+ domain sockets that were bound outside of a chroot. It is recommended
50502+ that you say Y here. If the sysctl option is enabled, a sysctl option
50503+ with name "chroot_deny_unix" is created.
50504+
50505+config GRKERNSEC_CHROOT_FINDTASK
50506+ bool "Protect outside processes"
50507+ depends on GRKERNSEC_CHROOT
50508+ help
50509+ If you say Y here, processes inside a chroot will not be able to
50510+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50511+ getsid, or view any process outside of the chroot. If the sysctl
50512+ option is enabled, a sysctl option with name "chroot_findtask" is
50513+ created.
50514+
50515+config GRKERNSEC_CHROOT_NICE
50516+ bool "Restrict priority changes"
50517+ depends on GRKERNSEC_CHROOT
50518+ help
50519+ If you say Y here, processes inside a chroot will not be able to raise
50520+ the priority of processes in the chroot, or alter the priority of
50521+ processes outside the chroot. This provides more security than simply
50522+ removing CAP_SYS_NICE from the process' capability set. If the
50523+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50524+ is created.
50525+
50526+config GRKERNSEC_CHROOT_SYSCTL
50527+ bool "Deny sysctl writes"
50528+ depends on GRKERNSEC_CHROOT
50529+ help
50530+ If you say Y here, an attacker in a chroot will not be able to
50531+ write to sysctl entries, either by sysctl(2) or through a /proc
50532+ interface. It is strongly recommended that you say Y here. If the
50533+ sysctl option is enabled, a sysctl option with name
50534+ "chroot_deny_sysctl" is created.
50535+
50536+config GRKERNSEC_CHROOT_CAPS
50537+ bool "Capability restrictions"
50538+ depends on GRKERNSEC_CHROOT
50539+ help
50540+ If you say Y here, the capabilities on all root processes within a
50541+ chroot jail will be lowered to stop module insertion, raw i/o,
50542+ system and net admin tasks, rebooting the system, modifying immutable
50543+ files, modifying IPC owned by another, and changing the system time.
50544+ This is left as an option because it can break some apps. Disable this
50545+ if your chrooted apps are having problems performing those kinds of
50546+ tasks. If the sysctl option is enabled, a sysctl option with
50547+ name "chroot_caps" is created.
50548+
50549+endmenu
50550+menu "Kernel Auditing"
50551+depends on GRKERNSEC
50552+
50553+config GRKERNSEC_AUDIT_GROUP
50554+ bool "Single group for auditing"
50555+ help
50556+ If you say Y here, the exec, chdir, and (un)mount logging features
50557+ will only operate on a group you specify. This option is recommended
50558+ if you only want to watch certain users instead of having a large
50559+ amount of logs from the entire system. If the sysctl option is enabled,
50560+ a sysctl option with name "audit_group" is created.
50561+
50562+config GRKERNSEC_AUDIT_GID
50563+ int "GID for auditing"
50564+ depends on GRKERNSEC_AUDIT_GROUP
50565+ default 1007
50566+
50567+config GRKERNSEC_EXECLOG
50568+ bool "Exec logging"
50569+ help
50570+ If you say Y here, all execve() calls will be logged (since the
50571+ other exec*() calls are frontends to execve(), all execution
50572+ will be logged). Useful for shell-servers that like to keep track
50573+ of their users. If the sysctl option is enabled, a sysctl option with
50574+ name "exec_logging" is created.
50575+ WARNING: This option when enabled will produce a LOT of logs, especially
50576+ on an active system.
50577+
50578+config GRKERNSEC_RESLOG
50579+ bool "Resource logging"
50580+ help
50581+ If you say Y here, all attempts to overstep resource limits will
50582+ be logged with the resource name, the requested size, and the current
50583+ limit. It is highly recommended that you say Y here. If the sysctl
50584+ option is enabled, a sysctl option with name "resource_logging" is
50585+ created. If the RBAC system is enabled, the sysctl value is ignored.
50586+
50587+config GRKERNSEC_CHROOT_EXECLOG
50588+ bool "Log execs within chroot"
50589+ help
50590+ If you say Y here, all executions inside a chroot jail will be logged
50591+ to syslog. This can cause a large amount of logs if certain
50592+ applications (eg. djb's daemontools) are installed on the system, and
50593+ is therefore left as an option. If the sysctl option is enabled, a
50594+ sysctl option with name "chroot_execlog" is created.
50595+
50596+config GRKERNSEC_AUDIT_PTRACE
50597+ bool "Ptrace logging"
50598+ help
50599+ If you say Y here, all attempts to attach to a process via ptrace
50600+ will be logged. If the sysctl option is enabled, a sysctl option
50601+ with name "audit_ptrace" is created.
50602+
50603+config GRKERNSEC_AUDIT_CHDIR
50604+ bool "Chdir logging"
50605+ help
50606+ If you say Y here, all chdir() calls will be logged. If the sysctl
50607+ option is enabled, a sysctl option with name "audit_chdir" is created.
50608+
50609+config GRKERNSEC_AUDIT_MOUNT
50610+ bool "(Un)Mount logging"
50611+ help
50612+ If you say Y here, all mounts and unmounts will be logged. If the
50613+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50614+ created.
50615+
50616+config GRKERNSEC_SIGNAL
50617+ bool "Signal logging"
50618+ help
50619+ If you say Y here, certain important signals will be logged, such as
50620+ SIGSEGV, which will as a result inform you of when a error in a program
50621+ occurred, which in some cases could mean a possible exploit attempt.
50622+ If the sysctl option is enabled, a sysctl option with name
50623+ "signal_logging" is created.
50624+
50625+config GRKERNSEC_FORKFAIL
50626+ bool "Fork failure logging"
50627+ help
50628+ If you say Y here, all failed fork() attempts will be logged.
50629+ This could suggest a fork bomb, or someone attempting to overstep
50630+ their process limit. If the sysctl option is enabled, a sysctl option
50631+ with name "forkfail_logging" is created.
50632+
50633+config GRKERNSEC_TIME
50634+ bool "Time change logging"
50635+ help
50636+ If you say Y here, any changes of the system clock will be logged.
50637+ If the sysctl option is enabled, a sysctl option with name
50638+ "timechange_logging" is created.
50639+
50640+config GRKERNSEC_PROC_IPADDR
50641+ bool "/proc/<pid>/ipaddr support"
50642+ help
50643+ If you say Y here, a new entry will be added to each /proc/<pid>
50644+ directory that contains the IP address of the person using the task.
50645+ The IP is carried across local TCP and AF_UNIX stream sockets.
50646+ This information can be useful for IDS/IPSes to perform remote response
50647+ to a local attack. The entry is readable by only the owner of the
50648+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50649+ the RBAC system), and thus does not create privacy concerns.
50650+
50651+config GRKERNSEC_RWXMAP_LOG
50652+ bool 'Denied RWX mmap/mprotect logging'
50653+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50654+ help
50655+ If you say Y here, calls to mmap() and mprotect() with explicit
50656+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50657+ denied by the PAX_MPROTECT feature. If the sysctl option is
50658+ enabled, a sysctl option with name "rwxmap_logging" is created.
50659+
50660+config GRKERNSEC_AUDIT_TEXTREL
50661+ bool 'ELF text relocations logging (READ HELP)'
50662+ depends on PAX_MPROTECT
50663+ help
50664+ If you say Y here, text relocations will be logged with the filename
50665+ of the offending library or binary. The purpose of the feature is
50666+ to help Linux distribution developers get rid of libraries and
50667+ binaries that need text relocations which hinder the future progress
50668+ of PaX. Only Linux distribution developers should say Y here, and
50669+ never on a production machine, as this option creates an information
50670+ leak that could aid an attacker in defeating the randomization of
50671+ a single memory region. If the sysctl option is enabled, a sysctl
50672+ option with name "audit_textrel" is created.
50673+
50674+endmenu
50675+
50676+menu "Executable Protections"
50677+depends on GRKERNSEC
50678+
50679+config GRKERNSEC_EXECVE
50680+ bool "Enforce RLIMIT_NPROC on execs"
50681+ help
50682+ If you say Y here, users with a resource limit on processes will
50683+ have the value checked during execve() calls. The current system
50684+ only checks the system limit during fork() calls. If the sysctl option
50685+ is enabled, a sysctl option with name "execve_limiting" is created.
50686+
50687+config GRKERNSEC_DMESG
50688+ bool "Dmesg(8) restriction"
50689+ help
50690+ If you say Y here, non-root users will not be able to use dmesg(8)
50691+ to view up to the last 4kb of messages in the kernel's log buffer.
50692+ The kernel's log buffer often contains kernel addresses and other
50693+ identifying information useful to an attacker in fingerprinting a
50694+ system for a targeted exploit.
50695+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50696+ created.
50697+
50698+config GRKERNSEC_HARDEN_PTRACE
50699+ bool "Deter ptrace-based process snooping"
50700+ help
50701+ If you say Y here, TTY sniffers and other malicious monitoring
50702+ programs implemented through ptrace will be defeated. If you
50703+ have been using the RBAC system, this option has already been
50704+ enabled for several years for all users, with the ability to make
50705+ fine-grained exceptions.
50706+
50707+ This option only affects the ability of non-root users to ptrace
50708+ processes that are not a descendent of the ptracing process.
50709+ This means that strace ./binary and gdb ./binary will still work,
50710+ but attaching to arbitrary processes will not. If the sysctl
50711+ option is enabled, a sysctl option with name "harden_ptrace" is
50712+ created.
50713+
50714+config GRKERNSEC_TPE
50715+ bool "Trusted Path Execution (TPE)"
50716+ help
50717+ If you say Y here, you will be able to choose a gid to add to the
50718+ supplementary groups of users you want to mark as "untrusted."
50719+ These users will not be able to execute any files that are not in
50720+ root-owned directories writable only by root. If the sysctl option
50721+ is enabled, a sysctl option with name "tpe" is created.
50722+
50723+config GRKERNSEC_TPE_ALL
50724+ bool "Partially restrict all non-root users"
50725+ depends on GRKERNSEC_TPE
50726+ help
50727+ If you say Y here, all non-root users will be covered under
50728+ a weaker TPE restriction. This is separate from, and in addition to,
50729+ the main TPE options that you have selected elsewhere. Thus, if a
50730+ "trusted" GID is chosen, this restriction applies to even that GID.
50731+ Under this restriction, all non-root users will only be allowed to
50732+ execute files in directories they own that are not group or
50733+ world-writable, or in directories owned by root and writable only by
50734+ root. If the sysctl option is enabled, a sysctl option with name
50735+ "tpe_restrict_all" is created.
50736+
50737+config GRKERNSEC_TPE_INVERT
50738+ bool "Invert GID option"
50739+ depends on GRKERNSEC_TPE
50740+ help
50741+ If you say Y here, the group you specify in the TPE configuration will
50742+ decide what group TPE restrictions will be *disabled* for. This
50743+ option is useful if you want TPE restrictions to be applied to most
50744+ users on the system. If the sysctl option is enabled, a sysctl option
50745+ with name "tpe_invert" is created. Unlike other sysctl options, this
50746+ entry will default to on for backward-compatibility.
50747+
50748+config GRKERNSEC_TPE_GID
50749+ int "GID for untrusted users"
50750+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50751+ default 1005
50752+ help
50753+ Setting this GID determines what group TPE restrictions will be
50754+ *enabled* for. If the sysctl option is enabled, a sysctl option
50755+ with name "tpe_gid" is created.
50756+
50757+config GRKERNSEC_TPE_GID
50758+ int "GID for trusted users"
50759+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50760+ default 1005
50761+ help
50762+ Setting this GID determines what group TPE restrictions will be
50763+ *disabled* for. If the sysctl option is enabled, a sysctl option
50764+ with name "tpe_gid" is created.
50765+
50766+endmenu
50767+menu "Network Protections"
50768+depends on GRKERNSEC
50769+
50770+config GRKERNSEC_RANDNET
50771+ bool "Larger entropy pools"
50772+ help
50773+ If you say Y here, the entropy pools used for many features of Linux
50774+ and grsecurity will be doubled in size. Since several grsecurity
50775+ features use additional randomness, it is recommended that you say Y
50776+ here. Saying Y here has a similar effect as modifying
50777+ /proc/sys/kernel/random/poolsize.
50778+
50779+config GRKERNSEC_BLACKHOLE
50780+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50781+ help
50782+ If you say Y here, neither TCP resets nor ICMP
50783+ destination-unreachable packets will be sent in response to packets
50784+ sent to ports for which no associated listening process exists.
50785+ This feature supports both IPV4 and IPV6 and exempts the
50786+ loopback interface from blackholing. Enabling this feature
50787+ makes a host more resilient to DoS attacks and reduces network
50788+ visibility against scanners.
50789+
50790+ The blackhole feature as-implemented is equivalent to the FreeBSD
50791+ blackhole feature, as it prevents RST responses to all packets, not
50792+ just SYNs. Under most application behavior this causes no
50793+ problems, but applications (like haproxy) may not close certain
50794+ connections in a way that cleanly terminates them on the remote
50795+ end, leaving the remote host in LAST_ACK state. Because of this
50796+ side-effect and to prevent intentional LAST_ACK DoSes, this
50797+ feature also adds automatic mitigation against such attacks.
50798+ The mitigation drastically reduces the amount of time a socket
50799+ can spend in LAST_ACK state. If you're using haproxy and not
50800+ all servers it connects to have this option enabled, consider
50801+ disabling this feature on the haproxy host.
50802+
50803+ If the sysctl option is enabled, two sysctl options with names
50804+ "ip_blackhole" and "lastack_retries" will be created.
50805+ While "ip_blackhole" takes the standard zero/non-zero on/off
50806+ toggle, "lastack_retries" uses the same kinds of values as
50807+ "tcp_retries1" and "tcp_retries2". The default value of 4
50808+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50809+ state.
50810+
50811+config GRKERNSEC_SOCKET
50812+ bool "Socket restrictions"
50813+ help
50814+ If you say Y here, you will be able to choose from several options.
50815+ If you assign a GID on your system and add it to the supplementary
50816+ groups of users you want to restrict socket access to, this patch
50817+ will perform up to three things, based on the option(s) you choose.
50818+
50819+config GRKERNSEC_SOCKET_ALL
50820+ bool "Deny any sockets to group"
50821+ depends on GRKERNSEC_SOCKET
50822+ help
50823+ If you say Y here, you will be able to choose a GID of whose users will
50824+ be unable to connect to other hosts from your machine or run server
50825+ applications from your machine. If the sysctl option is enabled, a
50826+ sysctl option with name "socket_all" is created.
50827+
50828+config GRKERNSEC_SOCKET_ALL_GID
50829+ int "GID to deny all sockets for"
50830+ depends on GRKERNSEC_SOCKET_ALL
50831+ default 1004
50832+ help
50833+ Here you can choose the GID to disable socket access for. Remember to
50834+ add the users you want socket access disabled for to the GID
50835+ specified here. If the sysctl option is enabled, a sysctl option
50836+ with name "socket_all_gid" is created.
50837+
50838+config GRKERNSEC_SOCKET_CLIENT
50839+ bool "Deny client sockets to group"
50840+ depends on GRKERNSEC_SOCKET
50841+ help
50842+ If you say Y here, you will be able to choose a GID of whose users will
50843+ be unable to connect to other hosts from your machine, but will be
50844+ able to run servers. If this option is enabled, all users in the group
50845+ you specify will have to use passive mode when initiating ftp transfers
50846+ from the shell on your machine. If the sysctl option is enabled, a
50847+ sysctl option with name "socket_client" is created.
50848+
50849+config GRKERNSEC_SOCKET_CLIENT_GID
50850+ int "GID to deny client sockets for"
50851+ depends on GRKERNSEC_SOCKET_CLIENT
50852+ default 1003
50853+ help
50854+ Here you can choose the GID to disable client socket access for.
50855+ Remember to add the users you want client socket access disabled for to
50856+ the GID specified here. If the sysctl option is enabled, a sysctl
50857+ option with name "socket_client_gid" is created.
50858+
50859+config GRKERNSEC_SOCKET_SERVER
50860+ bool "Deny server sockets to group"
50861+ depends on GRKERNSEC_SOCKET
50862+ help
50863+ If you say Y here, you will be able to choose a GID of whose users will
50864+ be unable to run server applications from your machine. If the sysctl
50865+ option is enabled, a sysctl option with name "socket_server" is created.
50866+
50867+config GRKERNSEC_SOCKET_SERVER_GID
50868+ int "GID to deny server sockets for"
50869+ depends on GRKERNSEC_SOCKET_SERVER
50870+ default 1002
50871+ help
50872+ Here you can choose the GID to disable server socket access for.
50873+ Remember to add the users you want server socket access disabled for to
50874+ the GID specified here. If the sysctl option is enabled, a sysctl
50875+ option with name "socket_server_gid" is created.
50876+
50877+endmenu
50878+menu "Sysctl support"
50879+depends on GRKERNSEC && SYSCTL
50880+
50881+config GRKERNSEC_SYSCTL
50882+ bool "Sysctl support"
50883+ help
50884+ If you say Y here, you will be able to change the options that
50885+ grsecurity runs with at bootup, without having to recompile your
50886+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50887+ to enable (1) or disable (0) various features. All the sysctl entries
50888+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50889+ All features enabled in the kernel configuration are disabled at boot
50890+ if you do not say Y to the "Turn on features by default" option.
50891+ All options should be set at startup, and the grsec_lock entry should
50892+ be set to a non-zero value after all the options are set.
50893+ *THIS IS EXTREMELY IMPORTANT*
50894+
50895+config GRKERNSEC_SYSCTL_DISTRO
50896+ bool "Extra sysctl support for distro makers (READ HELP)"
50897+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50898+ help
50899+ If you say Y here, additional sysctl options will be created
50900+ for features that affect processes running as root. Therefore,
50901+ it is critical when using this option that the grsec_lock entry be
50902+ enabled after boot. Only distros with prebuilt kernel packages
50903+ with this option enabled that can ensure grsec_lock is enabled
50904+ after boot should use this option.
50905+ *Failure to set grsec_lock after boot makes all grsec features
50906+ this option covers useless*
50907+
50908+ Currently this option creates the following sysctl entries:
50909+ "Disable Privileged I/O": "disable_priv_io"
50910+
50911+config GRKERNSEC_SYSCTL_ON
50912+ bool "Turn on features by default"
50913+ depends on GRKERNSEC_SYSCTL
50914+ help
50915+ If you say Y here, instead of having all features enabled in the
50916+ kernel configuration disabled at boot time, the features will be
50917+ enabled at boot time. It is recommended you say Y here unless
50918+ there is some reason you would want all sysctl-tunable features to
50919+ be disabled by default. As mentioned elsewhere, it is important
50920+ to enable the grsec_lock entry once you have finished modifying
50921+ the sysctl entries.
50922+
50923+endmenu
50924+menu "Logging Options"
50925+depends on GRKERNSEC
50926+
50927+config GRKERNSEC_FLOODTIME
50928+ int "Seconds in between log messages (minimum)"
50929+ default 10
50930+ help
50931+ This option allows you to enforce the number of seconds between
50932+ grsecurity log messages. The default should be suitable for most
50933+ people, however, if you choose to change it, choose a value small enough
50934+ to allow informative logs to be produced, but large enough to
50935+ prevent flooding.
50936+
50937+config GRKERNSEC_FLOODBURST
50938+ int "Number of messages in a burst (maximum)"
50939+ default 4
50940+ help
50941+ This option allows you to choose the maximum number of messages allowed
50942+ within the flood time interval you chose in a separate option. The
50943+ default should be suitable for most people, however if you find that
50944+ many of your logs are being interpreted as flooding, you may want to
50945+ raise this value.
50946+
50947+endmenu
50948+
50949+endmenu
50950diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
50951--- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50952+++ linux-2.6.39.4/grsecurity/Makefile 2011-08-05 19:44:37.000000000 -0400
50953@@ -0,0 +1,33 @@
50954+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50955+# during 2001-2009 it has been completely redesigned by Brad Spengler
50956+# into an RBAC system
50957+#
50958+# All code in this directory and various hooks inserted throughout the kernel
50959+# are copyright Brad Spengler - Open Source Security, Inc., and released
50960+# under the GPL v2 or higher
50961+
50962+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50963+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
50964+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50965+
50966+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50967+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50968+ gracl_learn.o grsec_log.o
50969+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50970+
50971+ifdef CONFIG_NET
50972+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50973+endif
50974+
50975+ifndef CONFIG_GRKERNSEC
50976+obj-y += grsec_disabled.o
50977+endif
50978+
50979+ifdef CONFIG_GRKERNSEC_HIDESYM
50980+extra-y := grsec_hidesym.o
50981+$(obj)/grsec_hidesym.o:
50982+ @-chmod -f 500 /boot
50983+ @-chmod -f 500 /lib/modules
50984+ @-chmod -f 700 .
50985+ @echo ' grsec: protected kernel image paths'
50986+endif
50987diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
50988--- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
50989+++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
50990@@ -107,7 +107,7 @@ struct acpi_device_ops {
50991 acpi_op_bind bind;
50992 acpi_op_unbind unbind;
50993 acpi_op_notify notify;
50994-};
50995+} __no_const;
50996
50997 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50998
50999diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51000--- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51001+++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51002@@ -22,6 +22,12 @@
51003
51004 typedef atomic64_t atomic_long_t;
51005
51006+#ifdef CONFIG_PAX_REFCOUNT
51007+typedef atomic64_unchecked_t atomic_long_unchecked_t;
51008+#else
51009+typedef atomic64_t atomic_long_unchecked_t;
51010+#endif
51011+
51012 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51013
51014 static inline long atomic_long_read(atomic_long_t *l)
51015@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51016 return (long)atomic64_read(v);
51017 }
51018
51019+#ifdef CONFIG_PAX_REFCOUNT
51020+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51021+{
51022+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51023+
51024+ return (long)atomic64_read_unchecked(v);
51025+}
51026+#endif
51027+
51028 static inline void atomic_long_set(atomic_long_t *l, long i)
51029 {
51030 atomic64_t *v = (atomic64_t *)l;
51031@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51032 atomic64_set(v, i);
51033 }
51034
51035+#ifdef CONFIG_PAX_REFCOUNT
51036+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51037+{
51038+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51039+
51040+ atomic64_set_unchecked(v, i);
51041+}
51042+#endif
51043+
51044 static inline void atomic_long_inc(atomic_long_t *l)
51045 {
51046 atomic64_t *v = (atomic64_t *)l;
51047@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51048 atomic64_inc(v);
51049 }
51050
51051+#ifdef CONFIG_PAX_REFCOUNT
51052+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51053+{
51054+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51055+
51056+ atomic64_inc_unchecked(v);
51057+}
51058+#endif
51059+
51060 static inline void atomic_long_dec(atomic_long_t *l)
51061 {
51062 atomic64_t *v = (atomic64_t *)l;
51063@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51064 atomic64_dec(v);
51065 }
51066
51067+#ifdef CONFIG_PAX_REFCOUNT
51068+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51069+{
51070+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51071+
51072+ atomic64_dec_unchecked(v);
51073+}
51074+#endif
51075+
51076 static inline void atomic_long_add(long i, atomic_long_t *l)
51077 {
51078 atomic64_t *v = (atomic64_t *)l;
51079@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51080 atomic64_add(i, v);
51081 }
51082
51083+#ifdef CONFIG_PAX_REFCOUNT
51084+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51085+{
51086+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51087+
51088+ atomic64_add_unchecked(i, v);
51089+}
51090+#endif
51091+
51092 static inline void atomic_long_sub(long i, atomic_long_t *l)
51093 {
51094 atomic64_t *v = (atomic64_t *)l;
51095@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51096 atomic64_sub(i, v);
51097 }
51098
51099+#ifdef CONFIG_PAX_REFCOUNT
51100+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51101+{
51102+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51103+
51104+ atomic64_sub_unchecked(i, v);
51105+}
51106+#endif
51107+
51108 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51109 {
51110 atomic64_t *v = (atomic64_t *)l;
51111@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51112 return (long)atomic64_inc_return(v);
51113 }
51114
51115+#ifdef CONFIG_PAX_REFCOUNT
51116+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51117+{
51118+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51119+
51120+ return (long)atomic64_inc_return_unchecked(v);
51121+}
51122+#endif
51123+
51124 static inline long atomic_long_dec_return(atomic_long_t *l)
51125 {
51126 atomic64_t *v = (atomic64_t *)l;
51127@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51128
51129 typedef atomic_t atomic_long_t;
51130
51131+#ifdef CONFIG_PAX_REFCOUNT
51132+typedef atomic_unchecked_t atomic_long_unchecked_t;
51133+#else
51134+typedef atomic_t atomic_long_unchecked_t;
51135+#endif
51136+
51137 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51138 static inline long atomic_long_read(atomic_long_t *l)
51139 {
51140@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51141 return (long)atomic_read(v);
51142 }
51143
51144+#ifdef CONFIG_PAX_REFCOUNT
51145+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51146+{
51147+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51148+
51149+ return (long)atomic_read_unchecked(v);
51150+}
51151+#endif
51152+
51153 static inline void atomic_long_set(atomic_long_t *l, long i)
51154 {
51155 atomic_t *v = (atomic_t *)l;
51156@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51157 atomic_set(v, i);
51158 }
51159
51160+#ifdef CONFIG_PAX_REFCOUNT
51161+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51162+{
51163+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51164+
51165+ atomic_set_unchecked(v, i);
51166+}
51167+#endif
51168+
51169 static inline void atomic_long_inc(atomic_long_t *l)
51170 {
51171 atomic_t *v = (atomic_t *)l;
51172@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51173 atomic_inc(v);
51174 }
51175
51176+#ifdef CONFIG_PAX_REFCOUNT
51177+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51178+{
51179+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51180+
51181+ atomic_inc_unchecked(v);
51182+}
51183+#endif
51184+
51185 static inline void atomic_long_dec(atomic_long_t *l)
51186 {
51187 atomic_t *v = (atomic_t *)l;
51188@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51189 atomic_dec(v);
51190 }
51191
51192+#ifdef CONFIG_PAX_REFCOUNT
51193+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51194+{
51195+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51196+
51197+ atomic_dec_unchecked(v);
51198+}
51199+#endif
51200+
51201 static inline void atomic_long_add(long i, atomic_long_t *l)
51202 {
51203 atomic_t *v = (atomic_t *)l;
51204@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51205 atomic_add(i, v);
51206 }
51207
51208+#ifdef CONFIG_PAX_REFCOUNT
51209+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51210+{
51211+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51212+
51213+ atomic_add_unchecked(i, v);
51214+}
51215+#endif
51216+
51217 static inline void atomic_long_sub(long i, atomic_long_t *l)
51218 {
51219 atomic_t *v = (atomic_t *)l;
51220@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51221 atomic_sub(i, v);
51222 }
51223
51224+#ifdef CONFIG_PAX_REFCOUNT
51225+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51226+{
51227+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51228+
51229+ atomic_sub_unchecked(i, v);
51230+}
51231+#endif
51232+
51233 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51234 {
51235 atomic_t *v = (atomic_t *)l;
51236@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51237 return (long)atomic_inc_return(v);
51238 }
51239
51240+#ifdef CONFIG_PAX_REFCOUNT
51241+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51242+{
51243+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51244+
51245+ return (long)atomic_inc_return_unchecked(v);
51246+}
51247+#endif
51248+
51249 static inline long atomic_long_dec_return(atomic_long_t *l)
51250 {
51251 atomic_t *v = (atomic_t *)l;
51252@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51253
51254 #endif /* BITS_PER_LONG == 64 */
51255
51256+#ifdef CONFIG_PAX_REFCOUNT
51257+static inline void pax_refcount_needs_these_functions(void)
51258+{
51259+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
51260+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51261+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51262+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51263+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51264+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51265+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51266+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51267+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51268+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51269+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51270+
51271+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51272+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51273+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51274+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51275+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51276+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51277+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51278+}
51279+#else
51280+#define atomic_read_unchecked(v) atomic_read(v)
51281+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51282+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51283+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51284+#define atomic_inc_unchecked(v) atomic_inc(v)
51285+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51286+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51287+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51288+#define atomic_dec_unchecked(v) atomic_dec(v)
51289+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51290+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51291+
51292+#define atomic_long_read_unchecked(v) atomic_long_read(v)
51293+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51294+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51295+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51296+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51297+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51298+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51299+#endif
51300+
51301 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51302diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51303--- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51304+++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51305@@ -6,7 +6,7 @@
51306 * cache lines need to provide their own cache.h.
51307 */
51308
51309-#define L1_CACHE_SHIFT 5
51310-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51311+#define L1_CACHE_SHIFT 5UL
51312+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51313
51314 #endif /* __ASM_GENERIC_CACHE_H */
51315diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51316--- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51317+++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51318@@ -46,6 +46,8 @@ typedef unsigned int u32;
51319 typedef signed long s64;
51320 typedef unsigned long u64;
51321
51322+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51323+
51324 #define S8_C(x) x
51325 #define U8_C(x) x ## U
51326 #define S16_C(x) x
51327diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51328--- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51329+++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51330@@ -51,6 +51,8 @@ typedef unsigned int u32;
51331 typedef signed long long s64;
51332 typedef unsigned long long u64;
51333
51334+typedef unsigned long long intoverflow_t;
51335+
51336 #define S8_C(x) x
51337 #define U8_C(x) x ## U
51338 #define S16_C(x) x
51339diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51340--- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51341+++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51342@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51343 KMAP_D(17) KM_NMI,
51344 KMAP_D(18) KM_NMI_PTE,
51345 KMAP_D(19) KM_KDB,
51346+KMAP_D(20) KM_CLEARPAGE,
51347 /*
51348 * Remember to update debug_kmap_atomic() when adding new kmap types!
51349 */
51350-KMAP_D(20) KM_TYPE_NR
51351+KMAP_D(21) KM_TYPE_NR
51352 };
51353
51354 #undef KMAP_D
51355diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51356--- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51357+++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51358@@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51359 #endif /* __HAVE_ARCH_PMD_WRITE */
51360 #endif
51361
51362+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51363+static inline unsigned long pax_open_kernel(void) { return 0; }
51364+#endif
51365+
51366+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51367+static inline unsigned long pax_close_kernel(void) { return 0; }
51368+#endif
51369+
51370 #endif /* !__ASSEMBLY__ */
51371
51372 #endif /* _ASM_GENERIC_PGTABLE_H */
51373diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51374--- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51375+++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51376@@ -1,14 +1,19 @@
51377 #ifndef _PGTABLE_NOPMD_H
51378 #define _PGTABLE_NOPMD_H
51379
51380-#ifndef __ASSEMBLY__
51381-
51382 #include <asm-generic/pgtable-nopud.h>
51383
51384-struct mm_struct;
51385-
51386 #define __PAGETABLE_PMD_FOLDED
51387
51388+#define PMD_SHIFT PUD_SHIFT
51389+#define PTRS_PER_PMD 1
51390+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51391+#define PMD_MASK (~(PMD_SIZE-1))
51392+
51393+#ifndef __ASSEMBLY__
51394+
51395+struct mm_struct;
51396+
51397 /*
51398 * Having the pmd type consist of a pud gets the size right, and allows
51399 * us to conceptually access the pud entry that this pmd is folded into
51400@@ -16,11 +21,6 @@ struct mm_struct;
51401 */
51402 typedef struct { pud_t pud; } pmd_t;
51403
51404-#define PMD_SHIFT PUD_SHIFT
51405-#define PTRS_PER_PMD 1
51406-#define PMD_SIZE (1UL << PMD_SHIFT)
51407-#define PMD_MASK (~(PMD_SIZE-1))
51408-
51409 /*
51410 * The "pud_xxx()" functions here are trivial for a folded two-level
51411 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51412diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51413--- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51414+++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51415@@ -1,10 +1,15 @@
51416 #ifndef _PGTABLE_NOPUD_H
51417 #define _PGTABLE_NOPUD_H
51418
51419-#ifndef __ASSEMBLY__
51420-
51421 #define __PAGETABLE_PUD_FOLDED
51422
51423+#define PUD_SHIFT PGDIR_SHIFT
51424+#define PTRS_PER_PUD 1
51425+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51426+#define PUD_MASK (~(PUD_SIZE-1))
51427+
51428+#ifndef __ASSEMBLY__
51429+
51430 /*
51431 * Having the pud type consist of a pgd gets the size right, and allows
51432 * us to conceptually access the pgd entry that this pud is folded into
51433@@ -12,11 +17,6 @@
51434 */
51435 typedef struct { pgd_t pgd; } pud_t;
51436
51437-#define PUD_SHIFT PGDIR_SHIFT
51438-#define PTRS_PER_PUD 1
51439-#define PUD_SIZE (1UL << PUD_SHIFT)
51440-#define PUD_MASK (~(PUD_SIZE-1))
51441-
51442 /*
51443 * The "pgd_xxx()" functions here are trivial for a folded two-level
51444 * setup: the pud is never bad, and a pud always exists (as it's folded
51445diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51446--- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51447+++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51448@@ -213,6 +213,7 @@
51449 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51450 VMLINUX_SYMBOL(__start_rodata) = .; \
51451 *(.rodata) *(.rodata.*) \
51452+ *(.data..read_only) \
51453 *(__vermagic) /* Kernel version magic */ \
51454 . = ALIGN(8); \
51455 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51456@@ -707,14 +708,15 @@
51457 * section in the linker script will go there too. @phdr should have
51458 * a leading colon.
51459 *
51460- * Note that this macros defines __per_cpu_load as an absolute symbol.
51461+ * Note that this macros defines per_cpu_load as an absolute symbol.
51462 * If there is no need to put the percpu section at a predetermined
51463 * address, use PERCPU().
51464 */
51465 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51466- VMLINUX_SYMBOL(__per_cpu_load) = .; \
51467- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51468+ per_cpu_load = .; \
51469+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51470 - LOAD_OFFSET) { \
51471+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51472 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51473 *(.data..percpu..first) \
51474 . = ALIGN(PAGE_SIZE); \
51475@@ -726,7 +728,7 @@
51476 *(.data..percpu..shared_aligned) \
51477 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51478 } phdr \
51479- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51480+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51481
51482 /**
51483 * PERCPU - define output section for percpu area, simple version
51484diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51485--- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51486+++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51487@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51488
51489 /* disable crtc when not in use - more explicit than dpms off */
51490 void (*disable)(struct drm_crtc *crtc);
51491-};
51492+} __no_const;
51493
51494 struct drm_encoder_helper_funcs {
51495 void (*dpms)(struct drm_encoder *encoder, int mode);
51496@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51497 struct drm_connector *connector);
51498 /* disable encoder when not in use - more explicit than dpms off */
51499 void (*disable)(struct drm_encoder *encoder);
51500-};
51501+} __no_const;
51502
51503 struct drm_connector_helper_funcs {
51504 int (*get_modes)(struct drm_connector *connector);
51505diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51506--- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51507+++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51508@@ -73,6 +73,7 @@
51509 #include <linux/workqueue.h>
51510 #include <linux/poll.h>
51511 #include <asm/pgalloc.h>
51512+#include <asm/local.h>
51513 #include "drm.h"
51514
51515 #include <linux/idr.h>
51516@@ -1023,7 +1024,7 @@ struct drm_device {
51517
51518 /** \name Usage Counters */
51519 /*@{ */
51520- int open_count; /**< Outstanding files open */
51521+ local_t open_count; /**< Outstanding files open */
51522 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51523 atomic_t vma_count; /**< Outstanding vma areas open */
51524 int buf_use; /**< Buffers in use -- cannot alloc */
51525@@ -1034,7 +1035,7 @@ struct drm_device {
51526 /*@{ */
51527 unsigned long counters;
51528 enum drm_stat_type types[15];
51529- atomic_t counts[15];
51530+ atomic_unchecked_t counts[15];
51531 /*@} */
51532
51533 struct list_head filelist;
51534diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51535--- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51536+++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51537@@ -47,7 +47,7 @@
51538
51539 struct ttm_mem_shrink {
51540 int (*do_shrink) (struct ttm_mem_shrink *);
51541-};
51542+} __no_const;
51543
51544 /**
51545 * struct ttm_mem_global - Global memory accounting structure.
51546diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51547--- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51548+++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51549@@ -39,6 +39,14 @@ enum machine_type {
51550 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51551 };
51552
51553+/* Constants for the N_FLAGS field */
51554+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51555+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51556+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51557+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51558+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51559+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51560+
51561 #if !defined (N_MAGIC)
51562 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51563 #endif
51564diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51565--- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51566+++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51567@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51568 #endif
51569
51570 struct k_atm_aal_stats {
51571-#define __HANDLE_ITEM(i) atomic_t i
51572+#define __HANDLE_ITEM(i) atomic_unchecked_t i
51573 __AAL_STAT_ITEMS
51574 #undef __HANDLE_ITEM
51575 };
51576diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51577--- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51578+++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51579@@ -92,6 +92,7 @@ struct linux_binfmt {
51580 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51581 int (*load_shlib)(struct file *);
51582 int (*core_dump)(struct coredump_params *cprm);
51583+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51584 unsigned long min_coredump; /* minimal dump size */
51585 };
51586
51587diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51588--- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51589+++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51590@@ -1307,7 +1307,7 @@ struct block_device_operations {
51591 int (*getgeo)(struct block_device *, struct hd_geometry *);
51592 /* this callback is with swap_lock and sometimes page table lock held */
51593 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51594- struct module *owner;
51595+ struct module * const owner;
51596 };
51597
51598 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51599diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51600--- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51601+++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51602@@ -161,7 +161,7 @@ struct blk_trace {
51603 struct dentry *dir;
51604 struct dentry *dropped_file;
51605 struct dentry *msg_file;
51606- atomic_t dropped;
51607+ atomic_unchecked_t dropped;
51608 };
51609
51610 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51611diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51612--- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51613+++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51614@@ -42,51 +42,51 @@
51615
51616 static inline __le64 __cpu_to_le64p(const __u64 *p)
51617 {
51618- return (__force __le64)*p;
51619+ return (__force const __le64)*p;
51620 }
51621 static inline __u64 __le64_to_cpup(const __le64 *p)
51622 {
51623- return (__force __u64)*p;
51624+ return (__force const __u64)*p;
51625 }
51626 static inline __le32 __cpu_to_le32p(const __u32 *p)
51627 {
51628- return (__force __le32)*p;
51629+ return (__force const __le32)*p;
51630 }
51631 static inline __u32 __le32_to_cpup(const __le32 *p)
51632 {
51633- return (__force __u32)*p;
51634+ return (__force const __u32)*p;
51635 }
51636 static inline __le16 __cpu_to_le16p(const __u16 *p)
51637 {
51638- return (__force __le16)*p;
51639+ return (__force const __le16)*p;
51640 }
51641 static inline __u16 __le16_to_cpup(const __le16 *p)
51642 {
51643- return (__force __u16)*p;
51644+ return (__force const __u16)*p;
51645 }
51646 static inline __be64 __cpu_to_be64p(const __u64 *p)
51647 {
51648- return (__force __be64)__swab64p(p);
51649+ return (__force const __be64)__swab64p(p);
51650 }
51651 static inline __u64 __be64_to_cpup(const __be64 *p)
51652 {
51653- return __swab64p((__u64 *)p);
51654+ return __swab64p((const __u64 *)p);
51655 }
51656 static inline __be32 __cpu_to_be32p(const __u32 *p)
51657 {
51658- return (__force __be32)__swab32p(p);
51659+ return (__force const __be32)__swab32p(p);
51660 }
51661 static inline __u32 __be32_to_cpup(const __be32 *p)
51662 {
51663- return __swab32p((__u32 *)p);
51664+ return __swab32p((const __u32 *)p);
51665 }
51666 static inline __be16 __cpu_to_be16p(const __u16 *p)
51667 {
51668- return (__force __be16)__swab16p(p);
51669+ return (__force const __be16)__swab16p(p);
51670 }
51671 static inline __u16 __be16_to_cpup(const __be16 *p)
51672 {
51673- return __swab16p((__u16 *)p);
51674+ return __swab16p((const __u16 *)p);
51675 }
51676 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51677 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51678diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51679--- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51680+++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51681@@ -16,6 +16,10 @@
51682 #define __read_mostly
51683 #endif
51684
51685+#ifndef __read_only
51686+#define __read_only __read_mostly
51687+#endif
51688+
51689 #ifndef ____cacheline_aligned
51690 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51691 #endif
51692diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51693--- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51694+++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51695@@ -547,6 +547,9 @@ extern bool capable(int cap);
51696 extern bool ns_capable(struct user_namespace *ns, int cap);
51697 extern bool task_ns_capable(struct task_struct *t, int cap);
51698 extern bool nsown_capable(int cap);
51699+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51700+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51701+extern bool capable_nolog(int cap);
51702
51703 /* audit system wants to get cap info from files as well */
51704 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51705diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51706--- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51707+++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51708@@ -31,6 +31,9 @@
51709
51710
51711 #if __GNUC_MINOR__ >= 5
51712+
51713+#define __no_const __attribute__((no_const))
51714+
51715 /*
51716 * Mark a position in code as unreachable. This can be used to
51717 * suppress control flow warnings after asm blocks that transfer
51718@@ -46,6 +49,11 @@
51719 #define __noclone __attribute__((__noclone__))
51720
51721 #endif
51722+
51723+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51724+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51725+#define __bos0(ptr) __bos((ptr), 0)
51726+#define __bos1(ptr) __bos((ptr), 1)
51727 #endif
51728
51729 #if __GNUC_MINOR__ > 0
51730diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51731--- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51732+++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51733@@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51734 # define __attribute_const__ /* unimplemented */
51735 #endif
51736
51737+#ifndef __no_const
51738+# define __no_const
51739+#endif
51740+
51741 /*
51742 * Tell gcc if a function is cold. The compiler will assume any path
51743 * directly leading to the call is unlikely.
51744@@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51745 #define __cold
51746 #endif
51747
51748+#ifndef __alloc_size
51749+#define __alloc_size(...)
51750+#endif
51751+
51752+#ifndef __bos
51753+#define __bos(ptr, arg)
51754+#endif
51755+
51756+#ifndef __bos0
51757+#define __bos0(ptr)
51758+#endif
51759+
51760+#ifndef __bos1
51761+#define __bos1(ptr)
51762+#endif
51763+
51764 /* Simple shorthand for a section definition */
51765 #ifndef __section
51766 # define __section(S) __attribute__ ((__section__(#S)))
51767@@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51768 * use is to mediate communication between process-level code and irq/NMI
51769 * handlers, all running on the same CPU.
51770 */
51771-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51772+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51773+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51774
51775 #endif /* __LINUX_COMPILER_H */
51776diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51777--- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51778+++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51779@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51780 * nodemask.
51781 */
51782 smp_mb();
51783- --ACCESS_ONCE(current->mems_allowed_change_disable);
51784+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51785 }
51786
51787 static inline void set_mems_allowed(nodemask_t nodemask)
51788diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51789--- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51790+++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51791@@ -361,7 +361,7 @@ struct cipher_tfm {
51792 const u8 *key, unsigned int keylen);
51793 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51794 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51795-};
51796+} __no_const;
51797
51798 struct hash_tfm {
51799 int (*init)(struct hash_desc *desc);
51800@@ -382,13 +382,13 @@ struct compress_tfm {
51801 int (*cot_decompress)(struct crypto_tfm *tfm,
51802 const u8 *src, unsigned int slen,
51803 u8 *dst, unsigned int *dlen);
51804-};
51805+} __no_const;
51806
51807 struct rng_tfm {
51808 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51809 unsigned int dlen);
51810 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51811-};
51812+} __no_const;
51813
51814 #define crt_ablkcipher crt_u.ablkcipher
51815 #define crt_aead crt_u.aead
51816diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51817--- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51818+++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51819@@ -77,7 +77,7 @@ static void free(void *where)
51820 * warnings when not needed (indeed large_malloc / large_free are not
51821 * needed by inflate */
51822
51823-#define malloc(a) kmalloc(a, GFP_KERNEL)
51824+#define malloc(a) kmalloc((a), GFP_KERNEL)
51825 #define free(a) kfree(a)
51826
51827 #define large_malloc(a) vmalloc(a)
51828diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51829--- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
51830+++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
51831@@ -49,7 +49,7 @@ struct dma_map_ops {
51832 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51833 int (*dma_supported)(struct device *dev, u64 mask);
51834 int (*set_dma_mask)(struct device *dev, u64 mask);
51835- int is_phys;
51836+ const int is_phys;
51837 };
51838
51839 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51840diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
51841--- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
51842+++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
51843@@ -409,7 +409,7 @@ struct efivar_operations {
51844 efi_get_variable_t *get_variable;
51845 efi_get_next_variable_t *get_next_variable;
51846 efi_set_variable_t *set_variable;
51847-};
51848+} __no_const;
51849
51850 struct efivars {
51851 /*
51852diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
51853--- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
51854+++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
51855@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51856 #define PT_GNU_EH_FRAME 0x6474e550
51857
51858 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51859+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51860+
51861+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51862+
51863+/* Constants for the e_flags field */
51864+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51865+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51866+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51867+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51868+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51869+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51870
51871 /*
51872 * Extended Numbering
51873@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51874 #define DT_DEBUG 21
51875 #define DT_TEXTREL 22
51876 #define DT_JMPREL 23
51877+#define DT_FLAGS 30
51878+ #define DF_TEXTREL 0x00000004
51879 #define DT_ENCODING 32
51880 #define OLD_DT_LOOS 0x60000000
51881 #define DT_LOOS 0x6000000d
51882@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51883 #define PF_W 0x2
51884 #define PF_X 0x1
51885
51886+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51887+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51888+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51889+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51890+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51891+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51892+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51893+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51894+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51895+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51896+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51897+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51898+
51899 typedef struct elf32_phdr{
51900 Elf32_Word p_type;
51901 Elf32_Off p_offset;
51902@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51903 #define EI_OSABI 7
51904 #define EI_PAD 8
51905
51906+#define EI_PAX 14
51907+
51908 #define ELFMAG0 0x7f /* EI_MAG */
51909 #define ELFMAG1 'E'
51910 #define ELFMAG2 'L'
51911@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
51912 #define elf_note elf32_note
51913 #define elf_addr_t Elf32_Off
51914 #define Elf_Half Elf32_Half
51915+#define elf_dyn Elf32_Dyn
51916
51917 #else
51918
51919@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
51920 #define elf_note elf64_note
51921 #define elf_addr_t Elf64_Off
51922 #define Elf_Half Elf64_Half
51923+#define elf_dyn Elf64_Dyn
51924
51925 #endif
51926
51927diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
51928--- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
51929+++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
51930@@ -429,7 +429,7 @@ struct fw_iso_context {
51931 union {
51932 fw_iso_callback_t sc;
51933 fw_iso_mc_callback_t mc;
51934- } callback;
51935+ } __no_const callback;
51936 void *callback_data;
51937 };
51938
51939diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
51940--- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
51941+++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
51942@@ -113,7 +113,7 @@ struct fscache_operation {
51943 #endif
51944 };
51945
51946-extern atomic_t fscache_op_debug_id;
51947+extern atomic_unchecked_t fscache_op_debug_id;
51948 extern void fscache_op_work_func(struct work_struct *work);
51949
51950 extern void fscache_enqueue_operation(struct fscache_operation *);
51951@@ -133,7 +133,7 @@ static inline void fscache_operation_ini
51952 {
51953 INIT_WORK(&op->work, fscache_op_work_func);
51954 atomic_set(&op->usage, 1);
51955- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51956+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51957 op->processor = processor;
51958 op->release = release;
51959 INIT_LIST_HEAD(&op->pend_link);
51960diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
51961--- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
51962+++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
51963@@ -108,6 +108,11 @@ struct inodes_stat_t {
51964 /* File was opened by fanotify and shouldn't generate fanotify events */
51965 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51966
51967+/* Hack for grsec so as not to require read permission simply to execute
51968+ * a binary
51969+ */
51970+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51971+
51972 /*
51973 * The below are the various read and write types that we support. Some of
51974 * them include behavioral modifiers that send information down to the
51975@@ -1535,7 +1540,7 @@ struct block_device_operations;
51976 * the big kernel lock held in all filesystems.
51977 */
51978 struct file_operations {
51979- struct module *owner;
51980+ struct module * const owner;
51981 loff_t (*llseek) (struct file *, loff_t, int);
51982 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
51983 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
51984@@ -1563,6 +1568,7 @@ struct file_operations {
51985 long (*fallocate)(struct file *file, int mode, loff_t offset,
51986 loff_t len);
51987 };
51988+typedef struct file_operations __no_const file_operations_no_const;
51989
51990 #define IPERM_FLAG_RCU 0x0001
51991
51992diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
51993--- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
51994+++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
51995@@ -6,7 +6,7 @@
51996 #include <linux/seqlock.h>
51997
51998 struct fs_struct {
51999- int users;
52000+ atomic_t users;
52001 spinlock_t lock;
52002 seqcount_t seq;
52003 int umask;
52004diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52005--- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52006+++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52007@@ -84,7 +84,7 @@ struct trace_event_functions {
52008 trace_print_func raw;
52009 trace_print_func hex;
52010 trace_print_func binary;
52011-};
52012+} __no_const;
52013
52014 struct trace_event {
52015 struct hlist_node node;
52016@@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52017 extern int trace_add_event_call(struct ftrace_event_call *call);
52018 extern void trace_remove_event_call(struct ftrace_event_call *call);
52019
52020-#define is_signed_type(type) (((type)(-1)) < 0)
52021+#define is_signed_type(type) (((type)(-1)) < (type)1)
52022
52023 int trace_set_clr_event(const char *system, const char *event, int set);
52024
52025diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52026--- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52027+++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52028@@ -184,7 +184,7 @@ struct gendisk {
52029 struct kobject *slave_dir;
52030
52031 struct timer_rand_state *random;
52032- atomic_t sync_io; /* RAID */
52033+ atomic_unchecked_t sync_io; /* RAID */
52034 struct disk_events *ev;
52035 #ifdef CONFIG_BLK_DEV_INTEGRITY
52036 struct blk_integrity *integrity;
52037diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52038--- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52039+++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52040@@ -0,0 +1,317 @@
52041+#ifndef GR_ACL_H
52042+#define GR_ACL_H
52043+
52044+#include <linux/grdefs.h>
52045+#include <linux/resource.h>
52046+#include <linux/capability.h>
52047+#include <linux/dcache.h>
52048+#include <asm/resource.h>
52049+
52050+/* Major status information */
52051+
52052+#define GR_VERSION "grsecurity 2.2.2"
52053+#define GRSECURITY_VERSION 0x2202
52054+
52055+enum {
52056+ GR_SHUTDOWN = 0,
52057+ GR_ENABLE = 1,
52058+ GR_SPROLE = 2,
52059+ GR_RELOAD = 3,
52060+ GR_SEGVMOD = 4,
52061+ GR_STATUS = 5,
52062+ GR_UNSPROLE = 6,
52063+ GR_PASSSET = 7,
52064+ GR_SPROLEPAM = 8,
52065+};
52066+
52067+/* Password setup definitions
52068+ * kernel/grhash.c */
52069+enum {
52070+ GR_PW_LEN = 128,
52071+ GR_SALT_LEN = 16,
52072+ GR_SHA_LEN = 32,
52073+};
52074+
52075+enum {
52076+ GR_SPROLE_LEN = 64,
52077+};
52078+
52079+enum {
52080+ GR_NO_GLOB = 0,
52081+ GR_REG_GLOB,
52082+ GR_CREATE_GLOB
52083+};
52084+
52085+#define GR_NLIMITS 32
52086+
52087+/* Begin Data Structures */
52088+
52089+struct sprole_pw {
52090+ unsigned char *rolename;
52091+ unsigned char salt[GR_SALT_LEN];
52092+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52093+};
52094+
52095+struct name_entry {
52096+ __u32 key;
52097+ ino_t inode;
52098+ dev_t device;
52099+ char *name;
52100+ __u16 len;
52101+ __u8 deleted;
52102+ struct name_entry *prev;
52103+ struct name_entry *next;
52104+};
52105+
52106+struct inodev_entry {
52107+ struct name_entry *nentry;
52108+ struct inodev_entry *prev;
52109+ struct inodev_entry *next;
52110+};
52111+
52112+struct acl_role_db {
52113+ struct acl_role_label **r_hash;
52114+ __u32 r_size;
52115+};
52116+
52117+struct inodev_db {
52118+ struct inodev_entry **i_hash;
52119+ __u32 i_size;
52120+};
52121+
52122+struct name_db {
52123+ struct name_entry **n_hash;
52124+ __u32 n_size;
52125+};
52126+
52127+struct crash_uid {
52128+ uid_t uid;
52129+ unsigned long expires;
52130+};
52131+
52132+struct gr_hash_struct {
52133+ void **table;
52134+ void **nametable;
52135+ void *first;
52136+ __u32 table_size;
52137+ __u32 used_size;
52138+ int type;
52139+};
52140+
52141+/* Userspace Grsecurity ACL data structures */
52142+
52143+struct acl_subject_label {
52144+ char *filename;
52145+ ino_t inode;
52146+ dev_t device;
52147+ __u32 mode;
52148+ kernel_cap_t cap_mask;
52149+ kernel_cap_t cap_lower;
52150+ kernel_cap_t cap_invert_audit;
52151+
52152+ struct rlimit res[GR_NLIMITS];
52153+ __u32 resmask;
52154+
52155+ __u8 user_trans_type;
52156+ __u8 group_trans_type;
52157+ uid_t *user_transitions;
52158+ gid_t *group_transitions;
52159+ __u16 user_trans_num;
52160+ __u16 group_trans_num;
52161+
52162+ __u32 sock_families[2];
52163+ __u32 ip_proto[8];
52164+ __u32 ip_type;
52165+ struct acl_ip_label **ips;
52166+ __u32 ip_num;
52167+ __u32 inaddr_any_override;
52168+
52169+ __u32 crashes;
52170+ unsigned long expires;
52171+
52172+ struct acl_subject_label *parent_subject;
52173+ struct gr_hash_struct *hash;
52174+ struct acl_subject_label *prev;
52175+ struct acl_subject_label *next;
52176+
52177+ struct acl_object_label **obj_hash;
52178+ __u32 obj_hash_size;
52179+ __u16 pax_flags;
52180+};
52181+
52182+struct role_allowed_ip {
52183+ __u32 addr;
52184+ __u32 netmask;
52185+
52186+ struct role_allowed_ip *prev;
52187+ struct role_allowed_ip *next;
52188+};
52189+
52190+struct role_transition {
52191+ char *rolename;
52192+
52193+ struct role_transition *prev;
52194+ struct role_transition *next;
52195+};
52196+
52197+struct acl_role_label {
52198+ char *rolename;
52199+ uid_t uidgid;
52200+ __u16 roletype;
52201+
52202+ __u16 auth_attempts;
52203+ unsigned long expires;
52204+
52205+ struct acl_subject_label *root_label;
52206+ struct gr_hash_struct *hash;
52207+
52208+ struct acl_role_label *prev;
52209+ struct acl_role_label *next;
52210+
52211+ struct role_transition *transitions;
52212+ struct role_allowed_ip *allowed_ips;
52213+ uid_t *domain_children;
52214+ __u16 domain_child_num;
52215+
52216+ struct acl_subject_label **subj_hash;
52217+ __u32 subj_hash_size;
52218+};
52219+
52220+struct user_acl_role_db {
52221+ struct acl_role_label **r_table;
52222+ __u32 num_pointers; /* Number of allocations to track */
52223+ __u32 num_roles; /* Number of roles */
52224+ __u32 num_domain_children; /* Number of domain children */
52225+ __u32 num_subjects; /* Number of subjects */
52226+ __u32 num_objects; /* Number of objects */
52227+};
52228+
52229+struct acl_object_label {
52230+ char *filename;
52231+ ino_t inode;
52232+ dev_t device;
52233+ __u32 mode;
52234+
52235+ struct acl_subject_label *nested;
52236+ struct acl_object_label *globbed;
52237+
52238+ /* next two structures not used */
52239+
52240+ struct acl_object_label *prev;
52241+ struct acl_object_label *next;
52242+};
52243+
52244+struct acl_ip_label {
52245+ char *iface;
52246+ __u32 addr;
52247+ __u32 netmask;
52248+ __u16 low, high;
52249+ __u8 mode;
52250+ __u32 type;
52251+ __u32 proto[8];
52252+
52253+ /* next two structures not used */
52254+
52255+ struct acl_ip_label *prev;
52256+ struct acl_ip_label *next;
52257+};
52258+
52259+struct gr_arg {
52260+ struct user_acl_role_db role_db;
52261+ unsigned char pw[GR_PW_LEN];
52262+ unsigned char salt[GR_SALT_LEN];
52263+ unsigned char sum[GR_SHA_LEN];
52264+ unsigned char sp_role[GR_SPROLE_LEN];
52265+ struct sprole_pw *sprole_pws;
52266+ dev_t segv_device;
52267+ ino_t segv_inode;
52268+ uid_t segv_uid;
52269+ __u16 num_sprole_pws;
52270+ __u16 mode;
52271+};
52272+
52273+struct gr_arg_wrapper {
52274+ struct gr_arg *arg;
52275+ __u32 version;
52276+ __u32 size;
52277+};
52278+
52279+struct subject_map {
52280+ struct acl_subject_label *user;
52281+ struct acl_subject_label *kernel;
52282+ struct subject_map *prev;
52283+ struct subject_map *next;
52284+};
52285+
52286+struct acl_subj_map_db {
52287+ struct subject_map **s_hash;
52288+ __u32 s_size;
52289+};
52290+
52291+/* End Data Structures Section */
52292+
52293+/* Hash functions generated by empirical testing by Brad Spengler
52294+ Makes good use of the low bits of the inode. Generally 0-1 times
52295+ in loop for successful match. 0-3 for unsuccessful match.
52296+ Shift/add algorithm with modulus of table size and an XOR*/
52297+
52298+static __inline__ unsigned int
52299+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52300+{
52301+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
52302+}
52303+
52304+ static __inline__ unsigned int
52305+shash(const struct acl_subject_label *userp, const unsigned int sz)
52306+{
52307+ return ((const unsigned long)userp % sz);
52308+}
52309+
52310+static __inline__ unsigned int
52311+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52312+{
52313+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52314+}
52315+
52316+static __inline__ unsigned int
52317+nhash(const char *name, const __u16 len, const unsigned int sz)
52318+{
52319+ return full_name_hash((const unsigned char *)name, len) % sz;
52320+}
52321+
52322+#define FOR_EACH_ROLE_START(role) \
52323+ role = role_list; \
52324+ while (role) {
52325+
52326+#define FOR_EACH_ROLE_END(role) \
52327+ role = role->prev; \
52328+ }
52329+
52330+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52331+ subj = NULL; \
52332+ iter = 0; \
52333+ while (iter < role->subj_hash_size) { \
52334+ if (subj == NULL) \
52335+ subj = role->subj_hash[iter]; \
52336+ if (subj == NULL) { \
52337+ iter++; \
52338+ continue; \
52339+ }
52340+
52341+#define FOR_EACH_SUBJECT_END(subj,iter) \
52342+ subj = subj->next; \
52343+ if (subj == NULL) \
52344+ iter++; \
52345+ }
52346+
52347+
52348+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52349+ subj = role->hash->first; \
52350+ while (subj != NULL) {
52351+
52352+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52353+ subj = subj->next; \
52354+ }
52355+
52356+#endif
52357+
52358diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52359--- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52360+++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52361@@ -0,0 +1,9 @@
52362+#ifndef __GRALLOC_H
52363+#define __GRALLOC_H
52364+
52365+void acl_free_all(void);
52366+int acl_alloc_stack_init(unsigned long size);
52367+void *acl_alloc(unsigned long len);
52368+void *acl_alloc_num(unsigned long num, unsigned long len);
52369+
52370+#endif
52371diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52372--- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52373+++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52374@@ -0,0 +1,140 @@
52375+#ifndef GRDEFS_H
52376+#define GRDEFS_H
52377+
52378+/* Begin grsecurity status declarations */
52379+
52380+enum {
52381+ GR_READY = 0x01,
52382+ GR_STATUS_INIT = 0x00 // disabled state
52383+};
52384+
52385+/* Begin ACL declarations */
52386+
52387+/* Role flags */
52388+
52389+enum {
52390+ GR_ROLE_USER = 0x0001,
52391+ GR_ROLE_GROUP = 0x0002,
52392+ GR_ROLE_DEFAULT = 0x0004,
52393+ GR_ROLE_SPECIAL = 0x0008,
52394+ GR_ROLE_AUTH = 0x0010,
52395+ GR_ROLE_NOPW = 0x0020,
52396+ GR_ROLE_GOD = 0x0040,
52397+ GR_ROLE_LEARN = 0x0080,
52398+ GR_ROLE_TPE = 0x0100,
52399+ GR_ROLE_DOMAIN = 0x0200,
52400+ GR_ROLE_PAM = 0x0400,
52401+ GR_ROLE_PERSIST = 0x0800
52402+};
52403+
52404+/* ACL Subject and Object mode flags */
52405+enum {
52406+ GR_DELETED = 0x80000000
52407+};
52408+
52409+/* ACL Object-only mode flags */
52410+enum {
52411+ GR_READ = 0x00000001,
52412+ GR_APPEND = 0x00000002,
52413+ GR_WRITE = 0x00000004,
52414+ GR_EXEC = 0x00000008,
52415+ GR_FIND = 0x00000010,
52416+ GR_INHERIT = 0x00000020,
52417+ GR_SETID = 0x00000040,
52418+ GR_CREATE = 0x00000080,
52419+ GR_DELETE = 0x00000100,
52420+ GR_LINK = 0x00000200,
52421+ GR_AUDIT_READ = 0x00000400,
52422+ GR_AUDIT_APPEND = 0x00000800,
52423+ GR_AUDIT_WRITE = 0x00001000,
52424+ GR_AUDIT_EXEC = 0x00002000,
52425+ GR_AUDIT_FIND = 0x00004000,
52426+ GR_AUDIT_INHERIT= 0x00008000,
52427+ GR_AUDIT_SETID = 0x00010000,
52428+ GR_AUDIT_CREATE = 0x00020000,
52429+ GR_AUDIT_DELETE = 0x00040000,
52430+ GR_AUDIT_LINK = 0x00080000,
52431+ GR_PTRACERD = 0x00100000,
52432+ GR_NOPTRACE = 0x00200000,
52433+ GR_SUPPRESS = 0x00400000,
52434+ GR_NOLEARN = 0x00800000,
52435+ GR_INIT_TRANSFER= 0x01000000
52436+};
52437+
52438+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52439+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52440+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52441+
52442+/* ACL subject-only mode flags */
52443+enum {
52444+ GR_KILL = 0x00000001,
52445+ GR_VIEW = 0x00000002,
52446+ GR_PROTECTED = 0x00000004,
52447+ GR_LEARN = 0x00000008,
52448+ GR_OVERRIDE = 0x00000010,
52449+ /* just a placeholder, this mode is only used in userspace */
52450+ GR_DUMMY = 0x00000020,
52451+ GR_PROTSHM = 0x00000040,
52452+ GR_KILLPROC = 0x00000080,
52453+ GR_KILLIPPROC = 0x00000100,
52454+ /* just a placeholder, this mode is only used in userspace */
52455+ GR_NOTROJAN = 0x00000200,
52456+ GR_PROTPROCFD = 0x00000400,
52457+ GR_PROCACCT = 0x00000800,
52458+ GR_RELAXPTRACE = 0x00001000,
52459+ GR_NESTED = 0x00002000,
52460+ GR_INHERITLEARN = 0x00004000,
52461+ GR_PROCFIND = 0x00008000,
52462+ GR_POVERRIDE = 0x00010000,
52463+ GR_KERNELAUTH = 0x00020000,
52464+ GR_ATSECURE = 0x00040000,
52465+ GR_SHMEXEC = 0x00080000
52466+};
52467+
52468+enum {
52469+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52470+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52471+ GR_PAX_ENABLE_MPROTECT = 0x0004,
52472+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
52473+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52474+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52475+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52476+ GR_PAX_DISABLE_MPROTECT = 0x0400,
52477+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
52478+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52479+};
52480+
52481+enum {
52482+ GR_ID_USER = 0x01,
52483+ GR_ID_GROUP = 0x02,
52484+};
52485+
52486+enum {
52487+ GR_ID_ALLOW = 0x01,
52488+ GR_ID_DENY = 0x02,
52489+};
52490+
52491+#define GR_CRASH_RES 31
52492+#define GR_UIDTABLE_MAX 500
52493+
52494+/* begin resource learning section */
52495+enum {
52496+ GR_RLIM_CPU_BUMP = 60,
52497+ GR_RLIM_FSIZE_BUMP = 50000,
52498+ GR_RLIM_DATA_BUMP = 10000,
52499+ GR_RLIM_STACK_BUMP = 1000,
52500+ GR_RLIM_CORE_BUMP = 10000,
52501+ GR_RLIM_RSS_BUMP = 500000,
52502+ GR_RLIM_NPROC_BUMP = 1,
52503+ GR_RLIM_NOFILE_BUMP = 5,
52504+ GR_RLIM_MEMLOCK_BUMP = 50000,
52505+ GR_RLIM_AS_BUMP = 500000,
52506+ GR_RLIM_LOCKS_BUMP = 2,
52507+ GR_RLIM_SIGPENDING_BUMP = 5,
52508+ GR_RLIM_MSGQUEUE_BUMP = 10000,
52509+ GR_RLIM_NICE_BUMP = 1,
52510+ GR_RLIM_RTPRIO_BUMP = 1,
52511+ GR_RLIM_RTTIME_BUMP = 1000000
52512+};
52513+
52514+#endif
52515diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52516--- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52517+++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52518@@ -0,0 +1,219 @@
52519+#ifndef __GRINTERNAL_H
52520+#define __GRINTERNAL_H
52521+
52522+#ifdef CONFIG_GRKERNSEC
52523+
52524+#include <linux/fs.h>
52525+#include <linux/mnt_namespace.h>
52526+#include <linux/nsproxy.h>
52527+#include <linux/gracl.h>
52528+#include <linux/grdefs.h>
52529+#include <linux/grmsg.h>
52530+
52531+void gr_add_learn_entry(const char *fmt, ...)
52532+ __attribute__ ((format (printf, 1, 2)));
52533+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52534+ const struct vfsmount *mnt);
52535+__u32 gr_check_create(const struct dentry *new_dentry,
52536+ const struct dentry *parent,
52537+ const struct vfsmount *mnt, const __u32 mode);
52538+int gr_check_protected_task(const struct task_struct *task);
52539+__u32 to_gr_audit(const __u32 reqmode);
52540+int gr_set_acls(const int type);
52541+int gr_apply_subject_to_task(struct task_struct *task);
52542+int gr_acl_is_enabled(void);
52543+char gr_roletype_to_char(void);
52544+
52545+void gr_handle_alertkill(struct task_struct *task);
52546+char *gr_to_filename(const struct dentry *dentry,
52547+ const struct vfsmount *mnt);
52548+char *gr_to_filename1(const struct dentry *dentry,
52549+ const struct vfsmount *mnt);
52550+char *gr_to_filename2(const struct dentry *dentry,
52551+ const struct vfsmount *mnt);
52552+char *gr_to_filename3(const struct dentry *dentry,
52553+ const struct vfsmount *mnt);
52554+
52555+extern int grsec_enable_harden_ptrace;
52556+extern int grsec_enable_link;
52557+extern int grsec_enable_fifo;
52558+extern int grsec_enable_execve;
52559+extern int grsec_enable_shm;
52560+extern int grsec_enable_execlog;
52561+extern int grsec_enable_signal;
52562+extern int grsec_enable_audit_ptrace;
52563+extern int grsec_enable_forkfail;
52564+extern int grsec_enable_time;
52565+extern int grsec_enable_rofs;
52566+extern int grsec_enable_chroot_shmat;
52567+extern int grsec_enable_chroot_mount;
52568+extern int grsec_enable_chroot_double;
52569+extern int grsec_enable_chroot_pivot;
52570+extern int grsec_enable_chroot_chdir;
52571+extern int grsec_enable_chroot_chmod;
52572+extern int grsec_enable_chroot_mknod;
52573+extern int grsec_enable_chroot_fchdir;
52574+extern int grsec_enable_chroot_nice;
52575+extern int grsec_enable_chroot_execlog;
52576+extern int grsec_enable_chroot_caps;
52577+extern int grsec_enable_chroot_sysctl;
52578+extern int grsec_enable_chroot_unix;
52579+extern int grsec_enable_tpe;
52580+extern int grsec_tpe_gid;
52581+extern int grsec_enable_tpe_all;
52582+extern int grsec_enable_tpe_invert;
52583+extern int grsec_enable_socket_all;
52584+extern int grsec_socket_all_gid;
52585+extern int grsec_enable_socket_client;
52586+extern int grsec_socket_client_gid;
52587+extern int grsec_enable_socket_server;
52588+extern int grsec_socket_server_gid;
52589+extern int grsec_audit_gid;
52590+extern int grsec_enable_group;
52591+extern int grsec_enable_audit_textrel;
52592+extern int grsec_enable_log_rwxmaps;
52593+extern int grsec_enable_mount;
52594+extern int grsec_enable_chdir;
52595+extern int grsec_resource_logging;
52596+extern int grsec_enable_blackhole;
52597+extern int grsec_lastack_retries;
52598+extern int grsec_enable_brute;
52599+extern int grsec_lock;
52600+
52601+extern spinlock_t grsec_alert_lock;
52602+extern unsigned long grsec_alert_wtime;
52603+extern unsigned long grsec_alert_fyet;
52604+
52605+extern spinlock_t grsec_audit_lock;
52606+
52607+extern rwlock_t grsec_exec_file_lock;
52608+
52609+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52610+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52611+ (tsk)->exec_file->f_vfsmnt) : "/")
52612+
52613+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52614+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52615+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52616+
52617+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52618+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
52619+ (tsk)->exec_file->f_vfsmnt) : "/")
52620+
52621+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52622+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52623+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52624+
52625+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52626+
52627+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52628+
52629+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52630+ (task)->pid, (cred)->uid, \
52631+ (cred)->euid, (cred)->gid, (cred)->egid, \
52632+ gr_parent_task_fullpath(task), \
52633+ (task)->real_parent->comm, (task)->real_parent->pid, \
52634+ (pcred)->uid, (pcred)->euid, \
52635+ (pcred)->gid, (pcred)->egid
52636+
52637+#define GR_CHROOT_CAPS {{ \
52638+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52639+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52640+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52641+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52642+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52643+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52644+
52645+#define security_learn(normal_msg,args...) \
52646+({ \
52647+ read_lock(&grsec_exec_file_lock); \
52648+ gr_add_learn_entry(normal_msg "\n", ## args); \
52649+ read_unlock(&grsec_exec_file_lock); \
52650+})
52651+
52652+enum {
52653+ GR_DO_AUDIT,
52654+ GR_DONT_AUDIT,
52655+ /* used for non-audit messages that we shouldn't kill the task on */
52656+ GR_DONT_AUDIT_GOOD
52657+};
52658+
52659+enum {
52660+ GR_TTYSNIFF,
52661+ GR_RBAC,
52662+ GR_RBAC_STR,
52663+ GR_STR_RBAC,
52664+ GR_RBAC_MODE2,
52665+ GR_RBAC_MODE3,
52666+ GR_FILENAME,
52667+ GR_SYSCTL_HIDDEN,
52668+ GR_NOARGS,
52669+ GR_ONE_INT,
52670+ GR_ONE_INT_TWO_STR,
52671+ GR_ONE_STR,
52672+ GR_STR_INT,
52673+ GR_TWO_STR_INT,
52674+ GR_TWO_INT,
52675+ GR_TWO_U64,
52676+ GR_THREE_INT,
52677+ GR_FIVE_INT_TWO_STR,
52678+ GR_TWO_STR,
52679+ GR_THREE_STR,
52680+ GR_FOUR_STR,
52681+ GR_STR_FILENAME,
52682+ GR_FILENAME_STR,
52683+ GR_FILENAME_TWO_INT,
52684+ GR_FILENAME_TWO_INT_STR,
52685+ GR_TEXTREL,
52686+ GR_PTRACE,
52687+ GR_RESOURCE,
52688+ GR_CAP,
52689+ GR_SIG,
52690+ GR_SIG2,
52691+ GR_CRASH1,
52692+ GR_CRASH2,
52693+ GR_PSACCT,
52694+ GR_RWXMAP
52695+};
52696+
52697+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52698+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52699+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52700+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52701+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52702+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52703+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52704+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52705+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52706+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52707+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52708+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52709+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52710+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52711+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52712+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52713+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52714+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52715+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52716+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52717+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52718+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52719+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52720+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52721+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52722+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52723+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52724+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52725+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52726+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52727+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52728+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52729+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52730+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52731+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52732+
52733+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52734+
52735+#endif
52736+
52737+#endif
52738diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52739--- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52740+++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52741@@ -0,0 +1,108 @@
52742+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52743+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52744+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52745+#define GR_STOPMOD_MSG "denied modification of module state by "
52746+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52747+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52748+#define GR_IOPERM_MSG "denied use of ioperm() by "
52749+#define GR_IOPL_MSG "denied use of iopl() by "
52750+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52751+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52752+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52753+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52754+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52755+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52756+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52757+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52758+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52759+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52760+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52761+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52762+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52763+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52764+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52765+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52766+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52767+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52768+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52769+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52770+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52771+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52772+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52773+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52774+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52775+#define GR_NPROC_MSG "denied overstep of process limit by "
52776+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52777+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52778+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52779+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52780+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52781+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52782+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52783+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52784+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52785+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52786+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52787+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52788+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52789+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52790+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52791+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52792+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52793+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52794+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52795+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52796+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52797+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52798+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52799+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52800+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52801+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52802+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52803+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52804+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52805+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52806+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52807+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52808+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52809+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52810+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52811+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52812+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52813+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52814+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52815+#define GR_FAILFORK_MSG "failed fork with errno %s by "
52816+#define GR_NICE_CHROOT_MSG "denied priority change by "
52817+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52818+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52819+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52820+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52821+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52822+#define GR_TIME_MSG "time set by "
52823+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52824+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52825+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52826+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52827+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52828+#define GR_BIND_MSG "denied bind() by "
52829+#define GR_CONNECT_MSG "denied connect() by "
52830+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52831+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52832+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52833+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52834+#define GR_CAP_ACL_MSG "use of %s denied for "
52835+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52836+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52837+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52838+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52839+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52840+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52841+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52842+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52843+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52844+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52845+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52846+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52847+#define GR_VM86_MSG "denied use of vm86 by "
52848+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52849+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52850diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
52851--- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52852+++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
52853@@ -0,0 +1,218 @@
52854+#ifndef GR_SECURITY_H
52855+#define GR_SECURITY_H
52856+#include <linux/fs.h>
52857+#include <linux/fs_struct.h>
52858+#include <linux/binfmts.h>
52859+#include <linux/gracl.h>
52860+#include <linux/compat.h>
52861+
52862+/* notify of brain-dead configs */
52863+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52864+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52865+#endif
52866+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52867+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52868+#endif
52869+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52870+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52871+#endif
52872+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52873+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52874+#endif
52875+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52876+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52877+#endif
52878+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52879+#error "CONFIG_PAX enabled, but no PaX options are enabled."
52880+#endif
52881+
52882+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52883+void gr_handle_brute_check(void);
52884+void gr_handle_kernel_exploit(void);
52885+int gr_process_user_ban(void);
52886+
52887+char gr_roletype_to_char(void);
52888+
52889+int gr_acl_enable_at_secure(void);
52890+
52891+int gr_check_user_change(int real, int effective, int fs);
52892+int gr_check_group_change(int real, int effective, int fs);
52893+
52894+void gr_del_task_from_ip_table(struct task_struct *p);
52895+
52896+int gr_pid_is_chrooted(struct task_struct *p);
52897+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52898+int gr_handle_chroot_nice(void);
52899+int gr_handle_chroot_sysctl(const int op);
52900+int gr_handle_chroot_setpriority(struct task_struct *p,
52901+ const int niceval);
52902+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52903+int gr_handle_chroot_chroot(const struct dentry *dentry,
52904+ const struct vfsmount *mnt);
52905+int gr_handle_chroot_caps(struct path *path);
52906+void gr_handle_chroot_chdir(struct path *path);
52907+int gr_handle_chroot_chmod(const struct dentry *dentry,
52908+ const struct vfsmount *mnt, const int mode);
52909+int gr_handle_chroot_mknod(const struct dentry *dentry,
52910+ const struct vfsmount *mnt, const int mode);
52911+int gr_handle_chroot_mount(const struct dentry *dentry,
52912+ const struct vfsmount *mnt,
52913+ const char *dev_name);
52914+int gr_handle_chroot_pivot(void);
52915+int gr_handle_chroot_unix(const pid_t pid);
52916+
52917+int gr_handle_rawio(const struct inode *inode);
52918+int gr_handle_nproc(void);
52919+
52920+void gr_handle_ioperm(void);
52921+void gr_handle_iopl(void);
52922+
52923+int gr_tpe_allow(const struct file *file);
52924+
52925+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52926+void gr_clear_chroot_entries(struct task_struct *task);
52927+
52928+void gr_log_forkfail(const int retval);
52929+void gr_log_timechange(void);
52930+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52931+void gr_log_chdir(const struct dentry *dentry,
52932+ const struct vfsmount *mnt);
52933+void gr_log_chroot_exec(const struct dentry *dentry,
52934+ const struct vfsmount *mnt);
52935+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
52936+#ifdef CONFIG_COMPAT
52937+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
52938+#endif
52939+void gr_log_remount(const char *devname, const int retval);
52940+void gr_log_unmount(const char *devname, const int retval);
52941+void gr_log_mount(const char *from, const char *to, const int retval);
52942+void gr_log_textrel(struct vm_area_struct *vma);
52943+void gr_log_rwxmmap(struct file *file);
52944+void gr_log_rwxmprotect(struct file *file);
52945+
52946+int gr_handle_follow_link(const struct inode *parent,
52947+ const struct inode *inode,
52948+ const struct dentry *dentry,
52949+ const struct vfsmount *mnt);
52950+int gr_handle_fifo(const struct dentry *dentry,
52951+ const struct vfsmount *mnt,
52952+ const struct dentry *dir, const int flag,
52953+ const int acc_mode);
52954+int gr_handle_hardlink(const struct dentry *dentry,
52955+ const struct vfsmount *mnt,
52956+ struct inode *inode,
52957+ const int mode, const char *to);
52958+
52959+int gr_is_capable(const int cap);
52960+int gr_is_capable_nolog(const int cap);
52961+void gr_learn_resource(const struct task_struct *task, const int limit,
52962+ const unsigned long wanted, const int gt);
52963+void gr_copy_label(struct task_struct *tsk);
52964+void gr_handle_crash(struct task_struct *task, const int sig);
52965+int gr_handle_signal(const struct task_struct *p, const int sig);
52966+int gr_check_crash_uid(const uid_t uid);
52967+int gr_check_protected_task(const struct task_struct *task);
52968+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52969+int gr_acl_handle_mmap(const struct file *file,
52970+ const unsigned long prot);
52971+int gr_acl_handle_mprotect(const struct file *file,
52972+ const unsigned long prot);
52973+int gr_check_hidden_task(const struct task_struct *tsk);
52974+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52975+ const struct vfsmount *mnt);
52976+__u32 gr_acl_handle_utime(const struct dentry *dentry,
52977+ const struct vfsmount *mnt);
52978+__u32 gr_acl_handle_access(const struct dentry *dentry,
52979+ const struct vfsmount *mnt, const int fmode);
52980+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52981+ const struct vfsmount *mnt, mode_t mode);
52982+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52983+ const struct vfsmount *mnt, mode_t mode);
52984+__u32 gr_acl_handle_chown(const struct dentry *dentry,
52985+ const struct vfsmount *mnt);
52986+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52987+ const struct vfsmount *mnt);
52988+int gr_handle_ptrace(struct task_struct *task, const long request);
52989+int gr_handle_proc_ptrace(struct task_struct *task);
52990+__u32 gr_acl_handle_execve(const struct dentry *dentry,
52991+ const struct vfsmount *mnt);
52992+int gr_check_crash_exec(const struct file *filp);
52993+int gr_acl_is_enabled(void);
52994+void gr_set_kernel_label(struct task_struct *task);
52995+void gr_set_role_label(struct task_struct *task, const uid_t uid,
52996+ const gid_t gid);
52997+int gr_set_proc_label(const struct dentry *dentry,
52998+ const struct vfsmount *mnt,
52999+ const int unsafe_share);
53000+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53001+ const struct vfsmount *mnt);
53002+__u32 gr_acl_handle_open(const struct dentry *dentry,
53003+ const struct vfsmount *mnt, const int fmode);
53004+__u32 gr_acl_handle_creat(const struct dentry *dentry,
53005+ const struct dentry *p_dentry,
53006+ const struct vfsmount *p_mnt, const int fmode,
53007+ const int imode);
53008+void gr_handle_create(const struct dentry *dentry,
53009+ const struct vfsmount *mnt);
53010+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53011+ const struct dentry *parent_dentry,
53012+ const struct vfsmount *parent_mnt,
53013+ const int mode);
53014+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53015+ const struct dentry *parent_dentry,
53016+ const struct vfsmount *parent_mnt);
53017+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53018+ const struct vfsmount *mnt);
53019+void gr_handle_delete(const ino_t ino, const dev_t dev);
53020+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53021+ const struct vfsmount *mnt);
53022+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53023+ const struct dentry *parent_dentry,
53024+ const struct vfsmount *parent_mnt,
53025+ const char *from);
53026+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53027+ const struct dentry *parent_dentry,
53028+ const struct vfsmount *parent_mnt,
53029+ const struct dentry *old_dentry,
53030+ const struct vfsmount *old_mnt, const char *to);
53031+int gr_acl_handle_rename(struct dentry *new_dentry,
53032+ struct dentry *parent_dentry,
53033+ const struct vfsmount *parent_mnt,
53034+ struct dentry *old_dentry,
53035+ struct inode *old_parent_inode,
53036+ struct vfsmount *old_mnt, const char *newname);
53037+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53038+ struct dentry *old_dentry,
53039+ struct dentry *new_dentry,
53040+ struct vfsmount *mnt, const __u8 replace);
53041+__u32 gr_check_link(const struct dentry *new_dentry,
53042+ const struct dentry *parent_dentry,
53043+ const struct vfsmount *parent_mnt,
53044+ const struct dentry *old_dentry,
53045+ const struct vfsmount *old_mnt);
53046+int gr_acl_handle_filldir(const struct file *file, const char *name,
53047+ const unsigned int namelen, const ino_t ino);
53048+
53049+__u32 gr_acl_handle_unix(const struct dentry *dentry,
53050+ const struct vfsmount *mnt);
53051+void gr_acl_handle_exit(void);
53052+void gr_acl_handle_psacct(struct task_struct *task, const long code);
53053+int gr_acl_handle_procpidmem(const struct task_struct *task);
53054+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53055+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53056+void gr_audit_ptrace(struct task_struct *task);
53057+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53058+
53059+#ifdef CONFIG_GRKERNSEC
53060+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53061+void gr_handle_vm86(void);
53062+void gr_handle_mem_readwrite(u64 from, u64 to);
53063+
53064+extern int grsec_enable_dmesg;
53065+extern int grsec_disable_privio;
53066+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53067+extern int grsec_enable_chroot_findtask;
53068+#endif
53069+#endif
53070+
53071+#endif
53072diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53073--- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53074+++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53075@@ -0,0 +1,19 @@
53076+#ifndef __GRSOCK_H
53077+#define __GRSOCK_H
53078+
53079+extern void gr_attach_curr_ip(const struct sock *sk);
53080+extern int gr_handle_sock_all(const int family, const int type,
53081+ const int protocol);
53082+extern int gr_handle_sock_server(const struct sockaddr *sck);
53083+extern int gr_handle_sock_server_other(const struct sock *sck);
53084+extern int gr_handle_sock_client(const struct sockaddr *sck);
53085+extern int gr_search_connect(struct socket * sock,
53086+ struct sockaddr_in * addr);
53087+extern int gr_search_bind(struct socket * sock,
53088+ struct sockaddr_in * addr);
53089+extern int gr_search_listen(struct socket * sock);
53090+extern int gr_search_accept(struct socket * sock);
53091+extern int gr_search_socket(const int domain, const int type,
53092+ const int protocol);
53093+
53094+#endif
53095diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53096--- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53097+++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53098@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53099 kunmap_atomic(kaddr, KM_USER0);
53100 }
53101
53102+static inline void sanitize_highpage(struct page *page)
53103+{
53104+ void *kaddr;
53105+ unsigned long flags;
53106+
53107+ local_irq_save(flags);
53108+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
53109+ clear_page(kaddr);
53110+ kunmap_atomic(kaddr, KM_CLEARPAGE);
53111+ local_irq_restore(flags);
53112+}
53113+
53114 static inline void zero_user_segments(struct page *page,
53115 unsigned start1, unsigned end1,
53116 unsigned start2, unsigned end2)
53117diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53118--- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53119+++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53120@@ -346,6 +346,7 @@ struct i2c_algorithm {
53121 /* To determine what the adapter supports */
53122 u32 (*functionality) (struct i2c_adapter *);
53123 };
53124+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53125
53126 /*
53127 * i2c_adapter is the structure used to identify a physical i2c bus along
53128diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53129--- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53130+++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53131@@ -564,7 +564,7 @@ struct i2o_controller {
53132 struct i2o_device *exec; /* Executive */
53133 #if BITS_PER_LONG == 64
53134 spinlock_t context_list_lock; /* lock for context_list */
53135- atomic_t context_list_counter; /* needed for unique contexts */
53136+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53137 struct list_head context_list; /* list of context id's
53138 and pointers */
53139 #endif
53140diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53141--- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53142+++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53143@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53144
53145 /* Each module must use one module_init(). */
53146 #define module_init(initfn) \
53147- static inline initcall_t __inittest(void) \
53148+ static inline __used initcall_t __inittest(void) \
53149 { return initfn; } \
53150 int init_module(void) __attribute__((alias(#initfn)));
53151
53152 /* This is only required if you want to be unloadable. */
53153 #define module_exit(exitfn) \
53154- static inline exitcall_t __exittest(void) \
53155+ static inline __used exitcall_t __exittest(void) \
53156 { return exitfn; } \
53157 void cleanup_module(void) __attribute__((alias(#exitfn)));
53158
53159diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53160--- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53161+++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53162@@ -83,6 +83,12 @@ extern struct group_info init_groups;
53163 #define INIT_IDS
53164 #endif
53165
53166+#ifdef CONFIG_X86
53167+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53168+#else
53169+#define INIT_TASK_THREAD_INFO
53170+#endif
53171+
53172 /*
53173 * Because of the reduced scope of CAP_SETPCAP when filesystem
53174 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53175@@ -163,6 +169,7 @@ extern struct cred init_cred;
53176 RCU_INIT_POINTER(.cred, &init_cred), \
53177 .comm = "swapper", \
53178 .thread = INIT_THREAD, \
53179+ INIT_TASK_THREAD_INFO \
53180 .fs = &init_fs, \
53181 .files = &init_files, \
53182 .signal = &init_signals, \
53183diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53184--- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53185+++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53186@@ -296,7 +296,7 @@ struct iommu_flush {
53187 u8 fm, u64 type);
53188 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53189 unsigned int size_order, u64 type);
53190-};
53191+} __no_const;
53192
53193 enum {
53194 SR_DMAR_FECTL_REG,
53195diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53196--- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53197+++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53198@@ -422,7 +422,7 @@ enum
53199 /* map softirq index to softirq name. update 'softirq_to_name' in
53200 * kernel/softirq.c when adding a new softirq.
53201 */
53202-extern char *softirq_to_name[NR_SOFTIRQS];
53203+extern const char * const softirq_to_name[NR_SOFTIRQS];
53204
53205 /* softirq mask and active fields moved to irq_cpustat_t in
53206 * asm/hardirq.h to get better cache usage. KAO
53207@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53208
53209 struct softirq_action
53210 {
53211- void (*action)(struct softirq_action *);
53212+ void (*action)(void);
53213 };
53214
53215 asmlinkage void do_softirq(void);
53216 asmlinkage void __do_softirq(void);
53217-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53218+extern void open_softirq(int nr, void (*action)(void));
53219 extern void softirq_init(void);
53220 static inline void __raise_softirq_irqoff(unsigned int nr)
53221 {
53222diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53223--- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53224+++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53225@@ -15,7 +15,8 @@
53226
53227 struct module;
53228
53229-#ifdef CONFIG_KALLSYMS
53230+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53231+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53232 /* Lookup the address for a symbol. Returns 0 if not found. */
53233 unsigned long kallsyms_lookup_name(const char *name);
53234
53235@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53236 /* Stupid that this does nothing, but I didn't create this mess. */
53237 #define __print_symbol(fmt, addr)
53238 #endif /*CONFIG_KALLSYMS*/
53239+#else /* when included by kallsyms.c, vsnprintf.c, or
53240+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53241+extern void __print_symbol(const char *fmt, unsigned long address);
53242+extern int sprint_backtrace(char *buffer, unsigned long address);
53243+extern int sprint_symbol(char *buffer, unsigned long address);
53244+const char *kallsyms_lookup(unsigned long addr,
53245+ unsigned long *symbolsize,
53246+ unsigned long *offset,
53247+ char **modname, char *namebuf);
53248+#endif
53249
53250 /* This macro allows us to keep printk typechecking */
53251 static void __check_printsym_format(const char *fmt, ...)
53252diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53253--- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53254+++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53255@@ -53,7 +53,7 @@ extern int kgdb_connected;
53256 extern int kgdb_io_module_registered;
53257
53258 extern atomic_t kgdb_setting_breakpoint;
53259-extern atomic_t kgdb_cpu_doing_single_step;
53260+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53261
53262 extern struct task_struct *kgdb_usethread;
53263 extern struct task_struct *kgdb_contthread;
53264@@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53265 * hardware debug registers.
53266 */
53267 struct kgdb_arch {
53268- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53269- unsigned long flags;
53270+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53271+ const unsigned long flags;
53272
53273 int (*set_breakpoint)(unsigned long, char *);
53274 int (*remove_breakpoint)(unsigned long, char *);
53275@@ -268,14 +268,14 @@ struct kgdb_arch {
53276 * not a console
53277 */
53278 struct kgdb_io {
53279- const char *name;
53280+ const char * const name;
53281 int (*read_char) (void);
53282 void (*write_char) (u8);
53283 void (*flush) (void);
53284 int (*init) (void);
53285 void (*pre_exception) (void);
53286 void (*post_exception) (void);
53287- int is_console;
53288+ const int is_console;
53289 };
53290
53291 extern struct kgdb_arch arch_kgdb_ops;
53292diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53293--- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53294+++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53295@@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53296 * usually useless though. */
53297 extern int __request_module(bool wait, const char *name, ...) \
53298 __attribute__((format(printf, 2, 3)));
53299+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53300+ __attribute__((format(printf, 3, 4)));
53301 #define request_module(mod...) __request_module(true, mod)
53302 #define request_module_nowait(mod...) __request_module(false, mod)
53303 #define try_then_request_module(x, mod...) \
53304diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53305--- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53306+++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53307@@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53308 void vcpu_load(struct kvm_vcpu *vcpu);
53309 void vcpu_put(struct kvm_vcpu *vcpu);
53310
53311-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53312+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53313 struct module *module);
53314 void kvm_exit(void);
53315
53316@@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53317 struct kvm_guest_debug *dbg);
53318 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53319
53320-int kvm_arch_init(void *opaque);
53321+int kvm_arch_init(const void *opaque);
53322 void kvm_arch_exit(void);
53323
53324 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53325diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53326--- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53327+++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53328@@ -898,7 +898,7 @@ struct ata_port_operations {
53329 * ->inherits must be the last field and all the preceding
53330 * fields must be pointers.
53331 */
53332- const struct ata_port_operations *inherits;
53333+ const struct ata_port_operations * const inherits;
53334 };
53335
53336 struct ata_port_info {
53337diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53338--- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53339+++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53340@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53341 int region);
53342 void * (*mca_transform_memory)(struct mca_device *,
53343 void *memory);
53344-};
53345+} __no_const;
53346
53347 struct mca_bus {
53348 u64 default_dma_mask;
53349diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53350--- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53351+++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53352@@ -142,7 +142,7 @@ struct memory_accessor {
53353 size_t count);
53354 ssize_t (*write)(struct memory_accessor *, const char *buf,
53355 off_t offset, size_t count);
53356-};
53357+} __no_const;
53358
53359 /*
53360 * Kernel text modification mutex, used for code patching. Users of this lock
53361diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53362--- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53363+++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53364@@ -226,6 +226,7 @@ struct abx500_ops {
53365 int (*event_registers_startup_state_get) (struct device *, u8 *);
53366 int (*startup_irq_enabled) (struct device *, unsigned int);
53367 };
53368+typedef struct abx500_ops __no_const abx500_ops_no_const;
53369
53370 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53371 void abx500_remove_ops(struct device *dev);
53372diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53373--- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53374+++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53375@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53376
53377 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53378 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53379+
53380+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53381+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53382+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53383+#else
53384 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53385+#endif
53386+
53387 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53388 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53389
53390@@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53391 int set_page_dirty_lock(struct page *page);
53392 int clear_page_dirty_for_io(struct page *page);
53393
53394-/* Is the vma a continuation of the stack vma above it? */
53395-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53396-{
53397- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53398-}
53399-
53400-static inline int stack_guard_page_start(struct vm_area_struct *vma,
53401- unsigned long addr)
53402-{
53403- return (vma->vm_flags & VM_GROWSDOWN) &&
53404- (vma->vm_start == addr) &&
53405- !vma_growsdown(vma->vm_prev, addr);
53406-}
53407-
53408-/* Is the vma a continuation of the stack vma below it? */
53409-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53410-{
53411- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53412-}
53413-
53414-static inline int stack_guard_page_end(struct vm_area_struct *vma,
53415- unsigned long addr)
53416-{
53417- return (vma->vm_flags & VM_GROWSUP) &&
53418- (vma->vm_end == addr) &&
53419- !vma_growsup(vma->vm_next, addr);
53420-}
53421-
53422 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53423 unsigned long old_addr, struct vm_area_struct *new_vma,
53424 unsigned long new_addr, unsigned long len);
53425@@ -1189,6 +1168,15 @@ struct shrinker {
53426 extern void register_shrinker(struct shrinker *);
53427 extern void unregister_shrinker(struct shrinker *);
53428
53429+#ifdef CONFIG_MMU
53430+pgprot_t vm_get_page_prot(unsigned long vm_flags);
53431+#else
53432+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53433+{
53434+ return __pgprot(0);
53435+}
53436+#endif
53437+
53438 int vma_wants_writenotify(struct vm_area_struct *vma);
53439
53440 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53441@@ -1476,6 +1464,7 @@ out:
53442 }
53443
53444 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53445+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53446
53447 extern unsigned long do_brk(unsigned long, unsigned long);
53448
53449@@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53450 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53451 struct vm_area_struct **pprev);
53452
53453+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53454+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53455+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53456+
53457 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53458 NULL if none. Assume start_addr < end_addr. */
53459 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53460@@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53461 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53462 }
53463
53464-#ifdef CONFIG_MMU
53465-pgprot_t vm_get_page_prot(unsigned long vm_flags);
53466-#else
53467-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53468-{
53469- return __pgprot(0);
53470-}
53471-#endif
53472-
53473 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53474 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53475 unsigned long pfn, unsigned long size, pgprot_t);
53476@@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53477 extern int sysctl_memory_failure_early_kill;
53478 extern int sysctl_memory_failure_recovery;
53479 extern void shake_page(struct page *p, int access);
53480-extern atomic_long_t mce_bad_pages;
53481+extern atomic_long_unchecked_t mce_bad_pages;
53482 extern int soft_offline_page(struct page *page, int flags);
53483
53484 extern void dump_page(struct page *page);
53485@@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53486 unsigned int pages_per_huge_page);
53487 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53488
53489+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53490+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53491+#else
53492+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53493+#endif
53494+
53495 #endif /* __KERNEL__ */
53496 #endif /* _LINUX_MM_H */
53497diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53498--- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53499+++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53500@@ -183,6 +183,8 @@ struct vm_area_struct {
53501 #ifdef CONFIG_NUMA
53502 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53503 #endif
53504+
53505+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53506 };
53507
53508 struct core_thread {
53509@@ -317,6 +319,24 @@ struct mm_struct {
53510 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53511 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53512 #endif
53513+
53514+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53515+ unsigned long pax_flags;
53516+#endif
53517+
53518+#ifdef CONFIG_PAX_DLRESOLVE
53519+ unsigned long call_dl_resolve;
53520+#endif
53521+
53522+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53523+ unsigned long call_syscall;
53524+#endif
53525+
53526+#ifdef CONFIG_PAX_ASLR
53527+ unsigned long delta_mmap; /* randomized offset */
53528+ unsigned long delta_stack; /* randomized offset */
53529+#endif
53530+
53531 };
53532
53533 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53534diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53535--- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53536+++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53537@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53538 */
53539 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53540 ({ \
53541- pte_t __pte; \
53542+ pte_t ___pte; \
53543 struct vm_area_struct *___vma = __vma; \
53544 unsigned long ___address = __address; \
53545- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53546+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53547 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53548- __pte; \
53549+ ___pte; \
53550 })
53551
53552 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53553diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53554--- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53555+++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53556@@ -355,7 +355,7 @@ struct zone {
53557 unsigned long flags; /* zone flags, see below */
53558
53559 /* Zone statistics */
53560- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53561+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53562
53563 /*
53564 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53565diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53566--- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53567+++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53568@@ -12,7 +12,7 @@
53569 typedef unsigned long kernel_ulong_t;
53570 #endif
53571
53572-#define PCI_ANY_ID (~0)
53573+#define PCI_ANY_ID ((__u16)~0)
53574
53575 struct pci_device_id {
53576 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53577@@ -131,7 +131,7 @@ struct usb_device_id {
53578 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53579 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53580
53581-#define HID_ANY_ID (~0)
53582+#define HID_ANY_ID (~0U)
53583
53584 struct hid_device_id {
53585 __u16 bus;
53586diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53587--- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53588+++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53589@@ -16,6 +16,7 @@
53590 #include <linux/kobject.h>
53591 #include <linux/moduleparam.h>
53592 #include <linux/tracepoint.h>
53593+#include <linux/fs.h>
53594
53595 #include <linux/percpu.h>
53596 #include <asm/module.h>
53597@@ -324,19 +325,16 @@ struct module
53598 int (*init)(void);
53599
53600 /* If this is non-NULL, vfree after init() returns */
53601- void *module_init;
53602+ void *module_init_rx, *module_init_rw;
53603
53604 /* Here is the actual code + data, vfree'd on unload. */
53605- void *module_core;
53606+ void *module_core_rx, *module_core_rw;
53607
53608 /* Here are the sizes of the init and core sections */
53609- unsigned int init_size, core_size;
53610+ unsigned int init_size_rw, core_size_rw;
53611
53612 /* The size of the executable code in each section. */
53613- unsigned int init_text_size, core_text_size;
53614-
53615- /* Size of RO sections of the module (text+rodata) */
53616- unsigned int init_ro_size, core_ro_size;
53617+ unsigned int init_size_rx, core_size_rx;
53618
53619 /* Arch-specific module values */
53620 struct mod_arch_specific arch;
53621@@ -391,6 +389,10 @@ struct module
53622 #ifdef CONFIG_EVENT_TRACING
53623 struct ftrace_event_call **trace_events;
53624 unsigned int num_trace_events;
53625+ struct file_operations trace_id;
53626+ struct file_operations trace_enable;
53627+ struct file_operations trace_format;
53628+ struct file_operations trace_filter;
53629 #endif
53630 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53631 unsigned long *ftrace_callsites;
53632@@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53633 bool is_module_percpu_address(unsigned long addr);
53634 bool is_module_text_address(unsigned long addr);
53635
53636+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53637+{
53638+
53639+#ifdef CONFIG_PAX_KERNEXEC
53640+ if (ktla_ktva(addr) >= (unsigned long)start &&
53641+ ktla_ktva(addr) < (unsigned long)start + size)
53642+ return 1;
53643+#endif
53644+
53645+ return ((void *)addr >= start && (void *)addr < start + size);
53646+}
53647+
53648+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53649+{
53650+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53651+}
53652+
53653+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53654+{
53655+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53656+}
53657+
53658+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53659+{
53660+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53661+}
53662+
53663+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53664+{
53665+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53666+}
53667+
53668 static inline int within_module_core(unsigned long addr, struct module *mod)
53669 {
53670- return (unsigned long)mod->module_core <= addr &&
53671- addr < (unsigned long)mod->module_core + mod->core_size;
53672+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53673 }
53674
53675 static inline int within_module_init(unsigned long addr, struct module *mod)
53676 {
53677- return (unsigned long)mod->module_init <= addr &&
53678- addr < (unsigned long)mod->module_init + mod->init_size;
53679+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53680 }
53681
53682 /* Search for module by name: must hold module_mutex. */
53683diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53684--- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53685+++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53686@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53687 sections. Returns NULL on failure. */
53688 void *module_alloc(unsigned long size);
53689
53690+#ifdef CONFIG_PAX_KERNEXEC
53691+void *module_alloc_exec(unsigned long size);
53692+#else
53693+#define module_alloc_exec(x) module_alloc(x)
53694+#endif
53695+
53696 /* Free memory returned from module_alloc. */
53697 void module_free(struct module *mod, void *module_region);
53698
53699+#ifdef CONFIG_PAX_KERNEXEC
53700+void module_free_exec(struct module *mod, void *module_region);
53701+#else
53702+#define module_free_exec(x, y) module_free((x), (y))
53703+#endif
53704+
53705 /* Apply the given relocation to the (simplified) ELF. Return -error
53706 or 0. */
53707 int apply_relocate(Elf_Shdr *sechdrs,
53708diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53709--- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53710+++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53711@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53712 * @len is usually just sizeof(string).
53713 */
53714 #define module_param_string(name, string, len, perm) \
53715- static const struct kparam_string __param_string_##name \
53716+ static const struct kparam_string __param_string_##name __used \
53717 = { len, string }; \
53718 __module_param_call(MODULE_PARAM_PREFIX, name, \
53719 &param_ops_string, \
53720@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53721 * module_param_named() for why this might be necessary.
53722 */
53723 #define module_param_array_named(name, array, type, nump, perm) \
53724- static const struct kparam_array __param_arr_##name \
53725+ static const struct kparam_array __param_arr_##name __used \
53726 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53727 sizeof(array[0]), array }; \
53728 __module_param_call(MODULE_PARAM_PREFIX, name, \
53729diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53730--- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53731+++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53732@@ -51,7 +51,7 @@ struct mutex {
53733 spinlock_t wait_lock;
53734 struct list_head wait_list;
53735 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53736- struct thread_info *owner;
53737+ struct task_struct *owner;
53738 #endif
53739 #ifdef CONFIG_DEBUG_MUTEXES
53740 const char *name;
53741diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53742--- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53743+++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53744@@ -24,7 +24,7 @@ struct nameidata {
53745 unsigned seq;
53746 int last_type;
53747 unsigned depth;
53748- char *saved_names[MAX_NESTED_LINKS + 1];
53749+ const char *saved_names[MAX_NESTED_LINKS + 1];
53750
53751 /* Intent data */
53752 union {
53753@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53754 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53755 extern void unlock_rename(struct dentry *, struct dentry *);
53756
53757-static inline void nd_set_link(struct nameidata *nd, char *path)
53758+static inline void nd_set_link(struct nameidata *nd, const char *path)
53759 {
53760 nd->saved_names[nd->depth] = path;
53761 }
53762
53763-static inline char *nd_get_link(struct nameidata *nd)
53764+static inline const char *nd_get_link(const struct nameidata *nd)
53765 {
53766 return nd->saved_names[nd->depth];
53767 }
53768diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53769--- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53770+++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53771@@ -979,6 +979,7 @@ struct net_device_ops {
53772 int (*ndo_set_features)(struct net_device *dev,
53773 u32 features);
53774 };
53775+typedef struct net_device_ops __no_const net_device_ops_no_const;
53776
53777 /*
53778 * The DEVICE structure.
53779diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53780--- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53781+++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53782@@ -0,0 +1,9 @@
53783+#ifndef _LINUX_NETFILTER_XT_GRADM_H
53784+#define _LINUX_NETFILTER_XT_GRADM_H 1
53785+
53786+struct xt_gradm_mtinfo {
53787+ __u16 flags;
53788+ __u16 invflags;
53789+};
53790+
53791+#endif
53792diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53793--- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53794+++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53795@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53796 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53797 char const * name, ulong * val);
53798
53799-/** Create a file for read-only access to an atomic_t. */
53800+/** Create a file for read-only access to an atomic_unchecked_t. */
53801 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53802- char const * name, atomic_t * val);
53803+ char const * name, atomic_unchecked_t * val);
53804
53805 /** create a directory */
53806 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53807diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53808--- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53809+++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53810@@ -129,7 +129,7 @@ struct parallel_data {
53811 struct padata_instance *pinst;
53812 struct padata_parallel_queue __percpu *pqueue;
53813 struct padata_serial_queue __percpu *squeue;
53814- atomic_t seq_nr;
53815+ atomic_unchecked_t seq_nr;
53816 atomic_t reorder_objects;
53817 atomic_t refcnt;
53818 unsigned int max_seq_nr;
53819diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53820--- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53821+++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53822@@ -759,8 +759,8 @@ struct perf_event {
53823
53824 enum perf_event_active_state state;
53825 unsigned int attach_state;
53826- local64_t count;
53827- atomic64_t child_count;
53828+ local64_t count; /* PaX: fix it one day */
53829+ atomic64_unchecked_t child_count;
53830
53831 /*
53832 * These are the total time in nanoseconds that the event
53833@@ -811,8 +811,8 @@ struct perf_event {
53834 * These accumulate total time (in nanoseconds) that children
53835 * events have been enabled and running, respectively.
53836 */
53837- atomic64_t child_total_time_enabled;
53838- atomic64_t child_total_time_running;
53839+ atomic64_unchecked_t child_total_time_enabled;
53840+ atomic64_unchecked_t child_total_time_running;
53841
53842 /*
53843 * Protect attach/detach and child_list:
53844diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
53845--- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
53846+++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
53847@@ -46,9 +46,9 @@ struct pipe_buffer {
53848 struct pipe_inode_info {
53849 wait_queue_head_t wait;
53850 unsigned int nrbufs, curbuf, buffers;
53851- unsigned int readers;
53852- unsigned int writers;
53853- unsigned int waiting_writers;
53854+ atomic_t readers;
53855+ atomic_t writers;
53856+ atomic_t waiting_writers;
53857 unsigned int r_counter;
53858 unsigned int w_counter;
53859 struct page *tmp_page;
53860diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
53861--- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
53862+++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
53863@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53864
53865 static inline void pm_runtime_mark_last_busy(struct device *dev)
53866 {
53867- ACCESS_ONCE(dev->power.last_busy) = jiffies;
53868+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53869 }
53870
53871 #else /* !CONFIG_PM_RUNTIME */
53872diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
53873--- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
53874+++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
53875@@ -19,8 +19,8 @@
53876 * under normal circumstances, used to verify that nobody uses
53877 * non-initialized list entries.
53878 */
53879-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53880-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53881+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53882+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53883
53884 /********** include/linux/timer.h **********/
53885 /*
53886diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
53887--- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
53888+++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
53889@@ -115,7 +115,7 @@ struct preempt_ops {
53890 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53891 void (*sched_out)(struct preempt_notifier *notifier,
53892 struct task_struct *next);
53893-};
53894+} __no_const;
53895
53896 /**
53897 * preempt_notifier - key for installing preemption notifiers
53898diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
53899--- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
53900+++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
53901@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53902 return proc_create_data(name, mode, parent, proc_fops, NULL);
53903 }
53904
53905+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53906+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53907+{
53908+#ifdef CONFIG_GRKERNSEC_PROC_USER
53909+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53910+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53911+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53912+#else
53913+ return proc_create_data(name, mode, parent, proc_fops, NULL);
53914+#endif
53915+}
53916+
53917+
53918 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53919 mode_t mode, struct proc_dir_entry *base,
53920 read_proc_t *read_proc, void * data)
53921@@ -258,7 +271,7 @@ union proc_op {
53922 int (*proc_show)(struct seq_file *m,
53923 struct pid_namespace *ns, struct pid *pid,
53924 struct task_struct *task);
53925-};
53926+} __no_const;
53927
53928 struct ctl_table_header;
53929 struct ctl_table;
53930diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
53931--- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
53932+++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
53933@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53934 extern void exit_ptrace(struct task_struct *tracer);
53935 #define PTRACE_MODE_READ 1
53936 #define PTRACE_MODE_ATTACH 2
53937-/* Returns 0 on success, -errno on denial. */
53938-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53939 /* Returns true on success, false on denial. */
53940 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53941+/* Returns true on success, false on denial. */
53942+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53943
53944 static inline int ptrace_reparented(struct task_struct *child)
53945 {
53946diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
53947--- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
53948+++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
53949@@ -80,12 +80,17 @@ void srandom32(u32 seed);
53950
53951 u32 prandom32(struct rnd_state *);
53952
53953+static inline unsigned long pax_get_random_long(void)
53954+{
53955+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53956+}
53957+
53958 /*
53959 * Handle minimum values for seeds
53960 */
53961 static inline u32 __seed(u32 x, u32 m)
53962 {
53963- return (x < m) ? x + m : x;
53964+ return (x <= m) ? x + m + 1 : x;
53965 }
53966
53967 /**
53968diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
53969--- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
53970+++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
53971@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53972 * Architecture-specific implementations of sys_reboot commands.
53973 */
53974
53975-extern void machine_restart(char *cmd);
53976-extern void machine_halt(void);
53977-extern void machine_power_off(void);
53978+extern void machine_restart(char *cmd) __noreturn;
53979+extern void machine_halt(void) __noreturn;
53980+extern void machine_power_off(void) __noreturn;
53981
53982 extern void machine_shutdown(void);
53983 struct pt_regs;
53984@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53985 */
53986
53987 extern void kernel_restart_prepare(char *cmd);
53988-extern void kernel_restart(char *cmd);
53989-extern void kernel_halt(void);
53990-extern void kernel_power_off(void);
53991+extern void kernel_restart(char *cmd) __noreturn;
53992+extern void kernel_halt(void) __noreturn;
53993+extern void kernel_power_off(void) __noreturn;
53994
53995 extern int C_A_D; /* for sysctl */
53996 void ctrl_alt_del(void);
53997@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53998 * Emergency restart, callable from an interrupt handler.
53999 */
54000
54001-extern void emergency_restart(void);
54002+extern void emergency_restart(void) __noreturn;
54003 #include <asm/emergency-restart.h>
54004
54005 #endif
54006diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54007--- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54008+++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54009@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54010 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54011
54012 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54013-#define get_generation(s) atomic_read (&fs_generation(s))
54014+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54015 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54016 #define __fs_changed(gen,s) (gen != get_generation (s))
54017 #define fs_changed(gen,s) \
54018diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54019--- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54020+++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54021@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54022 /* Comment? -Hans */
54023 wait_queue_head_t s_wait;
54024 /* To be obsoleted soon by per buffer seals.. -Hans */
54025- atomic_t s_generation_counter; // increased by one every time the
54026+ atomic_unchecked_t s_generation_counter; // increased by one every time the
54027 // tree gets re-balanced
54028 unsigned long s_properties; /* File system properties. Currently holds
54029 on-disk FS format */
54030diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54031--- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54032+++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54033@@ -159,7 +159,7 @@ struct rchan_callbacks
54034 * The callback should return 0 if successful, negative if not.
54035 */
54036 int (*remove_buf_file)(struct dentry *dentry);
54037-};
54038+} __no_const;
54039
54040 /*
54041 * CONFIG_RELAY kernel API, kernel/relay.c
54042diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54043--- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54044+++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54045@@ -147,6 +147,7 @@ struct rfkill_ops {
54046 void (*query)(struct rfkill *rfkill, void *data);
54047 int (*set_block)(void *data, bool blocked);
54048 };
54049+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54050
54051 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54052 /**
54053diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54054--- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54055+++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54056@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54057 void anon_vma_init(void); /* create anon_vma_cachep */
54058 int anon_vma_prepare(struct vm_area_struct *);
54059 void unlink_anon_vmas(struct vm_area_struct *);
54060-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54061-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54062+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54063+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54064 void __anon_vma_link(struct vm_area_struct *);
54065
54066 static inline void anon_vma_merge(struct vm_area_struct *vma,
54067diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54068--- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54069+++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54070@@ -100,6 +100,7 @@ struct bio_list;
54071 struct fs_struct;
54072 struct perf_event_context;
54073 struct blk_plug;
54074+struct linux_binprm;
54075
54076 /*
54077 * List of flags we want to share for kernel threads,
54078@@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54079 extern signed long schedule_timeout_killable(signed long timeout);
54080 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54081 asmlinkage void schedule(void);
54082-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54083+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54084
54085 struct nsproxy;
54086 struct user_namespace;
54087@@ -381,10 +382,13 @@ struct user_namespace;
54088 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54089
54090 extern int sysctl_max_map_count;
54091+extern unsigned long sysctl_heap_stack_gap;
54092
54093 #include <linux/aio.h>
54094
54095 #ifdef CONFIG_MMU
54096+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54097+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54098 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54099 extern unsigned long
54100 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54101@@ -629,6 +633,17 @@ struct signal_struct {
54102 #ifdef CONFIG_TASKSTATS
54103 struct taskstats *stats;
54104 #endif
54105+
54106+#ifdef CONFIG_GRKERNSEC
54107+ u32 curr_ip;
54108+ u32 saved_ip;
54109+ u32 gr_saddr;
54110+ u32 gr_daddr;
54111+ u16 gr_sport;
54112+ u16 gr_dport;
54113+ u8 used_accept:1;
54114+#endif
54115+
54116 #ifdef CONFIG_AUDIT
54117 unsigned audit_tty;
54118 struct tty_audit_buf *tty_audit_buf;
54119@@ -701,6 +716,11 @@ struct user_struct {
54120 struct key *session_keyring; /* UID's default session keyring */
54121 #endif
54122
54123+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54124+ unsigned int banned;
54125+ unsigned long ban_expires;
54126+#endif
54127+
54128 /* Hash table maintenance information */
54129 struct hlist_node uidhash_node;
54130 uid_t uid;
54131@@ -1310,8 +1330,8 @@ struct task_struct {
54132 struct list_head thread_group;
54133
54134 struct completion *vfork_done; /* for vfork() */
54135- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54136- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54137+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54138+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54139
54140 cputime_t utime, stime, utimescaled, stimescaled;
54141 cputime_t gtime;
54142@@ -1327,13 +1347,6 @@ struct task_struct {
54143 struct task_cputime cputime_expires;
54144 struct list_head cpu_timers[3];
54145
54146-/* process credentials */
54147- const struct cred __rcu *real_cred; /* objective and real subjective task
54148- * credentials (COW) */
54149- const struct cred __rcu *cred; /* effective (overridable) subjective task
54150- * credentials (COW) */
54151- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54152-
54153 char comm[TASK_COMM_LEN]; /* executable name excluding path
54154 - access with [gs]et_task_comm (which lock
54155 it with task_lock())
54156@@ -1350,8 +1363,16 @@ struct task_struct {
54157 #endif
54158 /* CPU-specific state of this task */
54159 struct thread_struct thread;
54160+/* thread_info moved to task_struct */
54161+#ifdef CONFIG_X86
54162+ struct thread_info tinfo;
54163+#endif
54164 /* filesystem information */
54165 struct fs_struct *fs;
54166+
54167+ const struct cred __rcu *cred; /* effective (overridable) subjective task
54168+ * credentials (COW) */
54169+
54170 /* open file information */
54171 struct files_struct *files;
54172 /* namespaces */
54173@@ -1398,6 +1419,11 @@ struct task_struct {
54174 struct rt_mutex_waiter *pi_blocked_on;
54175 #endif
54176
54177+/* process credentials */
54178+ const struct cred __rcu *real_cred; /* objective and real subjective task
54179+ * credentials (COW) */
54180+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54181+
54182 #ifdef CONFIG_DEBUG_MUTEXES
54183 /* mutex deadlock detection */
54184 struct mutex_waiter *blocked_on;
54185@@ -1508,6 +1534,21 @@ struct task_struct {
54186 unsigned long default_timer_slack_ns;
54187
54188 struct list_head *scm_work_list;
54189+
54190+#ifdef CONFIG_GRKERNSEC
54191+ /* grsecurity */
54192+ struct dentry *gr_chroot_dentry;
54193+ struct acl_subject_label *acl;
54194+ struct acl_role_label *role;
54195+ struct file *exec_file;
54196+ u16 acl_role_id;
54197+ /* is this the task that authenticated to the special role */
54198+ u8 acl_sp_role;
54199+ u8 is_writable;
54200+ u8 brute;
54201+ u8 gr_is_chrooted;
54202+#endif
54203+
54204 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54205 /* Index of current stored address in ret_stack */
54206 int curr_ret_stack;
54207@@ -1542,6 +1583,57 @@ struct task_struct {
54208 #endif
54209 };
54210
54211+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54212+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54213+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54214+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54215+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54216+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54217+
54218+#ifdef CONFIG_PAX_SOFTMODE
54219+extern int pax_softmode;
54220+#endif
54221+
54222+extern int pax_check_flags(unsigned long *);
54223+
54224+/* if tsk != current then task_lock must be held on it */
54225+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54226+static inline unsigned long pax_get_flags(struct task_struct *tsk)
54227+{
54228+ if (likely(tsk->mm))
54229+ return tsk->mm->pax_flags;
54230+ else
54231+ return 0UL;
54232+}
54233+
54234+/* if tsk != current then task_lock must be held on it */
54235+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54236+{
54237+ if (likely(tsk->mm)) {
54238+ tsk->mm->pax_flags = flags;
54239+ return 0;
54240+ }
54241+ return -EINVAL;
54242+}
54243+#endif
54244+
54245+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54246+extern void pax_set_initial_flags(struct linux_binprm *bprm);
54247+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54248+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54249+#endif
54250+
54251+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54252+extern void pax_report_insns(void *pc, void *sp);
54253+extern void pax_report_refcount_overflow(struct pt_regs *regs);
54254+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54255+
54256+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54257+extern void pax_track_stack(void);
54258+#else
54259+static inline void pax_track_stack(void) {}
54260+#endif
54261+
54262 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54263 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54264
54265@@ -2009,7 +2101,9 @@ void yield(void);
54266 extern struct exec_domain default_exec_domain;
54267
54268 union thread_union {
54269+#ifndef CONFIG_X86
54270 struct thread_info thread_info;
54271+#endif
54272 unsigned long stack[THREAD_SIZE/sizeof(long)];
54273 };
54274
54275@@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54276 */
54277
54278 extern struct task_struct *find_task_by_vpid(pid_t nr);
54279+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54280 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54281 struct pid_namespace *ns);
54282
54283@@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54284 extern void exit_itimers(struct signal_struct *);
54285 extern void flush_itimer_signals(void);
54286
54287-extern NORET_TYPE void do_group_exit(int);
54288+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54289
54290 extern void daemonize(const char *, ...);
54291 extern int allow_signal(int);
54292@@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54293
54294 #endif
54295
54296-static inline int object_is_on_stack(void *obj)
54297+static inline int object_starts_on_stack(void *obj)
54298 {
54299- void *stack = task_stack_page(current);
54300+ const void *stack = task_stack_page(current);
54301
54302 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54303 }
54304
54305+#ifdef CONFIG_PAX_USERCOPY
54306+extern int object_is_on_stack(const void *obj, unsigned long len);
54307+#endif
54308+
54309 extern void thread_info_cache_init(void);
54310
54311 #ifdef CONFIG_DEBUG_STACK_USAGE
54312diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54313--- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54314+++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54315@@ -43,7 +43,8 @@ struct screen_info {
54316 __u16 pages; /* 0x32 */
54317 __u16 vesa_attributes; /* 0x34 */
54318 __u32 capabilities; /* 0x36 */
54319- __u8 _reserved[6]; /* 0x3a */
54320+ __u16 vesapm_size; /* 0x3a */
54321+ __u8 _reserved[4]; /* 0x3c */
54322 } __attribute__((packed));
54323
54324 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54325diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54326--- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54327+++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54328@@ -36,6 +36,7 @@
54329 #include <linux/key.h>
54330 #include <linux/xfrm.h>
54331 #include <linux/slab.h>
54332+#include <linux/grsecurity.h>
54333 #include <net/flow.h>
54334
54335 /* Maximum number of letters for an LSM name string */
54336diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54337--- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54338+++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54339@@ -32,6 +32,7 @@ struct seq_operations {
54340 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54341 int (*show) (struct seq_file *m, void *v);
54342 };
54343+typedef struct seq_operations __no_const seq_operations_no_const;
54344
54345 #define SEQ_SKIP 1
54346
54347diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54348--- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54349+++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54350@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54351 pid_t shm_cprid;
54352 pid_t shm_lprid;
54353 struct user_struct *mlock_user;
54354+#ifdef CONFIG_GRKERNSEC
54355+ time_t shm_createtime;
54356+ pid_t shm_lapid;
54357+#endif
54358 };
54359
54360 /* shm_mode upper byte flags */
54361diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54362--- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54363+++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54364@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54365 */
54366 static inline int skb_queue_empty(const struct sk_buff_head *list)
54367 {
54368- return list->next == (struct sk_buff *)list;
54369+ return list->next == (const struct sk_buff *)list;
54370 }
54371
54372 /**
54373@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54374 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54375 const struct sk_buff *skb)
54376 {
54377- return skb->next == (struct sk_buff *)list;
54378+ return skb->next == (const struct sk_buff *)list;
54379 }
54380
54381 /**
54382@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54383 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54384 const struct sk_buff *skb)
54385 {
54386- return skb->prev == (struct sk_buff *)list;
54387+ return skb->prev == (const struct sk_buff *)list;
54388 }
54389
54390 /**
54391@@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54392 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54393 */
54394 #ifndef NET_SKB_PAD
54395-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54396+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54397 #endif
54398
54399 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54400diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54401--- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54402+++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54403@@ -96,10 +96,10 @@ struct kmem_cache {
54404 unsigned long node_allocs;
54405 unsigned long node_frees;
54406 unsigned long node_overflow;
54407- atomic_t allochit;
54408- atomic_t allocmiss;
54409- atomic_t freehit;
54410- atomic_t freemiss;
54411+ atomic_unchecked_t allochit;
54412+ atomic_unchecked_t allocmiss;
54413+ atomic_unchecked_t freehit;
54414+ atomic_unchecked_t freemiss;
54415
54416 /*
54417 * If debugging is enabled, then the allocator can add additional
54418diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54419--- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54420+++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54421@@ -11,12 +11,20 @@
54422
54423 #include <linux/gfp.h>
54424 #include <linux/types.h>
54425+#include <linux/err.h>
54426
54427 /*
54428 * Flags to pass to kmem_cache_create().
54429 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54430 */
54431 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54432+
54433+#ifdef CONFIG_PAX_USERCOPY
54434+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54435+#else
54436+#define SLAB_USERCOPY 0x00000000UL
54437+#endif
54438+
54439 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54440 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54441 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54442@@ -87,10 +95,13 @@
54443 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54444 * Both make kfree a no-op.
54445 */
54446-#define ZERO_SIZE_PTR ((void *)16)
54447+#define ZERO_SIZE_PTR \
54448+({ \
54449+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54450+ (void *)(-MAX_ERRNO-1L); \
54451+})
54452
54453-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54454- (unsigned long)ZERO_SIZE_PTR)
54455+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54456
54457 /*
54458 * struct kmem_cache related prototypes
54459@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54460 void kfree(const void *);
54461 void kzfree(const void *);
54462 size_t ksize(const void *);
54463+void check_object_size(const void *ptr, unsigned long n, bool to);
54464
54465 /*
54466 * Allocator specific definitions. These are mainly used to establish optimized
54467@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54468
54469 void __init kmem_cache_init_late(void);
54470
54471+#define kmalloc(x, y) \
54472+({ \
54473+ void *___retval; \
54474+ intoverflow_t ___x = (intoverflow_t)x; \
54475+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54476+ ___retval = NULL; \
54477+ else \
54478+ ___retval = kmalloc((size_t)___x, (y)); \
54479+ ___retval; \
54480+})
54481+
54482+#define kmalloc_node(x, y, z) \
54483+({ \
54484+ void *___retval; \
54485+ intoverflow_t ___x = (intoverflow_t)x; \
54486+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54487+ ___retval = NULL; \
54488+ else \
54489+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
54490+ ___retval; \
54491+})
54492+
54493+#define kzalloc(x, y) \
54494+({ \
54495+ void *___retval; \
54496+ intoverflow_t ___x = (intoverflow_t)x; \
54497+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54498+ ___retval = NULL; \
54499+ else \
54500+ ___retval = kzalloc((size_t)___x, (y)); \
54501+ ___retval; \
54502+})
54503+
54504+#define __krealloc(x, y, z) \
54505+({ \
54506+ void *___retval; \
54507+ intoverflow_t ___y = (intoverflow_t)y; \
54508+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54509+ ___retval = NULL; \
54510+ else \
54511+ ___retval = __krealloc((x), (size_t)___y, (z)); \
54512+ ___retval; \
54513+})
54514+
54515+#define krealloc(x, y, z) \
54516+({ \
54517+ void *___retval; \
54518+ intoverflow_t ___y = (intoverflow_t)y; \
54519+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54520+ ___retval = NULL; \
54521+ else \
54522+ ___retval = krealloc((x), (size_t)___y, (z)); \
54523+ ___retval; \
54524+})
54525+
54526 #endif /* _LINUX_SLAB_H */
54527diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54528--- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54529+++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54530@@ -84,7 +84,7 @@ struct kmem_cache {
54531 struct kmem_cache_order_objects max;
54532 struct kmem_cache_order_objects min;
54533 gfp_t allocflags; /* gfp flags to use on each alloc */
54534- int refcount; /* Refcount for slab cache destroy */
54535+ atomic_t refcount; /* Refcount for slab cache destroy */
54536 void (*ctor)(void *);
54537 int inuse; /* Offset to metadata */
54538 int align; /* Alignment */
54539@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54540 }
54541
54542 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54543-void *__kmalloc(size_t size, gfp_t flags);
54544+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54545
54546 static __always_inline void *
54547 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54548diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54549--- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54550+++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54551@@ -61,7 +61,7 @@ struct sonet_stats {
54552 #include <asm/atomic.h>
54553
54554 struct k_sonet_stats {
54555-#define __HANDLE_ITEM(i) atomic_t i
54556+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54557 __SONET_ITEMS
54558 #undef __HANDLE_ITEM
54559 };
54560diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54561--- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54562+++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54563@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54564 {
54565 switch (sap->sa_family) {
54566 case AF_INET:
54567- return ntohs(((struct sockaddr_in *)sap)->sin_port);
54568+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54569 case AF_INET6:
54570- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54571+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54572 }
54573 return 0;
54574 }
54575@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54576 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54577 const struct sockaddr *src)
54578 {
54579- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54580+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54581 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54582
54583 dsin->sin_family = ssin->sin_family;
54584@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54585 if (sa->sa_family != AF_INET6)
54586 return 0;
54587
54588- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54589+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54590 }
54591
54592 #endif /* __KERNEL__ */
54593diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54594--- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54595+++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54596@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54597 extern unsigned int svcrdma_max_requests;
54598 extern unsigned int svcrdma_max_req_size;
54599
54600-extern atomic_t rdma_stat_recv;
54601-extern atomic_t rdma_stat_read;
54602-extern atomic_t rdma_stat_write;
54603-extern atomic_t rdma_stat_sq_starve;
54604-extern atomic_t rdma_stat_rq_starve;
54605-extern atomic_t rdma_stat_rq_poll;
54606-extern atomic_t rdma_stat_rq_prod;
54607-extern atomic_t rdma_stat_sq_poll;
54608-extern atomic_t rdma_stat_sq_prod;
54609+extern atomic_unchecked_t rdma_stat_recv;
54610+extern atomic_unchecked_t rdma_stat_read;
54611+extern atomic_unchecked_t rdma_stat_write;
54612+extern atomic_unchecked_t rdma_stat_sq_starve;
54613+extern atomic_unchecked_t rdma_stat_rq_starve;
54614+extern atomic_unchecked_t rdma_stat_rq_poll;
54615+extern atomic_unchecked_t rdma_stat_rq_prod;
54616+extern atomic_unchecked_t rdma_stat_sq_poll;
54617+extern atomic_unchecked_t rdma_stat_sq_prod;
54618
54619 #define RPCRDMA_VERSION 1
54620
54621diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54622--- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54623+++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54624@@ -155,7 +155,11 @@ enum
54625 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54626 };
54627
54628-
54629+#ifdef CONFIG_PAX_SOFTMODE
54630+enum {
54631+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54632+};
54633+#endif
54634
54635 /* CTL_VM names: */
54636 enum
54637@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54638
54639 extern int proc_dostring(struct ctl_table *, int,
54640 void __user *, size_t *, loff_t *);
54641+extern int proc_dostring_modpriv(struct ctl_table *, int,
54642+ void __user *, size_t *, loff_t *);
54643 extern int proc_dointvec(struct ctl_table *, int,
54644 void __user *, size_t *, loff_t *);
54645 extern int proc_dointvec_minmax(struct ctl_table *, int,
54646diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54647--- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54648+++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54649@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54650
54651 struct module *owner;
54652
54653- int refcount;
54654+ atomic_t refcount;
54655 };
54656
54657 struct tty_ldisc {
54658diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54659--- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54660+++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54661@@ -213,10 +213,26 @@ typedef struct {
54662 int counter;
54663 } atomic_t;
54664
54665+#ifdef CONFIG_PAX_REFCOUNT
54666+typedef struct {
54667+ int counter;
54668+} atomic_unchecked_t;
54669+#else
54670+typedef atomic_t atomic_unchecked_t;
54671+#endif
54672+
54673 #ifdef CONFIG_64BIT
54674 typedef struct {
54675 long counter;
54676 } atomic64_t;
54677+
54678+#ifdef CONFIG_PAX_REFCOUNT
54679+typedef struct {
54680+ long counter;
54681+} atomic64_unchecked_t;
54682+#else
54683+typedef atomic64_t atomic64_unchecked_t;
54684+#endif
54685 #endif
54686
54687 struct list_head {
54688diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54689--- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54690+++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54691@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54692 long ret; \
54693 mm_segment_t old_fs = get_fs(); \
54694 \
54695- set_fs(KERNEL_DS); \
54696 pagefault_disable(); \
54697+ set_fs(KERNEL_DS); \
54698 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54699- pagefault_enable(); \
54700 set_fs(old_fs); \
54701+ pagefault_enable(); \
54702 ret; \
54703 })
54704
54705@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54706 * Safely read from address @src to the buffer at @dst. If a kernel fault
54707 * happens, handle that and return -EFAULT.
54708 */
54709-extern long probe_kernel_read(void *dst, void *src, size_t size);
54710-extern long __probe_kernel_read(void *dst, void *src, size_t size);
54711+extern long probe_kernel_read(void *dst, const void *src, size_t size);
54712+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54713
54714 /*
54715 * probe_kernel_write(): safely attempt to write to a location
54716@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54717 * Safely write to address @dst from the buffer at @src. If a kernel fault
54718 * happens, handle that and return -EFAULT.
54719 */
54720-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54721-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54722+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54723+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54724
54725 #endif /* __LINUX_UACCESS_H__ */
54726diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54727--- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54728+++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54729@@ -6,32 +6,32 @@
54730
54731 static inline u16 get_unaligned_le16(const void *p)
54732 {
54733- return le16_to_cpup((__le16 *)p);
54734+ return le16_to_cpup((const __le16 *)p);
54735 }
54736
54737 static inline u32 get_unaligned_le32(const void *p)
54738 {
54739- return le32_to_cpup((__le32 *)p);
54740+ return le32_to_cpup((const __le32 *)p);
54741 }
54742
54743 static inline u64 get_unaligned_le64(const void *p)
54744 {
54745- return le64_to_cpup((__le64 *)p);
54746+ return le64_to_cpup((const __le64 *)p);
54747 }
54748
54749 static inline u16 get_unaligned_be16(const void *p)
54750 {
54751- return be16_to_cpup((__be16 *)p);
54752+ return be16_to_cpup((const __be16 *)p);
54753 }
54754
54755 static inline u32 get_unaligned_be32(const void *p)
54756 {
54757- return be32_to_cpup((__be32 *)p);
54758+ return be32_to_cpup((const __be32 *)p);
54759 }
54760
54761 static inline u64 get_unaligned_be64(const void *p)
54762 {
54763- return be64_to_cpup((__be64 *)p);
54764+ return be64_to_cpup((const __be64 *)p);
54765 }
54766
54767 static inline void put_unaligned_le16(u16 val, void *p)
54768diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54769--- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54770+++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54771@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54772 #define VM_MAP 0x00000004 /* vmap()ed pages */
54773 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54774 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54775+
54776+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54777+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54778+#endif
54779+
54780 /* bits [20..32] reserved for arch specific ioremap internals */
54781
54782 /*
54783@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54784 # endif
54785 #endif
54786
54787+#define vmalloc(x) \
54788+({ \
54789+ void *___retval; \
54790+ intoverflow_t ___x = (intoverflow_t)x; \
54791+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54792+ ___retval = NULL; \
54793+ else \
54794+ ___retval = vmalloc((unsigned long)___x); \
54795+ ___retval; \
54796+})
54797+
54798+#define vzalloc(x) \
54799+({ \
54800+ void *___retval; \
54801+ intoverflow_t ___x = (intoverflow_t)x; \
54802+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54803+ ___retval = NULL; \
54804+ else \
54805+ ___retval = vzalloc((unsigned long)___x); \
54806+ ___retval; \
54807+})
54808+
54809+#define __vmalloc(x, y, z) \
54810+({ \
54811+ void *___retval; \
54812+ intoverflow_t ___x = (intoverflow_t)x; \
54813+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54814+ ___retval = NULL; \
54815+ else \
54816+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54817+ ___retval; \
54818+})
54819+
54820+#define vmalloc_user(x) \
54821+({ \
54822+ void *___retval; \
54823+ intoverflow_t ___x = (intoverflow_t)x; \
54824+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54825+ ___retval = NULL; \
54826+ else \
54827+ ___retval = vmalloc_user((unsigned long)___x); \
54828+ ___retval; \
54829+})
54830+
54831+#define vmalloc_exec(x) \
54832+({ \
54833+ void *___retval; \
54834+ intoverflow_t ___x = (intoverflow_t)x; \
54835+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54836+ ___retval = NULL; \
54837+ else \
54838+ ___retval = vmalloc_exec((unsigned long)___x); \
54839+ ___retval; \
54840+})
54841+
54842+#define vmalloc_node(x, y) \
54843+({ \
54844+ void *___retval; \
54845+ intoverflow_t ___x = (intoverflow_t)x; \
54846+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54847+ ___retval = NULL; \
54848+ else \
54849+ ___retval = vmalloc_node((unsigned long)___x, (y));\
54850+ ___retval; \
54851+})
54852+
54853+#define vzalloc_node(x, y) \
54854+({ \
54855+ void *___retval; \
54856+ intoverflow_t ___x = (intoverflow_t)x; \
54857+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54858+ ___retval = NULL; \
54859+ else \
54860+ ___retval = vzalloc_node((unsigned long)___x, (y));\
54861+ ___retval; \
54862+})
54863+
54864+#define vmalloc_32(x) \
54865+({ \
54866+ void *___retval; \
54867+ intoverflow_t ___x = (intoverflow_t)x; \
54868+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54869+ ___retval = NULL; \
54870+ else \
54871+ ___retval = vmalloc_32((unsigned long)___x); \
54872+ ___retval; \
54873+})
54874+
54875+#define vmalloc_32_user(x) \
54876+({ \
54877+void *___retval; \
54878+ intoverflow_t ___x = (intoverflow_t)x; \
54879+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54880+ ___retval = NULL; \
54881+ else \
54882+ ___retval = vmalloc_32_user((unsigned long)___x);\
54883+ ___retval; \
54884+})
54885+
54886 #endif /* _LINUX_VMALLOC_H */
54887diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
54888--- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
54889+++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
54890@@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
54891 /*
54892 * Zone based page accounting with per cpu differentials.
54893 */
54894-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54895+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54896
54897 static inline void zone_page_state_add(long x, struct zone *zone,
54898 enum zone_stat_item item)
54899 {
54900- atomic_long_add(x, &zone->vm_stat[item]);
54901- atomic_long_add(x, &vm_stat[item]);
54902+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54903+ atomic_long_add_unchecked(x, &vm_stat[item]);
54904 }
54905
54906 static inline unsigned long global_page_state(enum zone_stat_item item)
54907 {
54908- long x = atomic_long_read(&vm_stat[item]);
54909+ long x = atomic_long_read_unchecked(&vm_stat[item]);
54910 #ifdef CONFIG_SMP
54911 if (x < 0)
54912 x = 0;
54913@@ -169,7 +169,7 @@ static inline unsigned long global_page_
54914 static inline unsigned long zone_page_state(struct zone *zone,
54915 enum zone_stat_item item)
54916 {
54917- long x = atomic_long_read(&zone->vm_stat[item]);
54918+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54919 #ifdef CONFIG_SMP
54920 if (x < 0)
54921 x = 0;
54922@@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
54923 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54924 enum zone_stat_item item)
54925 {
54926- long x = atomic_long_read(&zone->vm_stat[item]);
54927+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54928
54929 #ifdef CONFIG_SMP
54930 int cpu;
54931@@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
54932
54933 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54934 {
54935- atomic_long_inc(&zone->vm_stat[item]);
54936- atomic_long_inc(&vm_stat[item]);
54937+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
54938+ atomic_long_inc_unchecked(&vm_stat[item]);
54939 }
54940
54941 static inline void __inc_zone_page_state(struct page *page,
54942@@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
54943
54944 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54945 {
54946- atomic_long_dec(&zone->vm_stat[item]);
54947- atomic_long_dec(&vm_stat[item]);
54948+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
54949+ atomic_long_dec_unchecked(&vm_stat[item]);
54950 }
54951
54952 static inline void __dec_zone_page_state(struct page *page,
54953diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
54954--- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
54955+++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
54956@@ -163,7 +163,7 @@ struct saa7146_ext_vv
54957 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54958
54959 /* the extension can override this */
54960- struct v4l2_ioctl_ops ops;
54961+ v4l2_ioctl_ops_no_const ops;
54962 /* pointer to the saa7146 core ops */
54963 const struct v4l2_ioctl_ops *core_ops;
54964
54965diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
54966--- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
54967+++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
54968@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
54969
54970
54971 struct v4l2_file_operations {
54972- struct module *owner;
54973+ struct module * const owner;
54974 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
54975 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
54976 unsigned int (*poll) (struct file *, struct poll_table_struct *);
54977diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
54978--- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
54979+++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
54980@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
54981 this function returns 0. If the name ends with a digit (e.g. cx18),
54982 then the name will be set to cx18-0 since cx180 looks really odd. */
54983 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
54984- atomic_t *instance);
54985+ atomic_unchecked_t *instance);
54986
54987 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
54988 Since the parent disappears this ensures that v4l2_dev doesn't have an
54989diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
54990--- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
54991+++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
54992@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54993 long (*vidioc_default) (struct file *file, void *fh,
54994 bool valid_prio, int cmd, void *arg);
54995 };
54996+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54997
54998
54999 /* v4l debugging and diagnostics */
55000diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55001--- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55002+++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55003@@ -52,7 +52,7 @@ struct cfctrl_rsp {
55004 void (*radioset_rsp)(void);
55005 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55006 struct cflayer *client_layer);
55007-};
55008+} __no_const;
55009
55010 /* Link Setup Parameters for CAIF-Links. */
55011 struct cfctrl_link_param {
55012@@ -101,8 +101,8 @@ struct cfctrl_request_info {
55013 struct cfctrl {
55014 struct cfsrvl serv;
55015 struct cfctrl_rsp res;
55016- atomic_t req_seq_no;
55017- atomic_t rsp_seq_no;
55018+ atomic_unchecked_t req_seq_no;
55019+ atomic_unchecked_t rsp_seq_no;
55020 struct list_head list;
55021 /* Protects from simultaneous access to first_req list */
55022 spinlock_t info_list_lock;
55023diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55024--- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55025+++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55026@@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55027 u8 dir, flow_resolve_t resolver, void *ctx);
55028
55029 extern void flow_cache_flush(void);
55030-extern atomic_t flow_cache_genid;
55031+extern atomic_unchecked_t flow_cache_genid;
55032
55033 #endif
55034diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55035--- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55036+++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55037@@ -43,8 +43,8 @@ struct inet_peer {
55038 */
55039 union {
55040 struct {
55041- atomic_t rid; /* Frag reception counter */
55042- atomic_t ip_id_count; /* IP ID for the next packet */
55043+ atomic_unchecked_t rid; /* Frag reception counter */
55044+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55045 __u32 tcp_ts;
55046 __u32 tcp_ts_stamp;
55047 u32 metrics[RTAX_MAX];
55048@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55049 {
55050 more++;
55051 inet_peer_refcheck(p);
55052- return atomic_add_return(more, &p->ip_id_count) - more;
55053+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55054 }
55055
55056 #endif /* _NET_INETPEER_H */
55057diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55058--- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55059+++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55060@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55061
55062 #define FIB_RES_SADDR(net, res) \
55063 ((FIB_RES_NH(res).nh_saddr_genid == \
55064- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55065+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55066 FIB_RES_NH(res).nh_saddr : \
55067 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55068 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55069diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55070--- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55071+++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55072@@ -512,7 +512,7 @@ struct ip_vs_conn {
55073 struct ip_vs_conn *control; /* Master control connection */
55074 atomic_t n_control; /* Number of controlled ones */
55075 struct ip_vs_dest *dest; /* real server */
55076- atomic_t in_pkts; /* incoming packet counter */
55077+ atomic_unchecked_t in_pkts; /* incoming packet counter */
55078
55079 /* packet transmitter for different forwarding methods. If it
55080 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55081@@ -650,7 +650,7 @@ struct ip_vs_dest {
55082 __be16 port; /* port number of the server */
55083 union nf_inet_addr addr; /* IP address of the server */
55084 volatile unsigned flags; /* dest status flags */
55085- atomic_t conn_flags; /* flags to copy to conn */
55086+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
55087 atomic_t weight; /* server weight */
55088
55089 atomic_t refcnt; /* reference counter */
55090diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55091--- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55092+++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55093@@ -51,7 +51,7 @@ typedef struct {
55094 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55095 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55096 struct ircomm_info *);
55097-} call_t;
55098+} __no_const call_t;
55099
55100 struct ircomm_cb {
55101 irda_queue_t queue;
55102diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55103--- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55104+++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55105@@ -35,6 +35,7 @@
55106 #include <linux/termios.h>
55107 #include <linux/timer.h>
55108 #include <linux/tty.h> /* struct tty_struct */
55109+#include <asm/local.h>
55110
55111 #include <net/irda/irias_object.h>
55112 #include <net/irda/ircomm_core.h>
55113@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55114 unsigned short close_delay;
55115 unsigned short closing_wait; /* time to wait before closing */
55116
55117- int open_count;
55118- int blocked_open; /* # of blocked opens */
55119+ local_t open_count;
55120+ local_t blocked_open; /* # of blocked opens */
55121
55122 /* Protect concurent access to :
55123 * o self->open_count
55124diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55125--- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55126+++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55127@@ -87,7 +87,7 @@ struct iucv_sock {
55128 struct iucv_sock_list {
55129 struct hlist_head head;
55130 rwlock_t lock;
55131- atomic_t autobind_name;
55132+ atomic_unchecked_t autobind_name;
55133 };
55134
55135 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55136diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55137--- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55138+++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55139@@ -95,7 +95,7 @@ struct lapb_cb {
55140 struct sk_buff_head write_queue;
55141 struct sk_buff_head ack_queue;
55142 unsigned char window;
55143- struct lapb_register_struct callbacks;
55144+ struct lapb_register_struct *callbacks;
55145
55146 /* FRMR control information */
55147 struct lapb_frame frmr_data;
55148diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55149--- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55150+++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55151@@ -117,7 +117,7 @@ struct neighbour {
55152 };
55153
55154 struct neigh_ops {
55155- int family;
55156+ const int family;
55157 void (*solicit)(struct neighbour *, struct sk_buff*);
55158 void (*error_report)(struct neighbour *, struct sk_buff*);
55159 int (*output)(struct sk_buff*);
55160diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55161--- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55162+++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55163@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55164 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55165 {
55166 if (mark)
55167- skb_trim(skb, (unsigned char *) mark - skb->data);
55168+ skb_trim(skb, (const unsigned char *) mark - skb->data);
55169 }
55170
55171 /**
55172diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55173--- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55174+++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55175@@ -54,8 +54,8 @@ struct netns_ipv4 {
55176 int sysctl_rt_cache_rebuild_count;
55177 int current_rt_cache_rebuild_count;
55178
55179- atomic_t rt_genid;
55180- atomic_t dev_addr_genid;
55181+ atomic_unchecked_t rt_genid;
55182+ atomic_unchecked_t dev_addr_genid;
55183
55184 #ifdef CONFIG_IP_MROUTE
55185 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55186diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55187--- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55188+++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55189@@ -316,9 +316,9 @@ do { \
55190
55191 #else /* SCTP_DEBUG */
55192
55193-#define SCTP_DEBUG_PRINTK(whatever...)
55194-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55195-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55196+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55197+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55198+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55199 #define SCTP_ENABLE_DEBUG
55200 #define SCTP_DISABLE_DEBUG
55201 #define SCTP_ASSERT(expr, str, func)
55202diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55203--- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55204+++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55205@@ -277,7 +277,7 @@ struct sock {
55206 #ifdef CONFIG_RPS
55207 __u32 sk_rxhash;
55208 #endif
55209- atomic_t sk_drops;
55210+ atomic_unchecked_t sk_drops;
55211 int sk_rcvbuf;
55212
55213 struct sk_filter __rcu *sk_filter;
55214diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55215--- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55216+++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55217@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55218 struct tcp_seq_afinfo {
55219 char *name;
55220 sa_family_t family;
55221- struct file_operations seq_fops;
55222- struct seq_operations seq_ops;
55223+ file_operations_no_const seq_fops;
55224+ seq_operations_no_const seq_ops;
55225 };
55226
55227 struct tcp_iter_state {
55228diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55229--- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55230+++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55231@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55232 char *name;
55233 sa_family_t family;
55234 struct udp_table *udp_table;
55235- struct file_operations seq_fops;
55236- struct seq_operations seq_ops;
55237+ file_operations_no_const seq_fops;
55238+ seq_operations_no_const seq_ops;
55239 };
55240
55241 struct udp_iter_state {
55242diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55243--- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55244+++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55245@@ -505,7 +505,7 @@ struct xfrm_policy {
55246 struct timer_list timer;
55247
55248 struct flow_cache_object flo;
55249- atomic_t genid;
55250+ atomic_unchecked_t genid;
55251 u32 priority;
55252 u32 index;
55253 struct xfrm_mark mark;
55254diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55255--- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55256+++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55257@@ -129,7 +129,7 @@ struct iw_cm_verbs {
55258 int backlog);
55259
55260 int (*destroy_listen)(struct iw_cm_id *cm_id);
55261-};
55262+} __no_const;
55263
55264 /**
55265 * iw_create_cm_id - Create an IW CM identifier.
55266diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55267--- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55268+++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55269@@ -750,6 +750,7 @@ struct libfc_function_template {
55270 */
55271 void (*disc_stop_final) (struct fc_lport *);
55272 };
55273+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55274
55275 /**
55276 * struct fc_disc - Discovery context
55277@@ -853,7 +854,7 @@ struct fc_lport {
55278 struct fc_vport *vport;
55279
55280 /* Operational Information */
55281- struct libfc_function_template tt;
55282+ libfc_function_template_no_const tt;
55283 u8 link_up;
55284 u8 qfull;
55285 enum fc_lport_state state;
55286diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55287--- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55288+++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55289@@ -161,9 +161,9 @@ struct scsi_device {
55290 unsigned int max_device_blocked; /* what device_blocked counts down from */
55291 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55292
55293- atomic_t iorequest_cnt;
55294- atomic_t iodone_cnt;
55295- atomic_t ioerr_cnt;
55296+ atomic_unchecked_t iorequest_cnt;
55297+ atomic_unchecked_t iodone_cnt;
55298+ atomic_unchecked_t ioerr_cnt;
55299
55300 struct device sdev_gendev,
55301 sdev_dev;
55302diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55303--- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55304+++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55305@@ -666,9 +666,9 @@ struct fc_function_template {
55306 int (*bsg_timeout)(struct fc_bsg_job *);
55307
55308 /* allocation lengths for host-specific data */
55309- u32 dd_fcrport_size;
55310- u32 dd_fcvport_size;
55311- u32 dd_bsg_size;
55312+ const u32 dd_fcrport_size;
55313+ const u32 dd_fcvport_size;
55314+ const u32 dd_bsg_size;
55315
55316 /*
55317 * The driver sets these to tell the transport class it
55318@@ -678,39 +678,39 @@ struct fc_function_template {
55319 */
55320
55321 /* remote port fixed attributes */
55322- unsigned long show_rport_maxframe_size:1;
55323- unsigned long show_rport_supported_classes:1;
55324- unsigned long show_rport_dev_loss_tmo:1;
55325+ const unsigned long show_rport_maxframe_size:1;
55326+ const unsigned long show_rport_supported_classes:1;
55327+ const unsigned long show_rport_dev_loss_tmo:1;
55328
55329 /*
55330 * target dynamic attributes
55331 * These should all be "1" if the driver uses the remote port
55332 * add/delete functions (so attributes reflect rport values).
55333 */
55334- unsigned long show_starget_node_name:1;
55335- unsigned long show_starget_port_name:1;
55336- unsigned long show_starget_port_id:1;
55337+ const unsigned long show_starget_node_name:1;
55338+ const unsigned long show_starget_port_name:1;
55339+ const unsigned long show_starget_port_id:1;
55340
55341 /* host fixed attributes */
55342- unsigned long show_host_node_name:1;
55343- unsigned long show_host_port_name:1;
55344- unsigned long show_host_permanent_port_name:1;
55345- unsigned long show_host_supported_classes:1;
55346- unsigned long show_host_supported_fc4s:1;
55347- unsigned long show_host_supported_speeds:1;
55348- unsigned long show_host_maxframe_size:1;
55349- unsigned long show_host_serial_number:1;
55350+ const unsigned long show_host_node_name:1;
55351+ const unsigned long show_host_port_name:1;
55352+ const unsigned long show_host_permanent_port_name:1;
55353+ const unsigned long show_host_supported_classes:1;
55354+ const unsigned long show_host_supported_fc4s:1;
55355+ const unsigned long show_host_supported_speeds:1;
55356+ const unsigned long show_host_maxframe_size:1;
55357+ const unsigned long show_host_serial_number:1;
55358 /* host dynamic attributes */
55359- unsigned long show_host_port_id:1;
55360- unsigned long show_host_port_type:1;
55361- unsigned long show_host_port_state:1;
55362- unsigned long show_host_active_fc4s:1;
55363- unsigned long show_host_speed:1;
55364- unsigned long show_host_fabric_name:1;
55365- unsigned long show_host_symbolic_name:1;
55366- unsigned long show_host_system_hostname:1;
55367+ const unsigned long show_host_port_id:1;
55368+ const unsigned long show_host_port_type:1;
55369+ const unsigned long show_host_port_state:1;
55370+ const unsigned long show_host_active_fc4s:1;
55371+ const unsigned long show_host_speed:1;
55372+ const unsigned long show_host_fabric_name:1;
55373+ const unsigned long show_host_symbolic_name:1;
55374+ const unsigned long show_host_system_hostname:1;
55375
55376- unsigned long disable_target_scan:1;
55377+ const unsigned long disable_target_scan:1;
55378 };
55379
55380
55381diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55382--- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55383+++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55384@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55385 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55386 unsigned char val);
55387 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55388-};
55389+} __no_const;
55390
55391 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55392
55393diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55394--- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55395+++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55396@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55397 struct snd_hwdep_dsp_status *status);
55398 int (*dsp_load)(struct snd_hwdep *hw,
55399 struct snd_hwdep_dsp_image *image);
55400-};
55401+} __no_const;
55402
55403 struct snd_hwdep {
55404 struct snd_card *card;
55405diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55406--- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55407+++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55408@@ -44,7 +44,7 @@ struct snd_info_entry_text {
55409 struct snd_info_buffer *buffer);
55410 void (*write)(struct snd_info_entry *entry,
55411 struct snd_info_buffer *buffer);
55412-};
55413+} __no_const;
55414
55415 struct snd_info_entry_ops {
55416 int (*open)(struct snd_info_entry *entry,
55417diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55418--- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55419+++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55420@@ -81,6 +81,7 @@ struct snd_pcm_ops {
55421 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55422 int (*ack)(struct snd_pcm_substream *substream);
55423 };
55424+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55425
55426 /*
55427 *
55428diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55429--- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55430+++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55431@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55432 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55433 int (*csp_stop) (struct snd_sb_csp * p);
55434 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55435-};
55436+} __no_const;
55437
55438 /*
55439 * CSP private data
55440diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55441--- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55442+++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55443@@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55444 struct snd_soc_dai *);
55445
55446 /* platform stream ops */
55447- struct snd_pcm_ops *ops;
55448+ struct snd_pcm_ops * const ops;
55449 };
55450
55451 struct snd_soc_platform {
55452diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55453--- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55454+++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55455@@ -358,7 +358,7 @@ struct snd_ymfpci {
55456 spinlock_t reg_lock;
55457 spinlock_t voice_lock;
55458 wait_queue_head_t interrupt_sleep;
55459- atomic_t interrupt_sleep_count;
55460+ atomic_unchecked_t interrupt_sleep_count;
55461 struct snd_info_entry *proc_entry;
55462 const struct firmware *dsp_microcode;
55463 const struct firmware *controller_microcode;
55464diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55465--- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55466+++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55467@@ -364,7 +364,7 @@ struct t10_reservation_ops {
55468 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55469 int (*t10_pr_register)(struct se_cmd *);
55470 int (*t10_pr_clear)(struct se_cmd *);
55471-};
55472+} __no_const;
55473
55474 struct t10_reservation_template {
55475 /* Reservation effects all target ports */
55476@@ -432,8 +432,8 @@ struct se_transport_task {
55477 atomic_t t_task_cdbs_left;
55478 atomic_t t_task_cdbs_ex_left;
55479 atomic_t t_task_cdbs_timeout_left;
55480- atomic_t t_task_cdbs_sent;
55481- atomic_t t_transport_aborted;
55482+ atomic_unchecked_t t_task_cdbs_sent;
55483+ atomic_unchecked_t t_transport_aborted;
55484 atomic_t t_transport_active;
55485 atomic_t t_transport_complete;
55486 atomic_t t_transport_queue_active;
55487@@ -774,7 +774,7 @@ struct se_device {
55488 atomic_t active_cmds;
55489 atomic_t simple_cmds;
55490 atomic_t depth_left;
55491- atomic_t dev_ordered_id;
55492+ atomic_unchecked_t dev_ordered_id;
55493 atomic_t dev_tur_active;
55494 atomic_t execute_tasks;
55495 atomic_t dev_status_thr_count;
55496diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55497--- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55498+++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55499@@ -36,7 +36,7 @@ struct softirq_action;
55500 */
55501 TRACE_EVENT(irq_handler_entry,
55502
55503- TP_PROTO(int irq, struct irqaction *action),
55504+ TP_PROTO(int irq, const struct irqaction *action),
55505
55506 TP_ARGS(irq, action),
55507
55508@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55509 */
55510 TRACE_EVENT(irq_handler_exit,
55511
55512- TP_PROTO(int irq, struct irqaction *action, int ret),
55513+ TP_PROTO(int irq, const struct irqaction *action, int ret),
55514
55515 TP_ARGS(irq, action, ret),
55516
55517diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55518--- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55519+++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55520@@ -51,10 +51,10 @@ struct dlfb_data {
55521 int base8;
55522 u32 pseudo_palette[256];
55523 /* blit-only rendering path metrics, exposed through sysfs */
55524- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55525- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55526- atomic_t bytes_sent; /* to usb, after compression including overhead */
55527- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55528+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55529+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55530+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55531+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55532 };
55533
55534 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55535diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55536--- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55537+++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55538@@ -177,6 +177,7 @@ struct uvesafb_par {
55539 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55540 u8 pmi_setpal; /* PMI for palette changes */
55541 u16 *pmi_base; /* protected mode interface location */
55542+ u8 *pmi_code; /* protected mode code location */
55543 void *pmi_start;
55544 void *pmi_pal;
55545 u8 *vbe_state_orig; /*
55546diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55547--- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55548+++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55549@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55550
55551 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55552 {
55553- int err = sys_mount(name, "/root", fs, flags, data);
55554+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55555 if (err)
55556 return err;
55557
55558@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55559 va_start(args, fmt);
55560 vsprintf(buf, fmt, args);
55561 va_end(args);
55562- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55563+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55564 if (fd >= 0) {
55565 sys_ioctl(fd, FDEJECT, 0);
55566 sys_close(fd);
55567 }
55568 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55569- fd = sys_open("/dev/console", O_RDWR, 0);
55570+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55571 if (fd >= 0) {
55572 sys_ioctl(fd, TCGETS, (long)&termios);
55573 termios.c_lflag &= ~ICANON;
55574 sys_ioctl(fd, TCSETSF, (long)&termios);
55575- sys_read(fd, &c, 1);
55576+ sys_read(fd, (char __user *)&c, 1);
55577 termios.c_lflag |= ICANON;
55578 sys_ioctl(fd, TCSETSF, (long)&termios);
55579 sys_close(fd);
55580@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55581 mount_root();
55582 out:
55583 devtmpfs_mount("dev");
55584- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55585+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55586 sys_chroot((const char __user __force *)".");
55587 }
55588diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55589--- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55590+++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55591@@ -15,15 +15,15 @@ extern int root_mountflags;
55592
55593 static inline int create_dev(char *name, dev_t dev)
55594 {
55595- sys_unlink(name);
55596- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55597+ sys_unlink((__force char __user *)name);
55598+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55599 }
55600
55601 #if BITS_PER_LONG == 32
55602 static inline u32 bstat(char *name)
55603 {
55604 struct stat64 stat;
55605- if (sys_stat64(name, &stat) != 0)
55606+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55607 return 0;
55608 if (!S_ISBLK(stat.st_mode))
55609 return 0;
55610diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55611--- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55612+++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55613@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55614 create_dev("/dev/root.old", Root_RAM0);
55615 /* mount initrd on rootfs' /root */
55616 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55617- sys_mkdir("/old", 0700);
55618- root_fd = sys_open("/", 0, 0);
55619- old_fd = sys_open("/old", 0, 0);
55620+ sys_mkdir((__force const char __user *)"/old", 0700);
55621+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
55622+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55623 /* move initrd over / and chdir/chroot in initrd root */
55624- sys_chdir("/root");
55625- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55626- sys_chroot(".");
55627+ sys_chdir((__force const char __user *)"/root");
55628+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55629+ sys_chroot((__force const char __user *)".");
55630
55631 /*
55632 * In case that a resume from disk is carried out by linuxrc or one of
55633@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55634
55635 /* move initrd to rootfs' /old */
55636 sys_fchdir(old_fd);
55637- sys_mount("/", ".", NULL, MS_MOVE, NULL);
55638+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55639 /* switch root and cwd back to / of rootfs */
55640 sys_fchdir(root_fd);
55641- sys_chroot(".");
55642+ sys_chroot((__force const char __user *)".");
55643 sys_close(old_fd);
55644 sys_close(root_fd);
55645
55646 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55647- sys_chdir("/old");
55648+ sys_chdir((__force const char __user *)"/old");
55649 return;
55650 }
55651
55652@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55653 mount_root();
55654
55655 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55656- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55657+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55658 if (!error)
55659 printk("okay\n");
55660 else {
55661- int fd = sys_open("/dev/root.old", O_RDWR, 0);
55662+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55663 if (error == -ENOENT)
55664 printk("/initrd does not exist. Ignored.\n");
55665 else
55666 printk("failed\n");
55667 printk(KERN_NOTICE "Unmounting old root\n");
55668- sys_umount("/old", MNT_DETACH);
55669+ sys_umount((__force char __user *)"/old", MNT_DETACH);
55670 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55671 if (fd < 0) {
55672 error = fd;
55673@@ -116,11 +116,11 @@ int __init initrd_load(void)
55674 * mounted in the normal path.
55675 */
55676 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55677- sys_unlink("/initrd.image");
55678+ sys_unlink((__force const char __user *)"/initrd.image");
55679 handle_initrd();
55680 return 1;
55681 }
55682 }
55683- sys_unlink("/initrd.image");
55684+ sys_unlink((__force const char __user *)"/initrd.image");
55685 return 0;
55686 }
55687diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55688--- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55689+++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55690@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55691 partitioned ? "_d" : "", minor,
55692 md_setup_args[ent].device_names);
55693
55694- fd = sys_open(name, 0, 0);
55695+ fd = sys_open((__force char __user *)name, 0, 0);
55696 if (fd < 0) {
55697 printk(KERN_ERR "md: open failed - cannot start "
55698 "array %s\n", name);
55699@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55700 * array without it
55701 */
55702 sys_close(fd);
55703- fd = sys_open(name, 0, 0);
55704+ fd = sys_open((__force char __user *)name, 0, 0);
55705 sys_ioctl(fd, BLKRRPART, 0);
55706 }
55707 sys_close(fd);
55708diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55709--- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55710+++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55711@@ -74,7 +74,7 @@ static void __init free_hash(void)
55712 }
55713 }
55714
55715-static long __init do_utime(char __user *filename, time_t mtime)
55716+static long __init do_utime(__force char __user *filename, time_t mtime)
55717 {
55718 struct timespec t[2];
55719
55720@@ -109,7 +109,7 @@ static void __init dir_utime(void)
55721 struct dir_entry *de, *tmp;
55722 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55723 list_del(&de->list);
55724- do_utime(de->name, de->mtime);
55725+ do_utime((__force char __user *)de->name, de->mtime);
55726 kfree(de->name);
55727 kfree(de);
55728 }
55729@@ -271,7 +271,7 @@ static int __init maybe_link(void)
55730 if (nlink >= 2) {
55731 char *old = find_link(major, minor, ino, mode, collected);
55732 if (old)
55733- return (sys_link(old, collected) < 0) ? -1 : 1;
55734+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55735 }
55736 return 0;
55737 }
55738@@ -280,11 +280,11 @@ static void __init clean_path(char *path
55739 {
55740 struct stat st;
55741
55742- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55743+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55744 if (S_ISDIR(st.st_mode))
55745- sys_rmdir(path);
55746+ sys_rmdir((__force char __user *)path);
55747 else
55748- sys_unlink(path);
55749+ sys_unlink((__force char __user *)path);
55750 }
55751 }
55752
55753@@ -305,7 +305,7 @@ static int __init do_name(void)
55754 int openflags = O_WRONLY|O_CREAT;
55755 if (ml != 1)
55756 openflags |= O_TRUNC;
55757- wfd = sys_open(collected, openflags, mode);
55758+ wfd = sys_open((__force char __user *)collected, openflags, mode);
55759
55760 if (wfd >= 0) {
55761 sys_fchown(wfd, uid, gid);
55762@@ -317,17 +317,17 @@ static int __init do_name(void)
55763 }
55764 }
55765 } else if (S_ISDIR(mode)) {
55766- sys_mkdir(collected, mode);
55767- sys_chown(collected, uid, gid);
55768- sys_chmod(collected, mode);
55769+ sys_mkdir((__force char __user *)collected, mode);
55770+ sys_chown((__force char __user *)collected, uid, gid);
55771+ sys_chmod((__force char __user *)collected, mode);
55772 dir_add(collected, mtime);
55773 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55774 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55775 if (maybe_link() == 0) {
55776- sys_mknod(collected, mode, rdev);
55777- sys_chown(collected, uid, gid);
55778- sys_chmod(collected, mode);
55779- do_utime(collected, mtime);
55780+ sys_mknod((__force char __user *)collected, mode, rdev);
55781+ sys_chown((__force char __user *)collected, uid, gid);
55782+ sys_chmod((__force char __user *)collected, mode);
55783+ do_utime((__force char __user *)collected, mtime);
55784 }
55785 }
55786 return 0;
55787@@ -336,15 +336,15 @@ static int __init do_name(void)
55788 static int __init do_copy(void)
55789 {
55790 if (count >= body_len) {
55791- sys_write(wfd, victim, body_len);
55792+ sys_write(wfd, (__force char __user *)victim, body_len);
55793 sys_close(wfd);
55794- do_utime(vcollected, mtime);
55795+ do_utime((__force char __user *)vcollected, mtime);
55796 kfree(vcollected);
55797 eat(body_len);
55798 state = SkipIt;
55799 return 0;
55800 } else {
55801- sys_write(wfd, victim, count);
55802+ sys_write(wfd, (__force char __user *)victim, count);
55803 body_len -= count;
55804 eat(count);
55805 return 1;
55806@@ -355,9 +355,9 @@ static int __init do_symlink(void)
55807 {
55808 collected[N_ALIGN(name_len) + body_len] = '\0';
55809 clean_path(collected, 0);
55810- sys_symlink(collected + N_ALIGN(name_len), collected);
55811- sys_lchown(collected, uid, gid);
55812- do_utime(collected, mtime);
55813+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55814+ sys_lchown((__force char __user *)collected, uid, gid);
55815+ do_utime((__force char __user *)collected, mtime);
55816 state = SkipIt;
55817 next_state = Reset;
55818 return 0;
55819diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55820--- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55821+++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55822@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55823
55824 config COMPAT_BRK
55825 bool "Disable heap randomization"
55826- default y
55827+ default n
55828 help
55829 Randomizing heap placement makes heap exploits harder, but it
55830 also breaks ancient binaries (including anything libc5 based).
55831diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
55832--- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
55833+++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
55834@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55835 extern void tc_init(void);
55836 #endif
55837
55838+extern void grsecurity_init(void);
55839+
55840 /*
55841 * Debug helper: via this flag we know that we are in 'early bootup code'
55842 * where only the boot processor is running with IRQ disabled. This means
55843@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55844
55845 __setup("reset_devices", set_reset_devices);
55846
55847+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55848+extern char pax_enter_kernel_user[];
55849+extern char pax_exit_kernel_user[];
55850+extern pgdval_t clone_pgd_mask;
55851+#endif
55852+
55853+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55854+static int __init setup_pax_nouderef(char *str)
55855+{
55856+#ifdef CONFIG_X86_32
55857+ unsigned int cpu;
55858+ struct desc_struct *gdt;
55859+
55860+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
55861+ gdt = get_cpu_gdt_table(cpu);
55862+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55863+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55864+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55865+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55866+ }
55867+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55868+#else
55869+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55870+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55871+ clone_pgd_mask = ~(pgdval_t)0UL;
55872+#endif
55873+
55874+ return 0;
55875+}
55876+early_param("pax_nouderef", setup_pax_nouderef);
55877+#endif
55878+
55879+#ifdef CONFIG_PAX_SOFTMODE
55880+int pax_softmode;
55881+
55882+static int __init setup_pax_softmode(char *str)
55883+{
55884+ get_option(&str, &pax_softmode);
55885+ return 1;
55886+}
55887+__setup("pax_softmode=", setup_pax_softmode);
55888+#endif
55889+
55890 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55891 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55892 static const char *panic_later, *panic_param;
55893@@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
55894 {
55895 int count = preempt_count();
55896 int ret;
55897+ const char *msg1 = "", *msg2 = "";
55898
55899 if (initcall_debug)
55900 ret = do_one_initcall_debug(fn);
55901@@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
55902 sprintf(msgbuf, "error code %d ", ret);
55903
55904 if (preempt_count() != count) {
55905- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55906+ msg1 = " preemption imbalance";
55907 preempt_count() = count;
55908 }
55909 if (irqs_disabled()) {
55910- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55911+ msg2 = " disabled interrupts";
55912 local_irq_enable();
55913 }
55914- if (msgbuf[0]) {
55915- printk("initcall %pF returned with %s\n", fn, msgbuf);
55916+ if (msgbuf[0] || *msg1 || *msg2) {
55917+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55918 }
55919
55920 return ret;
55921@@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
55922 do_basic_setup();
55923
55924 /* Open the /dev/console on the rootfs, this should never fail */
55925- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55926+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55927 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55928
55929 (void) sys_dup(0);
55930@@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
55931 if (!ramdisk_execute_command)
55932 ramdisk_execute_command = "/init";
55933
55934- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55935+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55936 ramdisk_execute_command = NULL;
55937 prepare_namespace();
55938 }
55939
55940+ grsecurity_init();
55941+
55942 /*
55943 * Ok, we have completed the initial bootup, and
55944 * we're essentially up and running. Get rid of the
55945diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
55946--- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
55947+++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
55948@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55949 mq_bytes = (mq_msg_tblsz +
55950 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55951
55952+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55953 spin_lock(&mq_lock);
55954 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55955 u->mq_bytes + mq_bytes >
55956diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
55957--- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
55958+++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
55959@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55960 return security_msg_queue_associate(msq, msgflg);
55961 }
55962
55963+static struct ipc_ops msg_ops = {
55964+ .getnew = newque,
55965+ .associate = msg_security,
55966+ .more_checks = NULL
55967+};
55968+
55969 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55970 {
55971 struct ipc_namespace *ns;
55972- struct ipc_ops msg_ops;
55973 struct ipc_params msg_params;
55974
55975 ns = current->nsproxy->ipc_ns;
55976
55977- msg_ops.getnew = newque;
55978- msg_ops.associate = msg_security;
55979- msg_ops.more_checks = NULL;
55980-
55981 msg_params.key = key;
55982 msg_params.flg = msgflg;
55983
55984diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
55985--- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
55986+++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
55987@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55988 return 0;
55989 }
55990
55991+static struct ipc_ops sem_ops = {
55992+ .getnew = newary,
55993+ .associate = sem_security,
55994+ .more_checks = sem_more_checks
55995+};
55996+
55997 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55998 {
55999 struct ipc_namespace *ns;
56000- struct ipc_ops sem_ops;
56001 struct ipc_params sem_params;
56002
56003 ns = current->nsproxy->ipc_ns;
56004@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56005 if (nsems < 0 || nsems > ns->sc_semmsl)
56006 return -EINVAL;
56007
56008- sem_ops.getnew = newary;
56009- sem_ops.associate = sem_security;
56010- sem_ops.more_checks = sem_more_checks;
56011-
56012 sem_params.key = key;
56013 sem_params.flg = semflg;
56014 sem_params.u.nsems = nsems;
56015@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56016 int nsems;
56017 struct list_head tasks;
56018
56019+ pax_track_stack();
56020+
56021 sma = sem_lock_check(ns, semid);
56022 if (IS_ERR(sma))
56023 return PTR_ERR(sma);
56024@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56025 struct ipc_namespace *ns;
56026 struct list_head tasks;
56027
56028+ pax_track_stack();
56029+
56030 ns = current->nsproxy->ipc_ns;
56031
56032 if (nsops < 1 || semid < 0)
56033diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56034--- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56035+++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56036@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56037 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56038 #endif
56039
56040+#ifdef CONFIG_GRKERNSEC
56041+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56042+ const time_t shm_createtime, const uid_t cuid,
56043+ const int shmid);
56044+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56045+ const time_t shm_createtime);
56046+#endif
56047+
56048 void shm_init_ns(struct ipc_namespace *ns)
56049 {
56050 ns->shm_ctlmax = SHMMAX;
56051@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56052 shp->shm_lprid = 0;
56053 shp->shm_atim = shp->shm_dtim = 0;
56054 shp->shm_ctim = get_seconds();
56055+#ifdef CONFIG_GRKERNSEC
56056+ {
56057+ struct timespec timeval;
56058+ do_posix_clock_monotonic_gettime(&timeval);
56059+
56060+ shp->shm_createtime = timeval.tv_sec;
56061+ }
56062+#endif
56063 shp->shm_segsz = size;
56064 shp->shm_nattch = 0;
56065 shp->shm_file = file;
56066@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56067 return 0;
56068 }
56069
56070+static struct ipc_ops shm_ops = {
56071+ .getnew = newseg,
56072+ .associate = shm_security,
56073+ .more_checks = shm_more_checks
56074+};
56075+
56076 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56077 {
56078 struct ipc_namespace *ns;
56079- struct ipc_ops shm_ops;
56080 struct ipc_params shm_params;
56081
56082 ns = current->nsproxy->ipc_ns;
56083
56084- shm_ops.getnew = newseg;
56085- shm_ops.associate = shm_security;
56086- shm_ops.more_checks = shm_more_checks;
56087-
56088 shm_params.key = key;
56089 shm_params.flg = shmflg;
56090 shm_params.u.size = size;
56091@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56092 case SHM_LOCK:
56093 case SHM_UNLOCK:
56094 {
56095- struct file *uninitialized_var(shm_file);
56096-
56097 lru_add_drain_all(); /* drain pagevecs to lru lists */
56098
56099 shp = shm_lock_check(ns, shmid);
56100@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56101 if (err)
56102 goto out_unlock;
56103
56104+#ifdef CONFIG_GRKERNSEC
56105+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56106+ shp->shm_perm.cuid, shmid) ||
56107+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56108+ err = -EACCES;
56109+ goto out_unlock;
56110+ }
56111+#endif
56112+
56113 path = shp->shm_file->f_path;
56114 path_get(&path);
56115 shp->shm_nattch++;
56116+#ifdef CONFIG_GRKERNSEC
56117+ shp->shm_lapid = current->pid;
56118+#endif
56119 size = i_size_read(path.dentry->d_inode);
56120 shm_unlock(shp);
56121
56122diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56123--- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56124+++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56125@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56126 */
56127 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56128 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56129- file->f_op->write(file, (char *)&ac,
56130+ file->f_op->write(file, (__force char __user *)&ac,
56131 sizeof(acct_t), &file->f_pos);
56132 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56133 set_fs(fs);
56134diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56135--- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56136+++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56137@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56138 3) suppressed due to audit_rate_limit
56139 4) suppressed due to audit_backlog_limit
56140 */
56141-static atomic_t audit_lost = ATOMIC_INIT(0);
56142+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56143
56144 /* The netlink socket. */
56145 static struct sock *audit_sock;
56146@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56147 unsigned long now;
56148 int print;
56149
56150- atomic_inc(&audit_lost);
56151+ atomic_inc_unchecked(&audit_lost);
56152
56153 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56154
56155@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56156 printk(KERN_WARNING
56157 "audit: audit_lost=%d audit_rate_limit=%d "
56158 "audit_backlog_limit=%d\n",
56159- atomic_read(&audit_lost),
56160+ atomic_read_unchecked(&audit_lost),
56161 audit_rate_limit,
56162 audit_backlog_limit);
56163 audit_panic(message);
56164@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56165 status_set.pid = audit_pid;
56166 status_set.rate_limit = audit_rate_limit;
56167 status_set.backlog_limit = audit_backlog_limit;
56168- status_set.lost = atomic_read(&audit_lost);
56169+ status_set.lost = atomic_read_unchecked(&audit_lost);
56170 status_set.backlog = skb_queue_len(&audit_skb_queue);
56171 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56172 &status_set, sizeof(status_set));
56173diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56174--- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56175+++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56176@@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56177 }
56178
56179 /* global counter which is incremented every time something logs in */
56180-static atomic_t session_id = ATOMIC_INIT(0);
56181+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56182
56183 /**
56184 * audit_set_loginuid - set a task's audit_context loginuid
56185@@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56186 */
56187 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56188 {
56189- unsigned int sessionid = atomic_inc_return(&session_id);
56190+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56191 struct audit_context *context = task->audit_context;
56192
56193 if (context && context->in_syscall) {
56194diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56195--- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56196+++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56197@@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56198 * before modification is attempted and the application
56199 * fails.
56200 */
56201+ if (tocopy > ARRAY_SIZE(kdata))
56202+ return -EFAULT;
56203+
56204 if (copy_to_user(dataptr, kdata, tocopy
56205 * sizeof(struct __user_cap_data_struct))) {
56206 return -EFAULT;
56207@@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56208 BUG();
56209 }
56210
56211- if (security_capable(ns, current_cred(), cap) == 0) {
56212+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56213 current->flags |= PF_SUPERPRIV;
56214 return true;
56215 }
56216@@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56217 }
56218 EXPORT_SYMBOL(ns_capable);
56219
56220+bool ns_capable_nolog(struct user_namespace *ns, int cap)
56221+{
56222+ if (unlikely(!cap_valid(cap))) {
56223+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56224+ BUG();
56225+ }
56226+
56227+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56228+ current->flags |= PF_SUPERPRIV;
56229+ return true;
56230+ }
56231+ return false;
56232+}
56233+EXPORT_SYMBOL(ns_capable_nolog);
56234+
56235+bool capable_nolog(int cap)
56236+{
56237+ return ns_capable_nolog(&init_user_ns, cap);
56238+}
56239+EXPORT_SYMBOL(capable_nolog);
56240+
56241 /**
56242 * task_ns_capable - Determine whether current task has a superior
56243 * capability targeted at a specific task's user namespace.
56244@@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56245 }
56246 EXPORT_SYMBOL(task_ns_capable);
56247
56248+bool task_ns_capable_nolog(struct task_struct *t, int cap)
56249+{
56250+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56251+}
56252+EXPORT_SYMBOL(task_ns_capable_nolog);
56253+
56254 /**
56255 * nsown_capable - Check superior capability to one's own user_ns
56256 * @cap: The capability in question
56257diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56258--- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56259+++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56260@@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56261 struct hlist_head *hhead;
56262 struct cg_cgroup_link *link;
56263
56264+ pax_track_stack();
56265+
56266 /* First see if we already have a cgroup group that matches
56267 * the desired set */
56268 read_lock(&css_set_lock);
56269diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56270--- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56271+++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56272@@ -13,6 +13,7 @@
56273
56274 #include <linux/linkage.h>
56275 #include <linux/compat.h>
56276+#include <linux/module.h>
56277 #include <linux/errno.h>
56278 #include <linux/time.h>
56279 #include <linux/signal.h>
56280diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56281--- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56282+++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56283@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56284 struct proc_dir_entry *entry;
56285
56286 /* create the current config file */
56287+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56288+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56289+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56290+ &ikconfig_file_ops);
56291+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56292+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56293+ &ikconfig_file_ops);
56294+#endif
56295+#else
56296 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56297 &ikconfig_file_ops);
56298+#endif
56299+
56300 if (!entry)
56301 return -ENOMEM;
56302
56303diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56304--- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56305+++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56306@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56307 */
56308 void __put_cred(struct cred *cred)
56309 {
56310+ pax_track_stack();
56311+
56312 kdebug("__put_cred(%p{%d,%d})", cred,
56313 atomic_read(&cred->usage),
56314 read_cred_subscribers(cred));
56315@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56316 {
56317 struct cred *cred;
56318
56319+ pax_track_stack();
56320+
56321 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56322 atomic_read(&tsk->cred->usage),
56323 read_cred_subscribers(tsk->cred));
56324@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56325 {
56326 const struct cred *cred;
56327
56328+ pax_track_stack();
56329+
56330 rcu_read_lock();
56331
56332 do {
56333@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56334 {
56335 struct cred *new;
56336
56337+ pax_track_stack();
56338+
56339 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56340 if (!new)
56341 return NULL;
56342@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56343 const struct cred *old;
56344 struct cred *new;
56345
56346+ pax_track_stack();
56347+
56348 validate_process_creds();
56349
56350 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56351@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56352 struct thread_group_cred *tgcred = NULL;
56353 struct cred *new;
56354
56355+ pax_track_stack();
56356+
56357 #ifdef CONFIG_KEYS
56358 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56359 if (!tgcred)
56360@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56361 struct cred *new;
56362 int ret;
56363
56364+ pax_track_stack();
56365+
56366 if (
56367 #ifdef CONFIG_KEYS
56368 !p->cred->thread_keyring &&
56369@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56370 struct task_struct *task = current;
56371 const struct cred *old = task->real_cred;
56372
56373+ pax_track_stack();
56374+
56375 kdebug("commit_creds(%p{%d,%d})", new,
56376 atomic_read(&new->usage),
56377 read_cred_subscribers(new));
56378@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56379
56380 get_cred(new); /* we will require a ref for the subj creds too */
56381
56382+ gr_set_role_label(task, new->uid, new->gid);
56383+
56384 /* dumpability changes */
56385 if (old->euid != new->euid ||
56386 old->egid != new->egid ||
56387@@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56388 */
56389 void abort_creds(struct cred *new)
56390 {
56391+ pax_track_stack();
56392+
56393 kdebug("abort_creds(%p{%d,%d})", new,
56394 atomic_read(&new->usage),
56395 read_cred_subscribers(new));
56396@@ -574,6 +594,8 @@ const struct cred *override_creds(const
56397 {
56398 const struct cred *old = current->cred;
56399
56400+ pax_track_stack();
56401+
56402 kdebug("override_creds(%p{%d,%d})", new,
56403 atomic_read(&new->usage),
56404 read_cred_subscribers(new));
56405@@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56406 {
56407 const struct cred *override = current->cred;
56408
56409+ pax_track_stack();
56410+
56411 kdebug("revert_creds(%p{%d,%d})", old,
56412 atomic_read(&old->usage),
56413 read_cred_subscribers(old));
56414@@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56415 const struct cred *old;
56416 struct cred *new;
56417
56418+ pax_track_stack();
56419+
56420 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56421 if (!new)
56422 return NULL;
56423@@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56424 */
56425 int set_security_override(struct cred *new, u32 secid)
56426 {
56427+ pax_track_stack();
56428+
56429 return security_kernel_act_as(new, secid);
56430 }
56431 EXPORT_SYMBOL(set_security_override);
56432@@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56433 u32 secid;
56434 int ret;
56435
56436+ pax_track_stack();
56437+
56438 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56439 if (ret < 0)
56440 return ret;
56441diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56442--- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56443+++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56444@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56445 */
56446 static atomic_t masters_in_kgdb;
56447 static atomic_t slaves_in_kgdb;
56448-static atomic_t kgdb_break_tasklet_var;
56449+static atomic_unchecked_t kgdb_break_tasklet_var;
56450 atomic_t kgdb_setting_breakpoint;
56451
56452 struct task_struct *kgdb_usethread;
56453@@ -129,7 +129,7 @@ int kgdb_single_step;
56454 static pid_t kgdb_sstep_pid;
56455
56456 /* to keep track of the CPU which is doing the single stepping*/
56457-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56458+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56459
56460 /*
56461 * If you are debugging a problem where roundup (the collection of
56462@@ -542,7 +542,7 @@ return_normal:
56463 * kernel will only try for the value of sstep_tries before
56464 * giving up and continuing on.
56465 */
56466- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56467+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56468 (kgdb_info[cpu].task &&
56469 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56470 atomic_set(&kgdb_active, -1);
56471@@ -636,8 +636,8 @@ cpu_master_loop:
56472 }
56473
56474 kgdb_restore:
56475- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56476- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56477+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56478+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56479 if (kgdb_info[sstep_cpu].task)
56480 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56481 else
56482@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56483 static void kgdb_tasklet_bpt(unsigned long ing)
56484 {
56485 kgdb_breakpoint();
56486- atomic_set(&kgdb_break_tasklet_var, 0);
56487+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56488 }
56489
56490 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56491
56492 void kgdb_schedule_breakpoint(void)
56493 {
56494- if (atomic_read(&kgdb_break_tasklet_var) ||
56495+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56496 atomic_read(&kgdb_active) != -1 ||
56497 atomic_read(&kgdb_setting_breakpoint))
56498 return;
56499- atomic_inc(&kgdb_break_tasklet_var);
56500+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
56501 tasklet_schedule(&kgdb_tasklet_breakpoint);
56502 }
56503 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56504diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56505--- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56506+++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56507@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56508 list_for_each_entry(mod, kdb_modules, list) {
56509
56510 kdb_printf("%-20s%8u 0x%p ", mod->name,
56511- mod->core_size, (void *)mod);
56512+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
56513 #ifdef CONFIG_MODULE_UNLOAD
56514 kdb_printf("%4d ", module_refcount(mod));
56515 #endif
56516@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56517 kdb_printf(" (Loading)");
56518 else
56519 kdb_printf(" (Live)");
56520- kdb_printf(" 0x%p", mod->module_core);
56521+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56522
56523 #ifdef CONFIG_MODULE_UNLOAD
56524 {
56525diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56526--- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56527+++ linux-2.6.39.4/kernel/exit.c 2011-08-05 19:44:37.000000000 -0400
56528@@ -57,6 +57,10 @@
56529 #include <asm/pgtable.h>
56530 #include <asm/mmu_context.h>
56531
56532+#ifdef CONFIG_GRKERNSEC
56533+extern rwlock_t grsec_exec_file_lock;
56534+#endif
56535+
56536 static void exit_mm(struct task_struct * tsk);
56537
56538 static void __unhash_process(struct task_struct *p, bool group_dead)
56539@@ -169,6 +173,8 @@ void release_task(struct task_struct * p
56540 struct task_struct *leader;
56541 int zap_leader;
56542 repeat:
56543+ gr_del_task_from_ip_table(p);
56544+
56545 tracehook_prepare_release_task(p);
56546 /* don't need to get the RCU readlock here - the process is dead and
56547 * can't be modifying its own credentials. But shut RCU-lockdep up */
56548@@ -338,11 +344,22 @@ static void reparent_to_kthreadd(void)
56549 {
56550 write_lock_irq(&tasklist_lock);
56551
56552+#ifdef CONFIG_GRKERNSEC
56553+ write_lock(&grsec_exec_file_lock);
56554+ if (current->exec_file) {
56555+ fput(current->exec_file);
56556+ current->exec_file = NULL;
56557+ }
56558+ write_unlock(&grsec_exec_file_lock);
56559+#endif
56560+
56561 ptrace_unlink(current);
56562 /* Reparent to init */
56563 current->real_parent = current->parent = kthreadd_task;
56564 list_move_tail(&current->sibling, &current->real_parent->children);
56565
56566+ gr_set_kernel_label(current);
56567+
56568 /* Set the exit signal to SIGCHLD so we signal init on exit */
56569 current->exit_signal = SIGCHLD;
56570
56571@@ -394,7 +411,7 @@ int allow_signal(int sig)
56572 * know it'll be handled, so that they don't get converted to
56573 * SIGKILL or just silently dropped.
56574 */
56575- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56576+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56577 recalc_sigpending();
56578 spin_unlock_irq(&current->sighand->siglock);
56579 return 0;
56580@@ -430,6 +447,17 @@ void daemonize(const char *name, ...)
56581 vsnprintf(current->comm, sizeof(current->comm), name, args);
56582 va_end(args);
56583
56584+#ifdef CONFIG_GRKERNSEC
56585+ write_lock(&grsec_exec_file_lock);
56586+ if (current->exec_file) {
56587+ fput(current->exec_file);
56588+ current->exec_file = NULL;
56589+ }
56590+ write_unlock(&grsec_exec_file_lock);
56591+#endif
56592+
56593+ gr_set_kernel_label(current);
56594+
56595 /*
56596 * If we were started as result of loading a module, close all of the
56597 * user space pages. We don't need them, and if we didn't close them
56598@@ -905,15 +933,8 @@ NORET_TYPE void do_exit(long code)
56599 struct task_struct *tsk = current;
56600 int group_dead;
56601
56602- profile_task_exit(tsk);
56603-
56604- WARN_ON(atomic_read(&tsk->fs_excl));
56605- WARN_ON(blk_needs_flush_plug(tsk));
56606-
56607 if (unlikely(in_interrupt()))
56608 panic("Aiee, killing interrupt handler!");
56609- if (unlikely(!tsk->pid))
56610- panic("Attempted to kill the idle task!");
56611
56612 /*
56613 * If do_exit is called because this processes oopsed, it's possible
56614@@ -924,6 +945,14 @@ NORET_TYPE void do_exit(long code)
56615 */
56616 set_fs(USER_DS);
56617
56618+ profile_task_exit(tsk);
56619+
56620+ WARN_ON(atomic_read(&tsk->fs_excl));
56621+ WARN_ON(blk_needs_flush_plug(tsk));
56622+
56623+ if (unlikely(!tsk->pid))
56624+ panic("Attempted to kill the idle task!");
56625+
56626 tracehook_report_exit(&code);
56627
56628 validate_creds_for_do_exit(tsk);
56629@@ -984,6 +1013,9 @@ NORET_TYPE void do_exit(long code)
56630 tsk->exit_code = code;
56631 taskstats_exit(tsk, group_dead);
56632
56633+ gr_acl_handle_psacct(tsk, code);
56634+ gr_acl_handle_exit();
56635+
56636 exit_mm(tsk);
56637
56638 if (group_dead)
56639diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56640--- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56641+++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56642@@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56643 *stackend = STACK_END_MAGIC; /* for overflow detection */
56644
56645 #ifdef CONFIG_CC_STACKPROTECTOR
56646- tsk->stack_canary = get_random_int();
56647+ tsk->stack_canary = pax_get_random_long();
56648 #endif
56649
56650 /* One for us, one for whoever does the "release_task()" (usually parent) */
56651@@ -309,13 +309,78 @@ out:
56652 }
56653
56654 #ifdef CONFIG_MMU
56655+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56656+{
56657+ struct vm_area_struct *tmp;
56658+ unsigned long charge;
56659+ struct mempolicy *pol;
56660+ struct file *file;
56661+
56662+ charge = 0;
56663+ if (mpnt->vm_flags & VM_ACCOUNT) {
56664+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56665+ if (security_vm_enough_memory(len))
56666+ goto fail_nomem;
56667+ charge = len;
56668+ }
56669+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56670+ if (!tmp)
56671+ goto fail_nomem;
56672+ *tmp = *mpnt;
56673+ tmp->vm_mm = mm;
56674+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
56675+ pol = mpol_dup(vma_policy(mpnt));
56676+ if (IS_ERR(pol))
56677+ goto fail_nomem_policy;
56678+ vma_set_policy(tmp, pol);
56679+ if (anon_vma_fork(tmp, mpnt))
56680+ goto fail_nomem_anon_vma_fork;
56681+ tmp->vm_flags &= ~VM_LOCKED;
56682+ tmp->vm_next = tmp->vm_prev = NULL;
56683+ tmp->vm_mirror = NULL;
56684+ file = tmp->vm_file;
56685+ if (file) {
56686+ struct inode *inode = file->f_path.dentry->d_inode;
56687+ struct address_space *mapping = file->f_mapping;
56688+
56689+ get_file(file);
56690+ if (tmp->vm_flags & VM_DENYWRITE)
56691+ atomic_dec(&inode->i_writecount);
56692+ spin_lock(&mapping->i_mmap_lock);
56693+ if (tmp->vm_flags & VM_SHARED)
56694+ mapping->i_mmap_writable++;
56695+ tmp->vm_truncate_count = mpnt->vm_truncate_count;
56696+ flush_dcache_mmap_lock(mapping);
56697+ /* insert tmp into the share list, just after mpnt */
56698+ vma_prio_tree_add(tmp, mpnt);
56699+ flush_dcache_mmap_unlock(mapping);
56700+ spin_unlock(&mapping->i_mmap_lock);
56701+ }
56702+
56703+ /*
56704+ * Clear hugetlb-related page reserves for children. This only
56705+ * affects MAP_PRIVATE mappings. Faults generated by the child
56706+ * are not guaranteed to succeed, even if read-only
56707+ */
56708+ if (is_vm_hugetlb_page(tmp))
56709+ reset_vma_resv_huge_pages(tmp);
56710+
56711+ return tmp;
56712+
56713+fail_nomem_anon_vma_fork:
56714+ mpol_put(pol);
56715+fail_nomem_policy:
56716+ kmem_cache_free(vm_area_cachep, tmp);
56717+fail_nomem:
56718+ vm_unacct_memory(charge);
56719+ return NULL;
56720+}
56721+
56722 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56723 {
56724 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56725 struct rb_node **rb_link, *rb_parent;
56726 int retval;
56727- unsigned long charge;
56728- struct mempolicy *pol;
56729
56730 down_write(&oldmm->mmap_sem);
56731 flush_cache_dup_mm(oldmm);
56732@@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56733 mm->locked_vm = 0;
56734 mm->mmap = NULL;
56735 mm->mmap_cache = NULL;
56736- mm->free_area_cache = oldmm->mmap_base;
56737- mm->cached_hole_size = ~0UL;
56738+ mm->free_area_cache = oldmm->free_area_cache;
56739+ mm->cached_hole_size = oldmm->cached_hole_size;
56740 mm->map_count = 0;
56741 cpumask_clear(mm_cpumask(mm));
56742 mm->mm_rb = RB_ROOT;
56743@@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56744
56745 prev = NULL;
56746 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56747- struct file *file;
56748-
56749 if (mpnt->vm_flags & VM_DONTCOPY) {
56750 long pages = vma_pages(mpnt);
56751 mm->total_vm -= pages;
56752@@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56753 -pages);
56754 continue;
56755 }
56756- charge = 0;
56757- if (mpnt->vm_flags & VM_ACCOUNT) {
56758- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56759- if (security_vm_enough_memory(len))
56760- goto fail_nomem;
56761- charge = len;
56762- }
56763- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56764- if (!tmp)
56765- goto fail_nomem;
56766- *tmp = *mpnt;
56767- INIT_LIST_HEAD(&tmp->anon_vma_chain);
56768- pol = mpol_dup(vma_policy(mpnt));
56769- retval = PTR_ERR(pol);
56770- if (IS_ERR(pol))
56771- goto fail_nomem_policy;
56772- vma_set_policy(tmp, pol);
56773- tmp->vm_mm = mm;
56774- if (anon_vma_fork(tmp, mpnt))
56775- goto fail_nomem_anon_vma_fork;
56776- tmp->vm_flags &= ~VM_LOCKED;
56777- tmp->vm_next = tmp->vm_prev = NULL;
56778- file = tmp->vm_file;
56779- if (file) {
56780- struct inode *inode = file->f_path.dentry->d_inode;
56781- struct address_space *mapping = file->f_mapping;
56782-
56783- get_file(file);
56784- if (tmp->vm_flags & VM_DENYWRITE)
56785- atomic_dec(&inode->i_writecount);
56786- spin_lock(&mapping->i_mmap_lock);
56787- if (tmp->vm_flags & VM_SHARED)
56788- mapping->i_mmap_writable++;
56789- tmp->vm_truncate_count = mpnt->vm_truncate_count;
56790- flush_dcache_mmap_lock(mapping);
56791- /* insert tmp into the share list, just after mpnt */
56792- vma_prio_tree_add(tmp, mpnt);
56793- flush_dcache_mmap_unlock(mapping);
56794- spin_unlock(&mapping->i_mmap_lock);
56795+ tmp = dup_vma(mm, mpnt);
56796+ if (!tmp) {
56797+ retval = -ENOMEM;
56798+ goto out;
56799 }
56800
56801 /*
56802- * Clear hugetlb-related page reserves for children. This only
56803- * affects MAP_PRIVATE mappings. Faults generated by the child
56804- * are not guaranteed to succeed, even if read-only
56805- */
56806- if (is_vm_hugetlb_page(tmp))
56807- reset_vma_resv_huge_pages(tmp);
56808-
56809- /*
56810 * Link in the new vma and copy the page table entries.
56811 */
56812 *pprev = tmp;
56813@@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56814 if (retval)
56815 goto out;
56816 }
56817+
56818+#ifdef CONFIG_PAX_SEGMEXEC
56819+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56820+ struct vm_area_struct *mpnt_m;
56821+
56822+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56823+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56824+
56825+ if (!mpnt->vm_mirror)
56826+ continue;
56827+
56828+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56829+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56830+ mpnt->vm_mirror = mpnt_m;
56831+ } else {
56832+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56833+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56834+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56835+ mpnt->vm_mirror->vm_mirror = mpnt;
56836+ }
56837+ }
56838+ BUG_ON(mpnt_m);
56839+ }
56840+#endif
56841+
56842 /* a new mm has just been created */
56843 arch_dup_mmap(oldmm, mm);
56844 retval = 0;
56845@@ -431,14 +476,6 @@ out:
56846 flush_tlb_mm(oldmm);
56847 up_write(&oldmm->mmap_sem);
56848 return retval;
56849-fail_nomem_anon_vma_fork:
56850- mpol_put(pol);
56851-fail_nomem_policy:
56852- kmem_cache_free(vm_area_cachep, tmp);
56853-fail_nomem:
56854- retval = -ENOMEM;
56855- vm_unacct_memory(charge);
56856- goto out;
56857 }
56858
56859 static inline int mm_alloc_pgd(struct mm_struct * mm)
56860@@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
56861 spin_unlock(&fs->lock);
56862 return -EAGAIN;
56863 }
56864- fs->users++;
56865+ atomic_inc(&fs->users);
56866 spin_unlock(&fs->lock);
56867 return 0;
56868 }
56869 tsk->fs = copy_fs_struct(fs);
56870 if (!tsk->fs)
56871 return -ENOMEM;
56872+ gr_set_chroot_entries(tsk, &tsk->fs->root);
56873 return 0;
56874 }
56875
56876@@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
56877 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56878 #endif
56879 retval = -EAGAIN;
56880+
56881+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56882+
56883 if (atomic_read(&p->real_cred->user->processes) >=
56884 task_rlimit(p, RLIMIT_NPROC)) {
56885- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56886- p->real_cred->user != INIT_USER)
56887+ if (p->real_cred->user != INIT_USER &&
56888+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56889 goto bad_fork_free;
56890 }
56891
56892@@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
56893 goto bad_fork_free_pid;
56894 }
56895
56896+ gr_copy_label(p);
56897+
56898 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56899 /*
56900 * Clear TID on mm_release()?
56901@@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
56902 bad_fork_free:
56903 free_task(p);
56904 fork_out:
56905+ gr_log_forkfail(retval);
56906+
56907 return ERR_PTR(retval);
56908 }
56909
56910@@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
56911 if (clone_flags & CLONE_PARENT_SETTID)
56912 put_user(nr, parent_tidptr);
56913
56914+ gr_handle_brute_check();
56915+
56916 if (clone_flags & CLONE_VFORK) {
56917 p->vfork_done = &vfork;
56918 init_completion(&vfork);
56919@@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
56920 return 0;
56921
56922 /* don't need lock here; in the worst case we'll do useless copy */
56923- if (fs->users == 1)
56924+ if (atomic_read(&fs->users) == 1)
56925 return 0;
56926
56927 *new_fsp = copy_fs_struct(fs);
56928@@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56929 fs = current->fs;
56930 spin_lock(&fs->lock);
56931 current->fs = new_fs;
56932- if (--fs->users)
56933+ gr_set_chroot_entries(current, &current->fs->root);
56934+ if (atomic_dec_return(&fs->users))
56935 new_fs = NULL;
56936 else
56937 new_fs = fs;
56938diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
56939--- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
56940+++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
56941@@ -54,6 +54,7 @@
56942 #include <linux/mount.h>
56943 #include <linux/pagemap.h>
56944 #include <linux/syscalls.h>
56945+#include <linux/ptrace.h>
56946 #include <linux/signal.h>
56947 #include <linux/module.h>
56948 #include <linux/magic.h>
56949@@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56950 struct page *page, *page_head;
56951 int err;
56952
56953+#ifdef CONFIG_PAX_SEGMEXEC
56954+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56955+ return -EFAULT;
56956+#endif
56957+
56958 /*
56959 * The futex address must be "naturally" aligned.
56960 */
56961@@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
56962 struct futex_q q = futex_q_init;
56963 int ret;
56964
56965+ pax_track_stack();
56966+
56967 if (!bitset)
56968 return -EINVAL;
56969 q.bitset = bitset;
56970@@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
56971 struct futex_q q = futex_q_init;
56972 int res, ret;
56973
56974+ pax_track_stack();
56975+
56976 if (!bitset)
56977 return -EINVAL;
56978
56979@@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56980 {
56981 struct robust_list_head __user *head;
56982 unsigned long ret;
56983+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56984 const struct cred *cred = current_cred(), *pcred;
56985+#endif
56986
56987 if (!futex_cmpxchg_enabled)
56988 return -ENOSYS;
56989@@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56990 if (!p)
56991 goto err_unlock;
56992 ret = -EPERM;
56993+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56994+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
56995+ goto err_unlock;
56996+#else
56997 pcred = __task_cred(p);
56998 /* If victim is in different user_ns, then uids are not
56999 comparable, so we must have CAP_SYS_PTRACE */
57000@@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57001 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57002 goto err_unlock;
57003 ok:
57004+#endif
57005 head = p->robust_list;
57006 rcu_read_unlock();
57007 }
57008@@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57009 {
57010 u32 curval;
57011 int i;
57012+ mm_segment_t oldfs;
57013
57014 /*
57015 * This will fail and we want it. Some arch implementations do
57016@@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57017 * implementation, the non-functional ones will return
57018 * -ENOSYS.
57019 */
57020+ oldfs = get_fs();
57021+ set_fs(USER_DS);
57022 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57023 futex_cmpxchg_enabled = 1;
57024+ set_fs(oldfs);
57025
57026 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57027 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57028diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57029--- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57030+++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57031@@ -10,6 +10,7 @@
57032 #include <linux/compat.h>
57033 #include <linux/nsproxy.h>
57034 #include <linux/futex.h>
57035+#include <linux/ptrace.h>
57036
57037 #include <asm/uaccess.h>
57038
57039@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57040 {
57041 struct compat_robust_list_head __user *head;
57042 unsigned long ret;
57043- const struct cred *cred = current_cred(), *pcred;
57044+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57045+ const struct cred *cred = current_cred();
57046+ const struct cred *pcred;
57047+#endif
57048
57049 if (!futex_cmpxchg_enabled)
57050 return -ENOSYS;
57051@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57052 if (!p)
57053 goto err_unlock;
57054 ret = -EPERM;
57055+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57056+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57057+ goto err_unlock;
57058+#else
57059 pcred = __task_cred(p);
57060 /* If victim is in different user_ns, then uids are not
57061 comparable, so we must have CAP_SYS_PTRACE */
57062@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57063 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57064 goto err_unlock;
57065 ok:
57066+#endif
57067 head = p->compat_robust_list;
57068 rcu_read_unlock();
57069 }
57070diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57071--- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57072+++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57073@@ -102,11 +102,6 @@ void gcov_enable_events(void)
57074 }
57075
57076 #ifdef CONFIG_MODULES
57077-static inline int within(void *addr, void *start, unsigned long size)
57078-{
57079- return ((addr >= start) && (addr < start + size));
57080-}
57081-
57082 /* Update list and generate events when modules are unloaded. */
57083 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57084 void *data)
57085@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57086 prev = NULL;
57087 /* Remove entries located in module from linked list. */
57088 for (info = gcov_info_head; info; info = info->next) {
57089- if (within(info, mod->module_core, mod->core_size)) {
57090+ if (within_module_core_rw((unsigned long)info, mod)) {
57091 if (prev)
57092 prev->next = info->next;
57093 else
57094diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57095--- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57096+++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57097@@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57098 local_irq_restore(flags);
57099 }
57100
57101-static void run_hrtimer_softirq(struct softirq_action *h)
57102+static void run_hrtimer_softirq(void)
57103 {
57104 hrtimer_peek_ahead_timers();
57105 }
57106diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57107--- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57108+++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57109@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57110 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57111 int ret = 0;
57112
57113+ if (!desc)
57114+ return -EINVAL;
57115+
57116 /* wakeup-capable irqs can be shared between drivers that
57117 * don't need to have the same sleep mode behaviors.
57118 */
57119diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57120--- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57121+++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57122@@ -49,6 +49,17 @@ void jump_label_unlock(void)
57123 mutex_unlock(&jump_label_mutex);
57124 }
57125
57126+static void jump_label_swap(void *a, void *b, int size)
57127+{
57128+ struct jump_entry t;
57129+
57130+ t = *(struct jump_entry *)a;
57131+ pax_open_kernel();
57132+ *(struct jump_entry *)a = *(struct jump_entry *)b;
57133+ *(struct jump_entry *)b = t;
57134+ pax_close_kernel();
57135+}
57136+
57137 static int jump_label_cmp(const void *a, const void *b)
57138 {
57139 const struct jump_entry *jea = a;
57140@@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57141
57142 size = (((unsigned long)stop - (unsigned long)start)
57143 / sizeof(struct jump_entry));
57144- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57145+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57146 }
57147
57148 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57149@@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57150 count = e_module->nr_entries;
57151 iter = e_module->table;
57152 while (count--) {
57153- if (within_module_init(iter->code, mod))
57154+ if (within_module_init(iter->code, mod)) {
57155+ pax_open_kernel();
57156 iter->key = 0;
57157+ pax_close_kernel();
57158+ }
57159 iter++;
57160 }
57161 }
57162diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57163--- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57164+++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57165@@ -11,6 +11,9 @@
57166 * Changed the compression method from stem compression to "table lookup"
57167 * compression (see scripts/kallsyms.c for a more complete description)
57168 */
57169+#ifdef CONFIG_GRKERNSEC_HIDESYM
57170+#define __INCLUDED_BY_HIDESYM 1
57171+#endif
57172 #include <linux/kallsyms.h>
57173 #include <linux/module.h>
57174 #include <linux/init.h>
57175@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57176
57177 static inline int is_kernel_inittext(unsigned long addr)
57178 {
57179+ if (system_state != SYSTEM_BOOTING)
57180+ return 0;
57181+
57182 if (addr >= (unsigned long)_sinittext
57183 && addr <= (unsigned long)_einittext)
57184 return 1;
57185 return 0;
57186 }
57187
57188+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57189+#ifdef CONFIG_MODULES
57190+static inline int is_module_text(unsigned long addr)
57191+{
57192+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57193+ return 1;
57194+
57195+ addr = ktla_ktva(addr);
57196+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57197+}
57198+#else
57199+static inline int is_module_text(unsigned long addr)
57200+{
57201+ return 0;
57202+}
57203+#endif
57204+#endif
57205+
57206 static inline int is_kernel_text(unsigned long addr)
57207 {
57208 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57209@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57210
57211 static inline int is_kernel(unsigned long addr)
57212 {
57213+
57214+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57215+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
57216+ return 1;
57217+
57218+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57219+#else
57220 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57221+#endif
57222+
57223 return 1;
57224 return in_gate_area_no_mm(addr);
57225 }
57226
57227 static int is_ksym_addr(unsigned long addr)
57228 {
57229+
57230+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57231+ if (is_module_text(addr))
57232+ return 0;
57233+#endif
57234+
57235 if (all_var)
57236 return is_kernel(addr);
57237
57238@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57239
57240 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57241 {
57242- iter->name[0] = '\0';
57243 iter->nameoff = get_symbol_offset(new_pos);
57244 iter->pos = new_pos;
57245 }
57246@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57247 {
57248 struct kallsym_iter *iter = m->private;
57249
57250+#ifdef CONFIG_GRKERNSEC_HIDESYM
57251+ if (current_uid())
57252+ return 0;
57253+#endif
57254+
57255 /* Some debugging symbols have no name. Ignore them. */
57256 if (!iter->name[0])
57257 return 0;
57258@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57259 struct kallsym_iter *iter;
57260 int ret;
57261
57262- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57263+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57264 if (!iter)
57265 return -ENOMEM;
57266 reset_iter(iter, 0);
57267diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57268--- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57269+++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57270@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57271 * If module auto-loading support is disabled then this function
57272 * becomes a no-operation.
57273 */
57274-int __request_module(bool wait, const char *fmt, ...)
57275+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57276 {
57277- va_list args;
57278 char module_name[MODULE_NAME_LEN];
57279 unsigned int max_modprobes;
57280 int ret;
57281- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57282+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57283 static char *envp[] = { "HOME=/",
57284 "TERM=linux",
57285 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57286@@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57287 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57288 static int kmod_loop_msg;
57289
57290- va_start(args, fmt);
57291- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57292- va_end(args);
57293+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57294 if (ret >= MODULE_NAME_LEN)
57295 return -ENAMETOOLONG;
57296
57297@@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57298 if (ret)
57299 return ret;
57300
57301+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57302+ if (!current_uid()) {
57303+ /* hack to workaround consolekit/udisks stupidity */
57304+ read_lock(&tasklist_lock);
57305+ if (!strcmp(current->comm, "mount") &&
57306+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57307+ read_unlock(&tasklist_lock);
57308+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57309+ return -EPERM;
57310+ }
57311+ read_unlock(&tasklist_lock);
57312+ }
57313+#endif
57314+
57315 /* If modprobe needs a service that is in a module, we get a recursive
57316 * loop. Limit the number of running kmod threads to max_threads/2 or
57317 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57318@@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57319 atomic_dec(&kmod_concurrent);
57320 return ret;
57321 }
57322+
57323+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57324+{
57325+ va_list args;
57326+ int ret;
57327+
57328+ va_start(args, fmt);
57329+ ret = ____request_module(wait, module_param, fmt, args);
57330+ va_end(args);
57331+
57332+ return ret;
57333+}
57334+
57335+int __request_module(bool wait, const char *fmt, ...)
57336+{
57337+ va_list args;
57338+ int ret;
57339+
57340+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57341+ if (current_uid()) {
57342+ char module_param[MODULE_NAME_LEN];
57343+
57344+ memset(module_param, 0, sizeof(module_param));
57345+
57346+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57347+
57348+ va_start(args, fmt);
57349+ ret = ____request_module(wait, module_param, fmt, args);
57350+ va_end(args);
57351+
57352+ return ret;
57353+ }
57354+#endif
57355+
57356+ va_start(args, fmt);
57357+ ret = ____request_module(wait, NULL, fmt, args);
57358+ va_end(args);
57359+
57360+ return ret;
57361+}
57362+
57363 EXPORT_SYMBOL(__request_module);
57364 #endif /* CONFIG_MODULES */
57365
57366diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57367--- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57368+++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57369@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57370 * kernel image and loaded module images reside. This is required
57371 * so x86_64 can correctly handle the %rip-relative fixups.
57372 */
57373- kip->insns = module_alloc(PAGE_SIZE);
57374+ kip->insns = module_alloc_exec(PAGE_SIZE);
57375 if (!kip->insns) {
57376 kfree(kip);
57377 return NULL;
57378@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57379 */
57380 if (!list_is_singular(&kip->list)) {
57381 list_del(&kip->list);
57382- module_free(NULL, kip->insns);
57383+ module_free_exec(NULL, kip->insns);
57384 kfree(kip);
57385 }
57386 return 1;
57387@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57388 {
57389 int i, err = 0;
57390 unsigned long offset = 0, size = 0;
57391- char *modname, namebuf[128];
57392+ char *modname, namebuf[KSYM_NAME_LEN];
57393 const char *symbol_name;
57394 void *addr;
57395 struct kprobe_blackpoint *kb;
57396@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57397 const char *sym = NULL;
57398 unsigned int i = *(loff_t *) v;
57399 unsigned long offset = 0;
57400- char *modname, namebuf[128];
57401+ char *modname, namebuf[KSYM_NAME_LEN];
57402
57403 head = &kprobe_table[i];
57404 preempt_disable();
57405diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57406--- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57407+++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57408@@ -571,6 +571,10 @@ static int static_obj(void *obj)
57409 end = (unsigned long) &_end,
57410 addr = (unsigned long) obj;
57411
57412+#ifdef CONFIG_PAX_KERNEXEC
57413+ start = ktla_ktva(start);
57414+#endif
57415+
57416 /*
57417 * static variable?
57418 */
57419@@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57420 if (!static_obj(lock->key)) {
57421 debug_locks_off();
57422 printk("INFO: trying to register non-static key.\n");
57423+ printk("lock:%pS key:%pS.\n", lock, lock->key);
57424 printk("the code is fine but needs lockdep annotation.\n");
57425 printk("turning off the locking correctness validator.\n");
57426 dump_stack();
57427@@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57428 if (!class)
57429 return 0;
57430 }
57431- atomic_inc((atomic_t *)&class->ops);
57432+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57433 if (very_verbose(class)) {
57434 printk("\nacquire class [%p] %s", class->key, class->name);
57435 if (class->name_version > 1)
57436diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57437--- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57438+++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57439@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57440
57441 static void print_name(struct seq_file *m, struct lock_class *class)
57442 {
57443- char str[128];
57444+ char str[KSYM_NAME_LEN];
57445 const char *name = class->name;
57446
57447 if (!name) {
57448diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57449--- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57450+++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57451@@ -57,6 +57,7 @@
57452 #include <linux/kmemleak.h>
57453 #include <linux/jump_label.h>
57454 #include <linux/pfn.h>
57455+#include <linux/grsecurity.h>
57456
57457 #define CREATE_TRACE_POINTS
57458 #include <trace/events/module.h>
57459@@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57460
57461 /* Bounds of module allocation, for speeding __module_address.
57462 * Protected by module_mutex. */
57463-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57464+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57465+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57466
57467 int register_module_notifier(struct notifier_block * nb)
57468 {
57469@@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57470 return true;
57471
57472 list_for_each_entry_rcu(mod, &modules, list) {
57473- struct symsearch arr[] = {
57474+ struct symsearch modarr[] = {
57475 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57476 NOT_GPL_ONLY, false },
57477 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57478@@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57479 #endif
57480 };
57481
57482- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57483+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57484 return true;
57485 }
57486 return false;
57487@@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57488 static int percpu_modalloc(struct module *mod,
57489 unsigned long size, unsigned long align)
57490 {
57491- if (align > PAGE_SIZE) {
57492+ if (align-1 >= PAGE_SIZE) {
57493 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57494 mod->name, align, PAGE_SIZE);
57495 align = PAGE_SIZE;
57496@@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57497 */
57498 #ifdef CONFIG_SYSFS
57499
57500-#ifdef CONFIG_KALLSYMS
57501+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57502 static inline bool sect_empty(const Elf_Shdr *sect)
57503 {
57504 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57505@@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57506 {
57507 unsigned long total_pages;
57508
57509- if (mod->module_core == module_region) {
57510+ if (mod->module_core_rx == module_region) {
57511 /* Set core as NX+RW */
57512- total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57513- set_memory_nx((unsigned long)mod->module_core, total_pages);
57514- set_memory_rw((unsigned long)mod->module_core, total_pages);
57515+ total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57516+ set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57517+ set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57518
57519- } else if (mod->module_init == module_region) {
57520+ } else if (mod->module_init_rx == module_region) {
57521 /* Set init as NX+RW */
57522- total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57523- set_memory_nx((unsigned long)mod->module_init, total_pages);
57524- set_memory_rw((unsigned long)mod->module_init, total_pages);
57525+ total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57526+ set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57527+ set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57528 }
57529 }
57530
57531@@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57532
57533 mutex_lock(&module_mutex);
57534 list_for_each_entry_rcu(mod, &modules, list) {
57535- if ((mod->module_core) && (mod->core_text_size)) {
57536- set_page_attributes(mod->module_core,
57537- mod->module_core + mod->core_text_size,
57538+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57539+ set_page_attributes(mod->module_core_rx,
57540+ mod->module_core_rx + mod->core_size_rx,
57541 set_memory_rw);
57542 }
57543- if ((mod->module_init) && (mod->init_text_size)) {
57544- set_page_attributes(mod->module_init,
57545- mod->module_init + mod->init_text_size,
57546+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57547+ set_page_attributes(mod->module_init_rx,
57548+ mod->module_init_rx + mod->init_size_rx,
57549 set_memory_rw);
57550 }
57551 }
57552@@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57553
57554 mutex_lock(&module_mutex);
57555 list_for_each_entry_rcu(mod, &modules, list) {
57556- if ((mod->module_core) && (mod->core_text_size)) {
57557- set_page_attributes(mod->module_core,
57558- mod->module_core + mod->core_text_size,
57559+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57560+ set_page_attributes(mod->module_core_rx,
57561+ mod->module_core_rx + mod->core_size_rx,
57562 set_memory_ro);
57563 }
57564- if ((mod->module_init) && (mod->init_text_size)) {
57565- set_page_attributes(mod->module_init,
57566- mod->module_init + mod->init_text_size,
57567+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57568+ set_page_attributes(mod->module_init_rx,
57569+ mod->module_init_rx + mod->init_size_rx,
57570 set_memory_ro);
57571 }
57572 }
57573@@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57574 destroy_params(mod->kp, mod->num_kp);
57575
57576 /* This may be NULL, but that's OK */
57577- unset_section_ro_nx(mod, mod->module_init);
57578- module_free(mod, mod->module_init);
57579+ unset_section_ro_nx(mod, mod->module_init_rx);
57580+ module_free(mod, mod->module_init_rw);
57581+ module_free_exec(mod, mod->module_init_rx);
57582 kfree(mod->args);
57583 percpu_modfree(mod);
57584
57585 /* Free lock-classes: */
57586- lockdep_free_key_range(mod->module_core, mod->core_size);
57587+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57588+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57589
57590 /* Finally, free the core (containing the module structure) */
57591- unset_section_ro_nx(mod, mod->module_core);
57592- module_free(mod, mod->module_core);
57593+ unset_section_ro_nx(mod, mod->module_core_rx);
57594+ module_free_exec(mod, mod->module_core_rx);
57595+ module_free(mod, mod->module_core_rw);
57596
57597 #ifdef CONFIG_MPU
57598 update_protections(current->mm);
57599@@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57600 unsigned int i;
57601 int ret = 0;
57602 const struct kernel_symbol *ksym;
57603+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57604+ int is_fs_load = 0;
57605+ int register_filesystem_found = 0;
57606+ char *p;
57607+
57608+ p = strstr(mod->args, "grsec_modharden_fs");
57609+ if (p) {
57610+ char *endptr = p + strlen("grsec_modharden_fs");
57611+ /* copy \0 as well */
57612+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57613+ is_fs_load = 1;
57614+ }
57615+#endif
57616
57617 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57618 const char *name = info->strtab + sym[i].st_name;
57619
57620+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57621+ /* it's a real shame this will never get ripped and copied
57622+ upstream! ;(
57623+ */
57624+ if (is_fs_load && !strcmp(name, "register_filesystem"))
57625+ register_filesystem_found = 1;
57626+#endif
57627+
57628 switch (sym[i].st_shndx) {
57629 case SHN_COMMON:
57630 /* We compiled with -fno-common. These are not
57631@@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57632 ksym = resolve_symbol_wait(mod, info, name);
57633 /* Ok if resolved. */
57634 if (ksym && !IS_ERR(ksym)) {
57635+ pax_open_kernel();
57636 sym[i].st_value = ksym->value;
57637+ pax_close_kernel();
57638 break;
57639 }
57640
57641@@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57642 secbase = (unsigned long)mod_percpu(mod);
57643 else
57644 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57645+ pax_open_kernel();
57646 sym[i].st_value += secbase;
57647+ pax_close_kernel();
57648 break;
57649 }
57650 }
57651
57652+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57653+ if (is_fs_load && !register_filesystem_found) {
57654+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57655+ ret = -EPERM;
57656+ }
57657+#endif
57658+
57659 return ret;
57660 }
57661
57662@@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57663 || s->sh_entsize != ~0UL
57664 || strstarts(sname, ".init"))
57665 continue;
57666- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57667+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57668+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57669+ else
57670+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57671 DEBUGP("\t%s\n", name);
57672 }
57673- switch (m) {
57674- case 0: /* executable */
57675- mod->core_size = debug_align(mod->core_size);
57676- mod->core_text_size = mod->core_size;
57677- break;
57678- case 1: /* RO: text and ro-data */
57679- mod->core_size = debug_align(mod->core_size);
57680- mod->core_ro_size = mod->core_size;
57681- break;
57682- case 3: /* whole core */
57683- mod->core_size = debug_align(mod->core_size);
57684- break;
57685- }
57686 }
57687
57688 DEBUGP("Init section allocation order:\n");
57689@@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57690 || s->sh_entsize != ~0UL
57691 || !strstarts(sname, ".init"))
57692 continue;
57693- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57694- | INIT_OFFSET_MASK);
57695+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57696+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57697+ else
57698+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57699+ s->sh_entsize |= INIT_OFFSET_MASK;
57700 DEBUGP("\t%s\n", sname);
57701 }
57702- switch (m) {
57703- case 0: /* executable */
57704- mod->init_size = debug_align(mod->init_size);
57705- mod->init_text_size = mod->init_size;
57706- break;
57707- case 1: /* RO: text and ro-data */
57708- mod->init_size = debug_align(mod->init_size);
57709- mod->init_ro_size = mod->init_size;
57710- break;
57711- case 3: /* whole init */
57712- mod->init_size = debug_align(mod->init_size);
57713- break;
57714- }
57715 }
57716 }
57717
57718@@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57719
57720 /* Put symbol section at end of init part of module. */
57721 symsect->sh_flags |= SHF_ALLOC;
57722- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57723+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57724 info->index.sym) | INIT_OFFSET_MASK;
57725 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57726
57727@@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57728 }
57729
57730 /* Append room for core symbols at end of core part. */
57731- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57732- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57733+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57734+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57735
57736 /* Put string table section at end of init part of module. */
57737 strsect->sh_flags |= SHF_ALLOC;
57738- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57739+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57740 info->index.str) | INIT_OFFSET_MASK;
57741 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57742
57743 /* Append room for core symbols' strings at end of core part. */
57744- info->stroffs = mod->core_size;
57745+ info->stroffs = mod->core_size_rx;
57746 __set_bit(0, info->strmap);
57747- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57748+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57749 }
57750
57751 static void add_kallsyms(struct module *mod, const struct load_info *info)
57752@@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57753 /* Make sure we get permanent strtab: don't use info->strtab. */
57754 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57755
57756+ pax_open_kernel();
57757+
57758 /* Set types up while we still have access to sections. */
57759 for (i = 0; i < mod->num_symtab; i++)
57760 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57761
57762- mod->core_symtab = dst = mod->module_core + info->symoffs;
57763+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57764 src = mod->symtab;
57765 *dst = *src;
57766 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57767@@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57768 }
57769 mod->core_num_syms = ndst;
57770
57771- mod->core_strtab = s = mod->module_core + info->stroffs;
57772+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57773 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57774 if (test_bit(i, info->strmap))
57775 *++s = mod->strtab[i];
57776+
57777+ pax_close_kernel();
57778 }
57779 #else
57780 static inline void layout_symtab(struct module *mod, struct load_info *info)
57781@@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57782 ddebug_remove_module(debug->modname);
57783 }
57784
57785-static void *module_alloc_update_bounds(unsigned long size)
57786+static void *module_alloc_update_bounds_rw(unsigned long size)
57787 {
57788 void *ret = module_alloc(size);
57789
57790 if (ret) {
57791 mutex_lock(&module_mutex);
57792 /* Update module bounds. */
57793- if ((unsigned long)ret < module_addr_min)
57794- module_addr_min = (unsigned long)ret;
57795- if ((unsigned long)ret + size > module_addr_max)
57796- module_addr_max = (unsigned long)ret + size;
57797+ if ((unsigned long)ret < module_addr_min_rw)
57798+ module_addr_min_rw = (unsigned long)ret;
57799+ if ((unsigned long)ret + size > module_addr_max_rw)
57800+ module_addr_max_rw = (unsigned long)ret + size;
57801+ mutex_unlock(&module_mutex);
57802+ }
57803+ return ret;
57804+}
57805+
57806+static void *module_alloc_update_bounds_rx(unsigned long size)
57807+{
57808+ void *ret = module_alloc_exec(size);
57809+
57810+ if (ret) {
57811+ mutex_lock(&module_mutex);
57812+ /* Update module bounds. */
57813+ if ((unsigned long)ret < module_addr_min_rx)
57814+ module_addr_min_rx = (unsigned long)ret;
57815+ if ((unsigned long)ret + size > module_addr_max_rx)
57816+ module_addr_max_rx = (unsigned long)ret + size;
57817 mutex_unlock(&module_mutex);
57818 }
57819 return ret;
57820@@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57821 void *ptr;
57822
57823 /* Do the allocs. */
57824- ptr = module_alloc_update_bounds(mod->core_size);
57825+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57826 /*
57827 * The pointer to this block is stored in the module structure
57828 * which is inside the block. Just mark it as not being a
57829@@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
57830 if (!ptr)
57831 return -ENOMEM;
57832
57833- memset(ptr, 0, mod->core_size);
57834- mod->module_core = ptr;
57835+ memset(ptr, 0, mod->core_size_rw);
57836+ mod->module_core_rw = ptr;
57837
57838- ptr = module_alloc_update_bounds(mod->init_size);
57839+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57840 /*
57841 * The pointer to this block is stored in the module structure
57842 * which is inside the block. This block doesn't need to be
57843 * scanned as it contains data and code that will be freed
57844 * after the module is initialized.
57845 */
57846- kmemleak_ignore(ptr);
57847- if (!ptr && mod->init_size) {
57848- module_free(mod, mod->module_core);
57849+ kmemleak_not_leak(ptr);
57850+ if (!ptr && mod->init_size_rw) {
57851+ module_free(mod, mod->module_core_rw);
57852 return -ENOMEM;
57853 }
57854- memset(ptr, 0, mod->init_size);
57855- mod->module_init = ptr;
57856+ memset(ptr, 0, mod->init_size_rw);
57857+ mod->module_init_rw = ptr;
57858+
57859+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57860+ kmemleak_not_leak(ptr);
57861+ if (!ptr) {
57862+ module_free(mod, mod->module_init_rw);
57863+ module_free(mod, mod->module_core_rw);
57864+ return -ENOMEM;
57865+ }
57866+
57867+ pax_open_kernel();
57868+ memset(ptr, 0, mod->core_size_rx);
57869+ pax_close_kernel();
57870+ mod->module_core_rx = ptr;
57871+
57872+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57873+ kmemleak_not_leak(ptr);
57874+ if (!ptr && mod->init_size_rx) {
57875+ module_free_exec(mod, mod->module_core_rx);
57876+ module_free(mod, mod->module_init_rw);
57877+ module_free(mod, mod->module_core_rw);
57878+ return -ENOMEM;
57879+ }
57880+
57881+ pax_open_kernel();
57882+ memset(ptr, 0, mod->init_size_rx);
57883+ pax_close_kernel();
57884+ mod->module_init_rx = ptr;
57885
57886 /* Transfer each section which specifies SHF_ALLOC */
57887 DEBUGP("final section addresses:\n");
57888@@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
57889 if (!(shdr->sh_flags & SHF_ALLOC))
57890 continue;
57891
57892- if (shdr->sh_entsize & INIT_OFFSET_MASK)
57893- dest = mod->module_init
57894- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57895- else
57896- dest = mod->module_core + shdr->sh_entsize;
57897+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57898+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57899+ dest = mod->module_init_rw
57900+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57901+ else
57902+ dest = mod->module_init_rx
57903+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57904+ } else {
57905+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57906+ dest = mod->module_core_rw + shdr->sh_entsize;
57907+ else
57908+ dest = mod->module_core_rx + shdr->sh_entsize;
57909+ }
57910+
57911+ if (shdr->sh_type != SHT_NOBITS) {
57912+
57913+#ifdef CONFIG_PAX_KERNEXEC
57914+#ifdef CONFIG_X86_64
57915+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57916+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57917+#endif
57918+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57919+ pax_open_kernel();
57920+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57921+ pax_close_kernel();
57922+ } else
57923+#endif
57924
57925- if (shdr->sh_type != SHT_NOBITS)
57926 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57927+ }
57928 /* Update sh_addr to point to copy in image. */
57929- shdr->sh_addr = (unsigned long)dest;
57930+
57931+#ifdef CONFIG_PAX_KERNEXEC
57932+ if (shdr->sh_flags & SHF_EXECINSTR)
57933+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
57934+ else
57935+#endif
57936+
57937+ shdr->sh_addr = (unsigned long)dest;
57938 DEBUGP("\t0x%lx %s\n",
57939 shdr->sh_addr, info->secstrings + shdr->sh_name);
57940 }
57941@@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
57942 * Do it before processing of module parameters, so the module
57943 * can provide parameter accessor functions of its own.
57944 */
57945- if (mod->module_init)
57946- flush_icache_range((unsigned long)mod->module_init,
57947- (unsigned long)mod->module_init
57948- + mod->init_size);
57949- flush_icache_range((unsigned long)mod->module_core,
57950- (unsigned long)mod->module_core + mod->core_size);
57951+ if (mod->module_init_rx)
57952+ flush_icache_range((unsigned long)mod->module_init_rx,
57953+ (unsigned long)mod->module_init_rx
57954+ + mod->init_size_rx);
57955+ flush_icache_range((unsigned long)mod->module_core_rx,
57956+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
57957
57958 set_fs(old_fs);
57959 }
57960@@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
57961 {
57962 kfree(info->strmap);
57963 percpu_modfree(mod);
57964- module_free(mod, mod->module_init);
57965- module_free(mod, mod->module_core);
57966+ module_free_exec(mod, mod->module_init_rx);
57967+ module_free_exec(mod, mod->module_core_rx);
57968+ module_free(mod, mod->module_init_rw);
57969+ module_free(mod, mod->module_core_rw);
57970 }
57971
57972 static int post_relocation(struct module *mod, const struct load_info *info)
57973@@ -2748,9 +2843,38 @@ static struct module *load_module(void _
57974 if (err)
57975 goto free_unload;
57976
57977+ /* Now copy in args */
57978+ mod->args = strndup_user(uargs, ~0UL >> 1);
57979+ if (IS_ERR(mod->args)) {
57980+ err = PTR_ERR(mod->args);
57981+ goto free_unload;
57982+ }
57983+
57984 /* Set up MODINFO_ATTR fields */
57985 setup_modinfo(mod, &info);
57986
57987+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57988+ {
57989+ char *p, *p2;
57990+
57991+ if (strstr(mod->args, "grsec_modharden_netdev")) {
57992+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57993+ err = -EPERM;
57994+ goto free_modinfo;
57995+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57996+ p += strlen("grsec_modharden_normal");
57997+ p2 = strstr(p, "_");
57998+ if (p2) {
57999+ *p2 = '\0';
58000+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58001+ *p2 = '_';
58002+ }
58003+ err = -EPERM;
58004+ goto free_modinfo;
58005+ }
58006+ }
58007+#endif
58008+
58009 /* Fix up syms, so that st_value is a pointer to location. */
58010 err = simplify_symbols(mod, &info);
58011 if (err < 0)
58012@@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58013
58014 flush_module_icache(mod);
58015
58016- /* Now copy in args */
58017- mod->args = strndup_user(uargs, ~0UL >> 1);
58018- if (IS_ERR(mod->args)) {
58019- err = PTR_ERR(mod->args);
58020- goto free_arch_cleanup;
58021- }
58022-
58023 /* Mark state as coming so strong_try_module_get() ignores us. */
58024 mod->state = MODULE_STATE_COMING;
58025
58026@@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58027 unlock:
58028 mutex_unlock(&module_mutex);
58029 synchronize_sched();
58030- kfree(mod->args);
58031- free_arch_cleanup:
58032 module_arch_cleanup(mod);
58033 free_modinfo:
58034 free_modinfo(mod);
58035+ kfree(mod->args);
58036 free_unload:
58037 module_unload_free(mod);
58038 free_module:
58039@@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58040 MODULE_STATE_COMING, mod);
58041
58042 /* Set RO and NX regions for core */
58043- set_section_ro_nx(mod->module_core,
58044- mod->core_text_size,
58045- mod->core_ro_size,
58046- mod->core_size);
58047+ set_section_ro_nx(mod->module_core_rx,
58048+ mod->core_size_rx,
58049+ mod->core_size_rx,
58050+ mod->core_size_rx);
58051
58052 /* Set RO and NX regions for init */
58053- set_section_ro_nx(mod->module_init,
58054- mod->init_text_size,
58055- mod->init_ro_size,
58056- mod->init_size);
58057+ set_section_ro_nx(mod->module_init_rx,
58058+ mod->init_size_rx,
58059+ mod->init_size_rx,
58060+ mod->init_size_rx);
58061
58062 do_mod_ctors(mod);
58063 /* Start the module */
58064@@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58065 mod->symtab = mod->core_symtab;
58066 mod->strtab = mod->core_strtab;
58067 #endif
58068- unset_section_ro_nx(mod, mod->module_init);
58069- module_free(mod, mod->module_init);
58070- mod->module_init = NULL;
58071- mod->init_size = 0;
58072- mod->init_text_size = 0;
58073+ unset_section_ro_nx(mod, mod->module_init_rx);
58074+ module_free(mod, mod->module_init_rw);
58075+ module_free_exec(mod, mod->module_init_rx);
58076+ mod->module_init_rw = NULL;
58077+ mod->module_init_rx = NULL;
58078+ mod->init_size_rw = 0;
58079+ mod->init_size_rx = 0;
58080 mutex_unlock(&module_mutex);
58081
58082 return 0;
58083@@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58084 unsigned long nextval;
58085
58086 /* At worse, next value is at end of module */
58087- if (within_module_init(addr, mod))
58088- nextval = (unsigned long)mod->module_init+mod->init_text_size;
58089+ if (within_module_init_rx(addr, mod))
58090+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58091+ else if (within_module_init_rw(addr, mod))
58092+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58093+ else if (within_module_core_rx(addr, mod))
58094+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58095+ else if (within_module_core_rw(addr, mod))
58096+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58097 else
58098- nextval = (unsigned long)mod->module_core+mod->core_text_size;
58099+ return NULL;
58100
58101 /* Scan for closest preceding symbol, and next symbol. (ELF
58102 starts real symbols at 1). */
58103@@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58104 char buf[8];
58105
58106 seq_printf(m, "%s %u",
58107- mod->name, mod->init_size + mod->core_size);
58108+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58109 print_unload_info(m, mod);
58110
58111 /* Informative for users. */
58112@@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58113 mod->state == MODULE_STATE_COMING ? "Loading":
58114 "Live");
58115 /* Used by oprofile and other similar tools. */
58116- seq_printf(m, " 0x%pK", mod->module_core);
58117+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58118
58119 /* Taints info */
58120 if (mod->taints)
58121@@ -3260,7 +3384,17 @@ static const struct file_operations proc
58122
58123 static int __init proc_modules_init(void)
58124 {
58125+#ifndef CONFIG_GRKERNSEC_HIDESYM
58126+#ifdef CONFIG_GRKERNSEC_PROC_USER
58127+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58128+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58129+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58130+#else
58131 proc_create("modules", 0, NULL, &proc_modules_operations);
58132+#endif
58133+#else
58134+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58135+#endif
58136 return 0;
58137 }
58138 module_init(proc_modules_init);
58139@@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58140 {
58141 struct module *mod;
58142
58143- if (addr < module_addr_min || addr > module_addr_max)
58144+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58145+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
58146 return NULL;
58147
58148 list_for_each_entry_rcu(mod, &modules, list)
58149- if (within_module_core(addr, mod)
58150- || within_module_init(addr, mod))
58151+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
58152 return mod;
58153 return NULL;
58154 }
58155@@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58156 */
58157 struct module *__module_text_address(unsigned long addr)
58158 {
58159- struct module *mod = __module_address(addr);
58160+ struct module *mod;
58161+
58162+#ifdef CONFIG_X86_32
58163+ addr = ktla_ktva(addr);
58164+#endif
58165+
58166+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58167+ return NULL;
58168+
58169+ mod = __module_address(addr);
58170+
58171 if (mod) {
58172 /* Make sure it's within the text section. */
58173- if (!within(addr, mod->module_init, mod->init_text_size)
58174- && !within(addr, mod->module_core, mod->core_text_size))
58175+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58176 mod = NULL;
58177 }
58178 return mod;
58179diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58180--- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58181+++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58182@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58183 */
58184
58185 for (;;) {
58186- struct thread_info *owner;
58187+ struct task_struct *owner;
58188
58189 /*
58190 * If we own the BKL, then don't spin. The owner of
58191@@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58192 spin_lock_mutex(&lock->wait_lock, flags);
58193
58194 debug_mutex_lock_common(lock, &waiter);
58195- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58196+ debug_mutex_add_waiter(lock, &waiter, task);
58197
58198 /* add waiting tasks to the end of the waitqueue (FIFO): */
58199 list_add_tail(&waiter.list, &lock->wait_list);
58200@@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58201 * TASK_UNINTERRUPTIBLE case.)
58202 */
58203 if (unlikely(signal_pending_state(state, task))) {
58204- mutex_remove_waiter(lock, &waiter,
58205- task_thread_info(task));
58206+ mutex_remove_waiter(lock, &waiter, task);
58207 mutex_release(&lock->dep_map, 1, ip);
58208 spin_unlock_mutex(&lock->wait_lock, flags);
58209
58210@@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58211 done:
58212 lock_acquired(&lock->dep_map, ip);
58213 /* got the lock - rejoice! */
58214- mutex_remove_waiter(lock, &waiter, current_thread_info());
58215+ mutex_remove_waiter(lock, &waiter, task);
58216 mutex_set_owner(lock);
58217
58218 /* set it to 0 if there are no waiters left: */
58219diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58220--- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58221+++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58222@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58223 }
58224
58225 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58226- struct thread_info *ti)
58227+ struct task_struct *task)
58228 {
58229 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58230
58231 /* Mark the current thread as blocked on the lock: */
58232- ti->task->blocked_on = waiter;
58233+ task->blocked_on = waiter;
58234 }
58235
58236 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58237- struct thread_info *ti)
58238+ struct task_struct *task)
58239 {
58240 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58241- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58242- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58243- ti->task->blocked_on = NULL;
58244+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
58245+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58246+ task->blocked_on = NULL;
58247
58248 list_del_init(&waiter->list);
58249 waiter->task = NULL;
58250@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58251 return;
58252
58253 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58254- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58255+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
58256 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58257 mutex_clear_owner(lock);
58258 }
58259diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58260--- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58261+++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58262@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58263 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58264 extern void debug_mutex_add_waiter(struct mutex *lock,
58265 struct mutex_waiter *waiter,
58266- struct thread_info *ti);
58267+ struct task_struct *task);
58268 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58269- struct thread_info *ti);
58270+ struct task_struct *task);
58271 extern void debug_mutex_unlock(struct mutex *lock);
58272 extern void debug_mutex_init(struct mutex *lock, const char *name,
58273 struct lock_class_key *key);
58274
58275 static inline void mutex_set_owner(struct mutex *lock)
58276 {
58277- lock->owner = current_thread_info();
58278+ lock->owner = current;
58279 }
58280
58281 static inline void mutex_clear_owner(struct mutex *lock)
58282diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58283--- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58284+++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58285@@ -19,7 +19,7 @@
58286 #ifdef CONFIG_SMP
58287 static inline void mutex_set_owner(struct mutex *lock)
58288 {
58289- lock->owner = current_thread_info();
58290+ lock->owner = current;
58291 }
58292
58293 static inline void mutex_clear_owner(struct mutex *lock)
58294diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58295--- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58296+++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58297@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58298 padata->pd = pd;
58299 padata->cb_cpu = cb_cpu;
58300
58301- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58302- atomic_set(&pd->seq_nr, -1);
58303+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58304+ atomic_set_unchecked(&pd->seq_nr, -1);
58305
58306- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58307+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58308
58309 target_cpu = padata_cpu_hash(padata);
58310 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58311@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58312 padata_init_pqueues(pd);
58313 padata_init_squeues(pd);
58314 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58315- atomic_set(&pd->seq_nr, -1);
58316+ atomic_set_unchecked(&pd->seq_nr, -1);
58317 atomic_set(&pd->reorder_objects, 0);
58318 atomic_set(&pd->refcnt, 0);
58319 pd->pinst = pinst;
58320diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58321--- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58322+++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58323@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58324 const char *board;
58325
58326 printk(KERN_WARNING "------------[ cut here ]------------\n");
58327- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58328+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58329 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58330 if (board)
58331 printk(KERN_WARNING "Hardware name: %s\n", board);
58332@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58333 */
58334 void __stack_chk_fail(void)
58335 {
58336- panic("stack-protector: Kernel stack is corrupted in: %p\n",
58337+ dump_stack();
58338+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58339 __builtin_return_address(0));
58340 }
58341 EXPORT_SYMBOL(__stack_chk_fail);
58342diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58343--- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58344+++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58345@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58346 return 0;
58347 }
58348
58349-static atomic64_t perf_event_id;
58350+static atomic64_unchecked_t perf_event_id;
58351
58352 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58353 enum event_type_t event_type);
58354@@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58355
58356 static inline u64 perf_event_count(struct perf_event *event)
58357 {
58358- return local64_read(&event->count) + atomic64_read(&event->child_count);
58359+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58360 }
58361
58362 static u64 perf_event_read(struct perf_event *event)
58363@@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58364 mutex_lock(&event->child_mutex);
58365 total += perf_event_read(event);
58366 *enabled += event->total_time_enabled +
58367- atomic64_read(&event->child_total_time_enabled);
58368+ atomic64_read_unchecked(&event->child_total_time_enabled);
58369 *running += event->total_time_running +
58370- atomic64_read(&event->child_total_time_running);
58371+ atomic64_read_unchecked(&event->child_total_time_running);
58372
58373 list_for_each_entry(child, &event->child_list, child_list) {
58374 total += perf_event_read(child);
58375@@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58376 userpg->offset -= local64_read(&event->hw.prev_count);
58377
58378 userpg->time_enabled = event->total_time_enabled +
58379- atomic64_read(&event->child_total_time_enabled);
58380+ atomic64_read_unchecked(&event->child_total_time_enabled);
58381
58382 userpg->time_running = event->total_time_running +
58383- atomic64_read(&event->child_total_time_running);
58384+ atomic64_read_unchecked(&event->child_total_time_running);
58385
58386 barrier();
58387 ++userpg->lock;
58388@@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58389 values[n++] = perf_event_count(event);
58390 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58391 values[n++] = enabled +
58392- atomic64_read(&event->child_total_time_enabled);
58393+ atomic64_read_unchecked(&event->child_total_time_enabled);
58394 }
58395 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58396 values[n++] = running +
58397- atomic64_read(&event->child_total_time_running);
58398+ atomic64_read_unchecked(&event->child_total_time_running);
58399 }
58400 if (read_format & PERF_FORMAT_ID)
58401 values[n++] = primary_event_id(event);
58402@@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58403 event->parent = parent_event;
58404
58405 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58406- event->id = atomic64_inc_return(&perf_event_id);
58407+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
58408
58409 event->state = PERF_EVENT_STATE_INACTIVE;
58410
58411@@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58412 /*
58413 * Add back the child's count to the parent's count:
58414 */
58415- atomic64_add(child_val, &parent_event->child_count);
58416- atomic64_add(child_event->total_time_enabled,
58417+ atomic64_add_unchecked(child_val, &parent_event->child_count);
58418+ atomic64_add_unchecked(child_event->total_time_enabled,
58419 &parent_event->child_total_time_enabled);
58420- atomic64_add(child_event->total_time_running,
58421+ atomic64_add_unchecked(child_event->total_time_running,
58422 &parent_event->child_total_time_running);
58423
58424 /*
58425diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58426--- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58427+++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58428@@ -33,6 +33,7 @@
58429 #include <linux/rculist.h>
58430 #include <linux/bootmem.h>
58431 #include <linux/hash.h>
58432+#include <linux/security.h>
58433 #include <linux/pid_namespace.h>
58434 #include <linux/init_task.h>
58435 #include <linux/syscalls.h>
58436@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58437
58438 int pid_max = PID_MAX_DEFAULT;
58439
58440-#define RESERVED_PIDS 300
58441+#define RESERVED_PIDS 500
58442
58443 int pid_max_min = RESERVED_PIDS + 1;
58444 int pid_max_max = PID_MAX_LIMIT;
58445@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58446 */
58447 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58448 {
58449+ struct task_struct *task;
58450+
58451 rcu_lockdep_assert(rcu_read_lock_held());
58452- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58453+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58454+
58455+ if (gr_pid_is_chrooted(task))
58456+ return NULL;
58457+
58458+ return task;
58459 }
58460
58461 struct task_struct *find_task_by_vpid(pid_t vnr)
58462@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58463 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58464 }
58465
58466+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58467+{
58468+ rcu_lockdep_assert(rcu_read_lock_held());
58469+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58470+}
58471+
58472 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58473 {
58474 struct pid *pid;
58475diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58476--- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58477+++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58478@@ -6,6 +6,7 @@
58479 #include <linux/posix-timers.h>
58480 #include <linux/errno.h>
58481 #include <linux/math64.h>
58482+#include <linux/security.h>
58483 #include <asm/uaccess.h>
58484 #include <linux/kernel_stat.h>
58485 #include <trace/events/timer.h>
58486@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58487
58488 static __init int init_posix_cpu_timers(void)
58489 {
58490- struct k_clock process = {
58491+ static struct k_clock process = {
58492 .clock_getres = process_cpu_clock_getres,
58493 .clock_get = process_cpu_clock_get,
58494 .timer_create = process_cpu_timer_create,
58495 .nsleep = process_cpu_nsleep,
58496 .nsleep_restart = process_cpu_nsleep_restart,
58497 };
58498- struct k_clock thread = {
58499+ static struct k_clock thread = {
58500 .clock_getres = thread_cpu_clock_getres,
58501 .clock_get = thread_cpu_clock_get,
58502 .timer_create = thread_cpu_timer_create,
58503diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58504--- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58505+++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58506@@ -43,6 +43,7 @@
58507 #include <linux/idr.h>
58508 #include <linux/posix-clock.h>
58509 #include <linux/posix-timers.h>
58510+#include <linux/grsecurity.h>
58511 #include <linux/syscalls.h>
58512 #include <linux/wait.h>
58513 #include <linux/workqueue.h>
58514@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58515 * which we beg off on and pass to do_sys_settimeofday().
58516 */
58517
58518-static struct k_clock posix_clocks[MAX_CLOCKS];
58519+static struct k_clock *posix_clocks[MAX_CLOCKS];
58520
58521 /*
58522 * These ones are defined below.
58523@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58524 */
58525 static __init int init_posix_timers(void)
58526 {
58527- struct k_clock clock_realtime = {
58528+ static struct k_clock clock_realtime = {
58529 .clock_getres = hrtimer_get_res,
58530 .clock_get = posix_clock_realtime_get,
58531 .clock_set = posix_clock_realtime_set,
58532@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58533 .timer_get = common_timer_get,
58534 .timer_del = common_timer_del,
58535 };
58536- struct k_clock clock_monotonic = {
58537+ static struct k_clock clock_monotonic = {
58538 .clock_getres = hrtimer_get_res,
58539 .clock_get = posix_ktime_get_ts,
58540 .nsleep = common_nsleep,
58541@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58542 .timer_get = common_timer_get,
58543 .timer_del = common_timer_del,
58544 };
58545- struct k_clock clock_monotonic_raw = {
58546+ static struct k_clock clock_monotonic_raw = {
58547 .clock_getres = hrtimer_get_res,
58548 .clock_get = posix_get_monotonic_raw,
58549 };
58550- struct k_clock clock_realtime_coarse = {
58551+ static struct k_clock clock_realtime_coarse = {
58552 .clock_getres = posix_get_coarse_res,
58553 .clock_get = posix_get_realtime_coarse,
58554 };
58555- struct k_clock clock_monotonic_coarse = {
58556+ static struct k_clock clock_monotonic_coarse = {
58557 .clock_getres = posix_get_coarse_res,
58558 .clock_get = posix_get_monotonic_coarse,
58559 };
58560- struct k_clock clock_boottime = {
58561+ static struct k_clock clock_boottime = {
58562 .clock_getres = hrtimer_get_res,
58563 .clock_get = posix_get_boottime,
58564 .nsleep = common_nsleep,
58565@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58566 .timer_del = common_timer_del,
58567 };
58568
58569+ pax_track_stack();
58570+
58571 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58572 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58573 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58574@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58575 return;
58576 }
58577
58578- posix_clocks[clock_id] = *new_clock;
58579+ posix_clocks[clock_id] = new_clock;
58580 }
58581 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58582
58583@@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58584 return (id & CLOCKFD_MASK) == CLOCKFD ?
58585 &clock_posix_dynamic : &clock_posix_cpu;
58586
58587- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58588+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58589 return NULL;
58590- return &posix_clocks[id];
58591+ return posix_clocks[id];
58592 }
58593
58594 static int common_timer_create(struct k_itimer *new_timer)
58595@@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58596 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58597 return -EFAULT;
58598
58599+ /* only the CLOCK_REALTIME clock can be set, all other clocks
58600+ have their clock_set fptr set to a nosettime dummy function
58601+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58602+ call common_clock_set, which calls do_sys_settimeofday, which
58603+ we hook
58604+ */
58605+
58606 return kc->clock_set(which_clock, &new_tp);
58607 }
58608
58609diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58610--- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58611+++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58612@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58613 .enable_mask = SYSRQ_ENABLE_BOOT,
58614 };
58615
58616-static int pm_sysrq_init(void)
58617+static int __init pm_sysrq_init(void)
58618 {
58619 register_sysrq_key('o', &sysrq_poweroff_op);
58620 return 0;
58621diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58622--- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58623+++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58624@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58625 u64 elapsed_csecs64;
58626 unsigned int elapsed_csecs;
58627 bool wakeup = false;
58628+ bool timedout = false;
58629
58630 do_gettimeofday(&start);
58631
58632@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58633
58634 while (true) {
58635 todo = 0;
58636+ if (time_after(jiffies, end_time))
58637+ timedout = true;
58638 read_lock(&tasklist_lock);
58639 do_each_thread(g, p) {
58640 if (frozen(p) || !freezable(p))
58641@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58642 * try_to_stop() after schedule() in ptrace/signal
58643 * stop sees TIF_FREEZE.
58644 */
58645- if (!task_is_stopped_or_traced(p) &&
58646- !freezer_should_skip(p))
58647+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58648 todo++;
58649+ if (timedout) {
58650+ printk(KERN_ERR "Task refusing to freeze:\n");
58651+ sched_show_task(p);
58652+ }
58653+ }
58654 } while_each_thread(g, p);
58655 read_unlock(&tasklist_lock);
58656
58657@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58658 todo += wq_busy;
58659 }
58660
58661- if (!todo || time_after(jiffies, end_time))
58662+ if (!todo || timedout)
58663 break;
58664
58665 if (pm_wakeup_pending()) {
58666diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58667--- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58668+++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58669@@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58670 if (from_file && type != SYSLOG_ACTION_OPEN)
58671 return 0;
58672
58673+#ifdef CONFIG_GRKERNSEC_DMESG
58674+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58675+ return -EPERM;
58676+#endif
58677+
58678 if (syslog_action_restricted(type)) {
58679 if (capable(CAP_SYSLOG))
58680 return 0;
58681 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58682 if (capable(CAP_SYS_ADMIN)) {
58683- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58684+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58685 "but no CAP_SYSLOG (deprecated).\n");
58686 return 0;
58687 }
58688diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58689--- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58690+++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58691@@ -39,7 +39,7 @@ struct profile_hit {
58692 /* Oprofile timer tick hook */
58693 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58694
58695-static atomic_t *prof_buffer;
58696+static atomic_unchecked_t *prof_buffer;
58697 static unsigned long prof_len, prof_shift;
58698
58699 int prof_on __read_mostly;
58700@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58701 hits[i].pc = 0;
58702 continue;
58703 }
58704- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58705+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58706 hits[i].hits = hits[i].pc = 0;
58707 }
58708 }
58709@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58710 * Add the current hit(s) and flush the write-queue out
58711 * to the global buffer:
58712 */
58713- atomic_add(nr_hits, &prof_buffer[pc]);
58714+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58715 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58716- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58717+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58718 hits[i].pc = hits[i].hits = 0;
58719 }
58720 out:
58721@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58722 if (prof_on != type || !prof_buffer)
58723 return;
58724 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58725- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58726+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58727 }
58728 #endif /* !CONFIG_SMP */
58729 EXPORT_SYMBOL_GPL(profile_hits);
58730@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58731 return -EFAULT;
58732 buf++; p++; count--; read++;
58733 }
58734- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58735+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58736 if (copy_to_user(buf, (void *)pnt, count))
58737 return -EFAULT;
58738 read += count;
58739@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58740 }
58741 #endif
58742 profile_discard_flip_buffers();
58743- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58744+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58745 return count;
58746 }
58747
58748diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58749--- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58750+++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58751@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58752 return ret;
58753 }
58754
58755-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58756+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58757+ unsigned int log)
58758 {
58759 const struct cred *cred = current_cred(), *tcred;
58760
58761@@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58762 cred->gid == tcred->sgid &&
58763 cred->gid == tcred->gid))
58764 goto ok;
58765- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58766+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58767+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58768 goto ok;
58769 rcu_read_unlock();
58770 return -EPERM;
58771@@ -152,7 +154,9 @@ ok:
58772 smp_rmb();
58773 if (task->mm)
58774 dumpable = get_dumpable(task->mm);
58775- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58776+ if (!dumpable &&
58777+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58778+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58779 return -EPERM;
58780
58781 return security_ptrace_access_check(task, mode);
58782@@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58783 {
58784 int err;
58785 task_lock(task);
58786- err = __ptrace_may_access(task, mode);
58787+ err = __ptrace_may_access(task, mode, 0);
58788+ task_unlock(task);
58789+ return !err;
58790+}
58791+
58792+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58793+{
58794+ int err;
58795+ task_lock(task);
58796+ err = __ptrace_may_access(task, mode, 1);
58797 task_unlock(task);
58798 return !err;
58799 }
58800@@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58801 goto out;
58802
58803 task_lock(task);
58804- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58805+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58806 task_unlock(task);
58807 if (retval)
58808 goto unlock_creds;
58809@@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58810 goto unlock_tasklist;
58811
58812 task->ptrace = PT_PTRACED;
58813- if (task_ns_capable(task, CAP_SYS_PTRACE))
58814+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58815 task->ptrace |= PT_PTRACE_CAP;
58816
58817 __ptrace_link(task, current);
58818@@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58819 {
58820 int copied = 0;
58821
58822+ pax_track_stack();
58823+
58824 while (len > 0) {
58825 char buf[128];
58826 int this_len, retval;
58827@@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
58828 break;
58829 return -EIO;
58830 }
58831- if (copy_to_user(dst, buf, retval))
58832+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58833 return -EFAULT;
58834 copied += retval;
58835 src += retval;
58836@@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
58837 {
58838 int copied = 0;
58839
58840+ pax_track_stack();
58841+
58842 while (len > 0) {
58843 char buf[128];
58844 int this_len, retval;
58845@@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
58846 {
58847 int ret = -EIO;
58848 siginfo_t siginfo;
58849- void __user *datavp = (void __user *) data;
58850+ void __user *datavp = (__force void __user *) data;
58851 unsigned long __user *datalp = datavp;
58852
58853+ pax_track_stack();
58854+
58855 switch (request) {
58856 case PTRACE_PEEKTEXT:
58857 case PTRACE_PEEKDATA:
58858@@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58859 goto out;
58860 }
58861
58862+ if (gr_handle_ptrace(child, request)) {
58863+ ret = -EPERM;
58864+ goto out_put_task_struct;
58865+ }
58866+
58867 if (request == PTRACE_ATTACH) {
58868 ret = ptrace_attach(child);
58869 /*
58870 * Some architectures need to do book-keeping after
58871 * a ptrace attach.
58872 */
58873- if (!ret)
58874+ if (!ret) {
58875 arch_ptrace_attach(child);
58876+ gr_audit_ptrace(child);
58877+ }
58878 goto out_put_task_struct;
58879 }
58880
58881@@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
58882 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58883 if (copied != sizeof(tmp))
58884 return -EIO;
58885- return put_user(tmp, (unsigned long __user *)data);
58886+ return put_user(tmp, (__force unsigned long __user *)data);
58887 }
58888
58889 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58890@@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
58891 siginfo_t siginfo;
58892 int ret;
58893
58894+ pax_track_stack();
58895+
58896 switch (request) {
58897 case PTRACE_PEEKTEXT:
58898 case PTRACE_PEEKDATA:
58899@@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
58900 goto out;
58901 }
58902
58903+ if (gr_handle_ptrace(child, request)) {
58904+ ret = -EPERM;
58905+ goto out_put_task_struct;
58906+ }
58907+
58908 if (request == PTRACE_ATTACH) {
58909 ret = ptrace_attach(child);
58910 /*
58911 * Some architectures need to do book-keeping after
58912 * a ptrace attach.
58913 */
58914- if (!ret)
58915+ if (!ret) {
58916 arch_ptrace_attach(child);
58917+ gr_audit_ptrace(child);
58918+ }
58919 goto out_put_task_struct;
58920 }
58921
58922diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
58923--- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
58924+++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
58925@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58926 { 0 };
58927 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58928 { 0 };
58929-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58930-static atomic_t n_rcu_torture_alloc;
58931-static atomic_t n_rcu_torture_alloc_fail;
58932-static atomic_t n_rcu_torture_free;
58933-static atomic_t n_rcu_torture_mberror;
58934-static atomic_t n_rcu_torture_error;
58935+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58936+static atomic_unchecked_t n_rcu_torture_alloc;
58937+static atomic_unchecked_t n_rcu_torture_alloc_fail;
58938+static atomic_unchecked_t n_rcu_torture_free;
58939+static atomic_unchecked_t n_rcu_torture_mberror;
58940+static atomic_unchecked_t n_rcu_torture_error;
58941 static long n_rcu_torture_boost_ktrerror;
58942 static long n_rcu_torture_boost_rterror;
58943 static long n_rcu_torture_boost_allocerror;
58944@@ -225,11 +225,11 @@ rcu_torture_alloc(void)
58945
58946 spin_lock_bh(&rcu_torture_lock);
58947 if (list_empty(&rcu_torture_freelist)) {
58948- atomic_inc(&n_rcu_torture_alloc_fail);
58949+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58950 spin_unlock_bh(&rcu_torture_lock);
58951 return NULL;
58952 }
58953- atomic_inc(&n_rcu_torture_alloc);
58954+ atomic_inc_unchecked(&n_rcu_torture_alloc);
58955 p = rcu_torture_freelist.next;
58956 list_del_init(p);
58957 spin_unlock_bh(&rcu_torture_lock);
58958@@ -242,7 +242,7 @@ rcu_torture_alloc(void)
58959 static void
58960 rcu_torture_free(struct rcu_torture *p)
58961 {
58962- atomic_inc(&n_rcu_torture_free);
58963+ atomic_inc_unchecked(&n_rcu_torture_free);
58964 spin_lock_bh(&rcu_torture_lock);
58965 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58966 spin_unlock_bh(&rcu_torture_lock);
58967@@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
58968 i = rp->rtort_pipe_count;
58969 if (i > RCU_TORTURE_PIPE_LEN)
58970 i = RCU_TORTURE_PIPE_LEN;
58971- atomic_inc(&rcu_torture_wcount[i]);
58972+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58973 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58974 rp->rtort_mbtest = 0;
58975 rcu_torture_free(rp);
58976@@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
58977 i = rp->rtort_pipe_count;
58978 if (i > RCU_TORTURE_PIPE_LEN)
58979 i = RCU_TORTURE_PIPE_LEN;
58980- atomic_inc(&rcu_torture_wcount[i]);
58981+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58982 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58983 rp->rtort_mbtest = 0;
58984 list_del(&rp->rtort_free);
58985@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58986 i = old_rp->rtort_pipe_count;
58987 if (i > RCU_TORTURE_PIPE_LEN)
58988 i = RCU_TORTURE_PIPE_LEN;
58989- atomic_inc(&rcu_torture_wcount[i]);
58990+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58991 old_rp->rtort_pipe_count++;
58992 cur_ops->deferred_free(old_rp);
58993 }
58994@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58995 return;
58996 }
58997 if (p->rtort_mbtest == 0)
58998- atomic_inc(&n_rcu_torture_mberror);
58999+ atomic_inc_unchecked(&n_rcu_torture_mberror);
59000 spin_lock(&rand_lock);
59001 cur_ops->read_delay(&rand);
59002 n_rcu_torture_timers++;
59003@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59004 continue;
59005 }
59006 if (p->rtort_mbtest == 0)
59007- atomic_inc(&n_rcu_torture_mberror);
59008+ atomic_inc_unchecked(&n_rcu_torture_mberror);
59009 cur_ops->read_delay(&rand);
59010 preempt_disable();
59011 pipe_count = p->rtort_pipe_count;
59012@@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59013 rcu_torture_current,
59014 rcu_torture_current_version,
59015 list_empty(&rcu_torture_freelist),
59016- atomic_read(&n_rcu_torture_alloc),
59017- atomic_read(&n_rcu_torture_alloc_fail),
59018- atomic_read(&n_rcu_torture_free),
59019- atomic_read(&n_rcu_torture_mberror),
59020+ atomic_read_unchecked(&n_rcu_torture_alloc),
59021+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59022+ atomic_read_unchecked(&n_rcu_torture_free),
59023+ atomic_read_unchecked(&n_rcu_torture_mberror),
59024 n_rcu_torture_boost_ktrerror,
59025 n_rcu_torture_boost_rterror,
59026 n_rcu_torture_boost_allocerror,
59027@@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59028 n_rcu_torture_boost_failure,
59029 n_rcu_torture_boosts,
59030 n_rcu_torture_timers);
59031- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59032+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59033 n_rcu_torture_boost_ktrerror != 0 ||
59034 n_rcu_torture_boost_rterror != 0 ||
59035 n_rcu_torture_boost_allocerror != 0 ||
59036@@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59037 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59038 if (i > 1) {
59039 cnt += sprintf(&page[cnt], "!!! ");
59040- atomic_inc(&n_rcu_torture_error);
59041+ atomic_inc_unchecked(&n_rcu_torture_error);
59042 WARN_ON_ONCE(1);
59043 }
59044 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59045@@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59046 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59047 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59048 cnt += sprintf(&page[cnt], " %d",
59049- atomic_read(&rcu_torture_wcount[i]));
59050+ atomic_read_unchecked(&rcu_torture_wcount[i]));
59051 }
59052 cnt += sprintf(&page[cnt], "\n");
59053 if (cur_ops->stats)
59054@@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59055
59056 if (cur_ops->cleanup)
59057 cur_ops->cleanup();
59058- if (atomic_read(&n_rcu_torture_error))
59059+ if (atomic_read_unchecked(&n_rcu_torture_error))
59060 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59061 else
59062 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59063@@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59064
59065 rcu_torture_current = NULL;
59066 rcu_torture_current_version = 0;
59067- atomic_set(&n_rcu_torture_alloc, 0);
59068- atomic_set(&n_rcu_torture_alloc_fail, 0);
59069- atomic_set(&n_rcu_torture_free, 0);
59070- atomic_set(&n_rcu_torture_mberror, 0);
59071- atomic_set(&n_rcu_torture_error, 0);
59072+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59073+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59074+ atomic_set_unchecked(&n_rcu_torture_free, 0);
59075+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59076+ atomic_set_unchecked(&n_rcu_torture_error, 0);
59077 n_rcu_torture_boost_ktrerror = 0;
59078 n_rcu_torture_boost_rterror = 0;
59079 n_rcu_torture_boost_allocerror = 0;
59080@@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59081 n_rcu_torture_boost_failure = 0;
59082 n_rcu_torture_boosts = 0;
59083 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59084- atomic_set(&rcu_torture_wcount[i], 0);
59085+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59086 for_each_possible_cpu(cpu) {
59087 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59088 per_cpu(rcu_torture_count, cpu)[i] = 0;
59089diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59090--- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59091+++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59092@@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59093 /*
59094 * Do softirq processing for the current CPU.
59095 */
59096-static void rcu_process_callbacks(struct softirq_action *unused)
59097+static void rcu_process_callbacks(void)
59098 {
59099 /*
59100 * Memory references from any prior RCU read-side critical sections
59101diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59102--- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59103+++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59104@@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59105
59106 /* Clean up and exit. */
59107 smp_mb(); /* ensure expedited GP seen before counter increment. */
59108- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59109+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59110 unlock_mb_ret:
59111 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59112 mb_ret:
59113@@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59114
59115 #else /* #ifndef CONFIG_SMP */
59116
59117-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59118-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59119+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59120+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59121
59122 static int synchronize_sched_expedited_cpu_stop(void *data)
59123 {
59124@@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59125 int firstsnap, s, snap, trycount = 0;
59126
59127 /* Note that atomic_inc_return() implies full memory barrier. */
59128- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59129+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59130 get_online_cpus();
59131
59132 /*
59133@@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59134 }
59135
59136 /* Check to see if someone else did our work for us. */
59137- s = atomic_read(&sync_sched_expedited_done);
59138+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59139 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59140 smp_mb(); /* ensure test happens before caller kfree */
59141 return;
59142@@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59143 * grace period works for us.
59144 */
59145 get_online_cpus();
59146- snap = atomic_read(&sync_sched_expedited_started) - 1;
59147+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59148 smp_mb(); /* ensure read is before try_stop_cpus(). */
59149 }
59150
59151@@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59152 * than we did beat us to the punch.
59153 */
59154 do {
59155- s = atomic_read(&sync_sched_expedited_done);
59156+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59157 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59158 smp_mb(); /* ensure test happens before caller kfree */
59159 break;
59160 }
59161- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59162+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59163
59164 put_online_cpus();
59165 }
59166diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59167--- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59168+++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59169@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59170 };
59171 ssize_t ret;
59172
59173+ pax_track_stack();
59174+
59175 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59176 return 0;
59177 if (splice_grow_spd(pipe, &spd))
59178diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59179--- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59180+++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59181@@ -133,8 +133,18 @@ static const struct file_operations proc
59182
59183 static int __init ioresources_init(void)
59184 {
59185+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59186+#ifdef CONFIG_GRKERNSEC_PROC_USER
59187+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59188+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59189+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59190+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59191+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59192+#endif
59193+#else
59194 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59195 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59196+#endif
59197 return 0;
59198 }
59199 __initcall(ioresources_init);
59200diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59201--- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59202+++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59203@@ -20,7 +20,7 @@
59204 #define MAX_RT_TEST_MUTEXES 8
59205
59206 static spinlock_t rttest_lock;
59207-static atomic_t rttest_event;
59208+static atomic_unchecked_t rttest_event;
59209
59210 struct test_thread_data {
59211 int opcode;
59212@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59213
59214 case RTTEST_LOCKCONT:
59215 td->mutexes[td->opdata] = 1;
59216- td->event = atomic_add_return(1, &rttest_event);
59217+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59218 return 0;
59219
59220 case RTTEST_RESET:
59221@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59222 return 0;
59223
59224 case RTTEST_RESETEVENT:
59225- atomic_set(&rttest_event, 0);
59226+ atomic_set_unchecked(&rttest_event, 0);
59227 return 0;
59228
59229 default:
59230@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59231 return ret;
59232
59233 td->mutexes[id] = 1;
59234- td->event = atomic_add_return(1, &rttest_event);
59235+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59236 rt_mutex_lock(&mutexes[id]);
59237- td->event = atomic_add_return(1, &rttest_event);
59238+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59239 td->mutexes[id] = 4;
59240 return 0;
59241
59242@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59243 return ret;
59244
59245 td->mutexes[id] = 1;
59246- td->event = atomic_add_return(1, &rttest_event);
59247+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59248 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59249- td->event = atomic_add_return(1, &rttest_event);
59250+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59251 td->mutexes[id] = ret ? 0 : 4;
59252 return ret ? -EINTR : 0;
59253
59254@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59255 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59256 return ret;
59257
59258- td->event = atomic_add_return(1, &rttest_event);
59259+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59260 rt_mutex_unlock(&mutexes[id]);
59261- td->event = atomic_add_return(1, &rttest_event);
59262+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59263 td->mutexes[id] = 0;
59264 return 0;
59265
59266@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59267 break;
59268
59269 td->mutexes[dat] = 2;
59270- td->event = atomic_add_return(1, &rttest_event);
59271+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59272 break;
59273
59274 default:
59275@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59276 return;
59277
59278 td->mutexes[dat] = 3;
59279- td->event = atomic_add_return(1, &rttest_event);
59280+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59281 break;
59282
59283 case RTTEST_LOCKNOWAIT:
59284@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59285 return;
59286
59287 td->mutexes[dat] = 1;
59288- td->event = atomic_add_return(1, &rttest_event);
59289+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59290 return;
59291
59292 default:
59293diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59294--- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59295+++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59296@@ -7,7 +7,7 @@
59297
59298 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59299 static struct autogroup autogroup_default;
59300-static atomic_t autogroup_seq_nr;
59301+static atomic_unchecked_t autogroup_seq_nr;
59302
59303 static void __init autogroup_init(struct task_struct *init_task)
59304 {
59305@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59306
59307 kref_init(&ag->kref);
59308 init_rwsem(&ag->lock);
59309- ag->id = atomic_inc_return(&autogroup_seq_nr);
59310+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59311 ag->tg = tg;
59312 #ifdef CONFIG_RT_GROUP_SCHED
59313 /*
59314diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59315--- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59316+++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59317@@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59318 struct rq *rq;
59319 int cpu;
59320
59321+ pax_track_stack();
59322+
59323 need_resched:
59324 preempt_disable();
59325 cpu = smp_processor_id();
59326@@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59327 * Look out! "owner" is an entirely speculative pointer
59328 * access and not reliable.
59329 */
59330-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59331+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59332 {
59333 unsigned int cpu;
59334 struct rq *rq;
59335@@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59336 * DEBUG_PAGEALLOC could have unmapped it if
59337 * the mutex owner just released it and exited.
59338 */
59339- if (probe_kernel_address(&owner->cpu, cpu))
59340+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59341 return 0;
59342 #else
59343- cpu = owner->cpu;
59344+ cpu = task_thread_info(owner)->cpu;
59345 #endif
59346
59347 /*
59348@@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59349 /*
59350 * Is that owner really running on that cpu?
59351 */
59352- if (task_thread_info(rq->curr) != owner || need_resched())
59353+ if (rq->curr != owner || need_resched())
59354 return 0;
59355
59356 arch_mutex_cpu_relax();
59357@@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59358 /* convert nice value [19,-20] to rlimit style value [1,40] */
59359 int nice_rlim = 20 - nice;
59360
59361+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59362+
59363 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59364 capable(CAP_SYS_NICE));
59365 }
59366@@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59367 if (nice > 19)
59368 nice = 19;
59369
59370- if (increment < 0 && !can_nice(current, nice))
59371+ if (increment < 0 && (!can_nice(current, nice) ||
59372+ gr_handle_chroot_nice()))
59373 return -EPERM;
59374
59375 retval = security_task_setnice(current, nice);
59376@@ -4957,6 +4962,7 @@ recheck:
59377 unsigned long rlim_rtprio =
59378 task_rlimit(p, RLIMIT_RTPRIO);
59379
59380+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59381 /* can't set/change the rt policy */
59382 if (policy != p->policy && !rlim_rtprio)
59383 return -EPERM;
59384@@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59385 long power;
59386 int weight;
59387
59388- WARN_ON(!sd || !sd->groups);
59389+ BUG_ON(!sd || !sd->groups);
59390
59391 if (cpu != group_first_cpu(sd->groups))
59392 return;
59393diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59394--- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59395+++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59396@@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59397 * run_rebalance_domains is triggered when needed from the scheduler tick.
59398 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59399 */
59400-static void run_rebalance_domains(struct softirq_action *h)
59401+static void run_rebalance_domains(void)
59402 {
59403 int this_cpu = smp_processor_id();
59404 struct rq *this_rq = cpu_rq(this_cpu);
59405diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59406--- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59407+++ linux-2.6.39.4/kernel/signal.c 2011-08-05 19:44:37.000000000 -0400
59408@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59409
59410 int print_fatal_signals __read_mostly;
59411
59412-static void __user *sig_handler(struct task_struct *t, int sig)
59413+static __sighandler_t sig_handler(struct task_struct *t, int sig)
59414 {
59415 return t->sighand->action[sig - 1].sa.sa_handler;
59416 }
59417
59418-static int sig_handler_ignored(void __user *handler, int sig)
59419+static int sig_handler_ignored(__sighandler_t handler, int sig)
59420 {
59421 /* Is it explicitly or implicitly ignored? */
59422 return handler == SIG_IGN ||
59423@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59424 static int sig_task_ignored(struct task_struct *t, int sig,
59425 int from_ancestor_ns)
59426 {
59427- void __user *handler;
59428+ __sighandler_t handler;
59429
59430 handler = sig_handler(t, sig);
59431
59432@@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59433 atomic_inc(&user->sigpending);
59434 rcu_read_unlock();
59435
59436+ if (!override_rlimit)
59437+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59438+
59439 if (override_rlimit ||
59440 atomic_read(&user->sigpending) <=
59441 task_rlimit(t, RLIMIT_SIGPENDING)) {
59442@@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59443
59444 int unhandled_signal(struct task_struct *tsk, int sig)
59445 {
59446- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59447+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59448 if (is_global_init(tsk))
59449 return 1;
59450 if (handler != SIG_IGN && handler != SIG_DFL)
59451@@ -693,6 +696,12 @@ static int check_kill_permission(int sig
59452 }
59453 }
59454
59455+ /* allow glibc communication via tgkill to other threads in our
59456+ thread group */
59457+ if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
59458+ task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
59459+ return -EPERM;
59460+
59461 return security_task_kill(t, info, sig, 0);
59462 }
59463
59464@@ -1041,7 +1050,7 @@ __group_send_sig_info(int sig, struct si
59465 return send_signal(sig, info, p, 1);
59466 }
59467
59468-static int
59469+int
59470 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59471 {
59472 return send_signal(sig, info, t, 0);
59473@@ -1078,6 +1087,7 @@ force_sig_info(int sig, struct siginfo *
59474 unsigned long int flags;
59475 int ret, blocked, ignored;
59476 struct k_sigaction *action;
59477+ int is_unhandled = 0;
59478
59479 spin_lock_irqsave(&t->sighand->siglock, flags);
59480 action = &t->sighand->action[sig-1];
59481@@ -1092,9 +1102,18 @@ force_sig_info(int sig, struct siginfo *
59482 }
59483 if (action->sa.sa_handler == SIG_DFL)
59484 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59485+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59486+ is_unhandled = 1;
59487 ret = specific_send_sig_info(sig, info, t);
59488 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59489
59490+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
59491+ normal operation */
59492+ if (is_unhandled) {
59493+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59494+ gr_handle_crash(t, sig);
59495+ }
59496+
59497 return ret;
59498 }
59499
59500@@ -1153,8 +1172,11 @@ int group_send_sig_info(int sig, struct
59501 ret = check_kill_permission(sig, info, p);
59502 rcu_read_unlock();
59503
59504- if (!ret && sig)
59505+ if (!ret && sig) {
59506 ret = do_send_sig_info(sig, info, p, true);
59507+ if (!ret)
59508+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59509+ }
59510
59511 return ret;
59512 }
59513@@ -1718,6 +1740,8 @@ void ptrace_notify(int exit_code)
59514 {
59515 siginfo_t info;
59516
59517+ pax_track_stack();
59518+
59519 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59520
59521 memset(&info, 0, sizeof info);
59522@@ -2393,7 +2417,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59523 int error = -ESRCH;
59524
59525 rcu_read_lock();
59526- p = find_task_by_vpid(pid);
59527+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59528+ /* allow glibc communication via tgkill to other threads in our
59529+ thread group */
59530+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59531+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
59532+ p = find_task_by_vpid_unrestricted(pid);
59533+ else
59534+#endif
59535+ p = find_task_by_vpid(pid);
59536 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59537 error = check_kill_permission(sig, info, p);
59538 /*
59539diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59540--- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59541+++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59542@@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59543 }
59544 EXPORT_SYMBOL(smp_call_function);
59545
59546-void ipi_call_lock(void)
59547+void ipi_call_lock(void) __acquires(call_function.lock)
59548 {
59549 raw_spin_lock(&call_function.lock);
59550 }
59551
59552-void ipi_call_unlock(void)
59553+void ipi_call_unlock(void) __releases(call_function.lock)
59554 {
59555 raw_spin_unlock(&call_function.lock);
59556 }
59557
59558-void ipi_call_lock_irq(void)
59559+void ipi_call_lock_irq(void) __acquires(call_function.lock)
59560 {
59561 raw_spin_lock_irq(&call_function.lock);
59562 }
59563
59564-void ipi_call_unlock_irq(void)
59565+void ipi_call_unlock_irq(void) __releases(call_function.lock)
59566 {
59567 raw_spin_unlock_irq(&call_function.lock);
59568 }
59569diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59570--- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59571+++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59572@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59573
59574 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59575
59576-char *softirq_to_name[NR_SOFTIRQS] = {
59577+const char * const softirq_to_name[NR_SOFTIRQS] = {
59578 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59579 "TASKLET", "SCHED", "HRTIMER", "RCU"
59580 };
59581@@ -235,7 +235,7 @@ restart:
59582 kstat_incr_softirqs_this_cpu(vec_nr);
59583
59584 trace_softirq_entry(vec_nr);
59585- h->action(h);
59586+ h->action();
59587 trace_softirq_exit(vec_nr);
59588 if (unlikely(prev_count != preempt_count())) {
59589 printk(KERN_ERR "huh, entered softirq %u %s %p"
59590@@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59591 local_irq_restore(flags);
59592 }
59593
59594-void open_softirq(int nr, void (*action)(struct softirq_action *))
59595+void open_softirq(int nr, void (*action)(void))
59596 {
59597- softirq_vec[nr].action = action;
59598+ pax_open_kernel();
59599+ *(void **)&softirq_vec[nr].action = action;
59600+ pax_close_kernel();
59601 }
59602
59603 /*
59604@@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59605
59606 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59607
59608-static void tasklet_action(struct softirq_action *a)
59609+static void tasklet_action(void)
59610 {
59611 struct tasklet_struct *list;
59612
59613@@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59614 }
59615 }
59616
59617-static void tasklet_hi_action(struct softirq_action *a)
59618+static void tasklet_hi_action(void)
59619 {
59620 struct tasklet_struct *list;
59621
59622diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59623--- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59624+++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59625@@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59626 error = -EACCES;
59627 goto out;
59628 }
59629+
59630+ if (gr_handle_chroot_setpriority(p, niceval)) {
59631+ error = -EACCES;
59632+ goto out;
59633+ }
59634+
59635 no_nice = security_task_setnice(p, niceval);
59636 if (no_nice) {
59637 error = no_nice;
59638@@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59639 goto error;
59640 }
59641
59642+ if (gr_check_group_change(new->gid, new->egid, -1))
59643+ goto error;
59644+
59645 if (rgid != (gid_t) -1 ||
59646 (egid != (gid_t) -1 && egid != old->gid))
59647 new->sgid = new->egid;
59648@@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59649 old = current_cred();
59650
59651 retval = -EPERM;
59652+
59653+ if (gr_check_group_change(gid, gid, gid))
59654+ goto error;
59655+
59656 if (nsown_capable(CAP_SETGID))
59657 new->gid = new->egid = new->sgid = new->fsgid = gid;
59658 else if (gid == old->gid || gid == old->sgid)
59659@@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59660 goto error;
59661 }
59662
59663+ if (gr_check_user_change(new->uid, new->euid, -1))
59664+ goto error;
59665+
59666 if (new->uid != old->uid) {
59667 retval = set_user(new);
59668 if (retval < 0)
59669@@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59670 old = current_cred();
59671
59672 retval = -EPERM;
59673+
59674+ if (gr_check_crash_uid(uid))
59675+ goto error;
59676+ if (gr_check_user_change(uid, uid, uid))
59677+ goto error;
59678+
59679 if (nsown_capable(CAP_SETUID)) {
59680 new->suid = new->uid = uid;
59681 if (uid != old->uid) {
59682@@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59683 goto error;
59684 }
59685
59686+ if (gr_check_user_change(ruid, euid, -1))
59687+ goto error;
59688+
59689 if (ruid != (uid_t) -1) {
59690 new->uid = ruid;
59691 if (ruid != old->uid) {
59692@@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59693 goto error;
59694 }
59695
59696+ if (gr_check_group_change(rgid, egid, -1))
59697+ goto error;
59698+
59699 if (rgid != (gid_t) -1)
59700 new->gid = rgid;
59701 if (egid != (gid_t) -1)
59702@@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59703 old = current_cred();
59704 old_fsuid = old->fsuid;
59705
59706+ if (gr_check_user_change(-1, -1, uid))
59707+ goto error;
59708+
59709 if (uid == old->uid || uid == old->euid ||
59710 uid == old->suid || uid == old->fsuid ||
59711 nsown_capable(CAP_SETUID)) {
59712@@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59713 }
59714 }
59715
59716+error:
59717 abort_creds(new);
59718 return old_fsuid;
59719
59720@@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59721 if (gid == old->gid || gid == old->egid ||
59722 gid == old->sgid || gid == old->fsgid ||
59723 nsown_capable(CAP_SETGID)) {
59724+ if (gr_check_group_change(-1, -1, gid))
59725+ goto error;
59726+
59727 if (gid != old_fsgid) {
59728 new->fsgid = gid;
59729 goto change_okay;
59730 }
59731 }
59732
59733+error:
59734 abort_creds(new);
59735 return old_fsgid;
59736
59737@@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59738 error = get_dumpable(me->mm);
59739 break;
59740 case PR_SET_DUMPABLE:
59741- if (arg2 < 0 || arg2 > 1) {
59742+ if (arg2 > 1) {
59743 error = -EINVAL;
59744 break;
59745 }
59746diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59747--- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59748+++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59749@@ -84,6 +84,13 @@
59750
59751
59752 #if defined(CONFIG_SYSCTL)
59753+#include <linux/grsecurity.h>
59754+#include <linux/grinternal.h>
59755+
59756+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59757+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59758+ const int op);
59759+extern int gr_handle_chroot_sysctl(const int op);
59760
59761 /* External variables not in a header file. */
59762 extern int sysctl_overcommit_memory;
59763@@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59764 }
59765
59766 #endif
59767+extern struct ctl_table grsecurity_table[];
59768
59769 static struct ctl_table root_table[];
59770 static struct ctl_table_root sysctl_table_root;
59771@@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59772 int sysctl_legacy_va_layout;
59773 #endif
59774
59775+#ifdef CONFIG_PAX_SOFTMODE
59776+static ctl_table pax_table[] = {
59777+ {
59778+ .procname = "softmode",
59779+ .data = &pax_softmode,
59780+ .maxlen = sizeof(unsigned int),
59781+ .mode = 0600,
59782+ .proc_handler = &proc_dointvec,
59783+ },
59784+
59785+ { }
59786+};
59787+#endif
59788+
59789 /* The default sysctl tables: */
59790
59791 static struct ctl_table root_table[] = {
59792@@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59793 #endif
59794
59795 static struct ctl_table kern_table[] = {
59796+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59797+ {
59798+ .procname = "grsecurity",
59799+ .mode = 0500,
59800+ .child = grsecurity_table,
59801+ },
59802+#endif
59803+
59804+#ifdef CONFIG_PAX_SOFTMODE
59805+ {
59806+ .procname = "pax",
59807+ .mode = 0500,
59808+ .child = pax_table,
59809+ },
59810+#endif
59811+
59812 {
59813 .procname = "sched_child_runs_first",
59814 .data = &sysctl_sched_child_runs_first,
59815@@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59816 .data = &modprobe_path,
59817 .maxlen = KMOD_PATH_LEN,
59818 .mode = 0644,
59819- .proc_handler = proc_dostring,
59820+ .proc_handler = proc_dostring_modpriv,
59821 },
59822 {
59823 .procname = "modules_disabled",
59824@@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59825 .extra1 = &zero,
59826 .extra2 = &one,
59827 },
59828+#endif
59829 {
59830 .procname = "kptr_restrict",
59831 .data = &kptr_restrict,
59832 .maxlen = sizeof(int),
59833 .mode = 0644,
59834 .proc_handler = proc_dmesg_restrict,
59835+#ifdef CONFIG_GRKERNSEC_HIDESYM
59836+ .extra1 = &two,
59837+#else
59838 .extra1 = &zero,
59839+#endif
59840 .extra2 = &two,
59841 },
59842-#endif
59843 {
59844 .procname = "ngroups_max",
59845 .data = &ngroups_max,
59846@@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
59847 .proc_handler = proc_dointvec_minmax,
59848 .extra1 = &zero,
59849 },
59850+ {
59851+ .procname = "heap_stack_gap",
59852+ .data = &sysctl_heap_stack_gap,
59853+ .maxlen = sizeof(sysctl_heap_stack_gap),
59854+ .mode = 0644,
59855+ .proc_handler = proc_doulongvec_minmax,
59856+ },
59857 #else
59858 {
59859 .procname = "nr_trim_pages",
59860@@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
59861 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59862 {
59863 int mode;
59864+ int error;
59865+
59866+ if (table->parent != NULL && table->parent->procname != NULL &&
59867+ table->procname != NULL &&
59868+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59869+ return -EACCES;
59870+ if (gr_handle_chroot_sysctl(op))
59871+ return -EACCES;
59872+ error = gr_handle_sysctl(table, op);
59873+ if (error)
59874+ return error;
59875
59876 if (root->permissions)
59877 mode = root->permissions(root, current->nsproxy, table);
59878@@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
59879 buffer, lenp, ppos);
59880 }
59881
59882+int proc_dostring_modpriv(struct ctl_table *table, int write,
59883+ void __user *buffer, size_t *lenp, loff_t *ppos)
59884+{
59885+ if (write && !capable(CAP_SYS_MODULE))
59886+ return -EPERM;
59887+
59888+ return _proc_do_string(table->data, table->maxlen, write,
59889+ buffer, lenp, ppos);
59890+}
59891+
59892 static size_t proc_skip_spaces(char **buf)
59893 {
59894 size_t ret;
59895@@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
59896 len = strlen(tmp);
59897 if (len > *size)
59898 len = *size;
59899+ if (len > sizeof(tmp))
59900+ len = sizeof(tmp);
59901 if (copy_to_user(*buf, tmp, len))
59902 return -EFAULT;
59903 *size -= len;
59904@@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
59905 *i = val;
59906 } else {
59907 val = convdiv * (*i) / convmul;
59908- if (!first)
59909+ if (!first) {
59910 err = proc_put_char(&buffer, &left, '\t');
59911+ if (err)
59912+ break;
59913+ }
59914 err = proc_put_long(&buffer, &left, val, false);
59915 if (err)
59916 break;
59917@@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
59918 return -ENOSYS;
59919 }
59920
59921+int proc_dostring_modpriv(struct ctl_table *table, int write,
59922+ void __user *buffer, size_t *lenp, loff_t *ppos)
59923+{
59924+ return -ENOSYS;
59925+}
59926+
59927 int proc_dointvec(struct ctl_table *table, int write,
59928 void __user *buffer, size_t *lenp, loff_t *ppos)
59929 {
59930@@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59931 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59932 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59933 EXPORT_SYMBOL(proc_dostring);
59934+EXPORT_SYMBOL(proc_dostring_modpriv);
59935 EXPORT_SYMBOL(proc_doulongvec_minmax);
59936 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59937 EXPORT_SYMBOL(register_sysctl_table);
59938diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
59939--- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
59940+++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
59941@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59942 set_fail(&fail, table, "Directory with extra2");
59943 } else {
59944 if ((table->proc_handler == proc_dostring) ||
59945+ (table->proc_handler == proc_dostring_modpriv) ||
59946 (table->proc_handler == proc_dointvec) ||
59947 (table->proc_handler == proc_dointvec_minmax) ||
59948 (table->proc_handler == proc_dointvec_jiffies) ||
59949diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
59950--- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
59951+++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
59952@@ -27,9 +27,12 @@
59953 #include <linux/cgroup.h>
59954 #include <linux/fs.h>
59955 #include <linux/file.h>
59956+#include <linux/grsecurity.h>
59957 #include <net/genetlink.h>
59958 #include <asm/atomic.h>
59959
59960+extern int gr_is_taskstats_denied(int pid);
59961+
59962 /*
59963 * Maximum length of a cpumask that can be specified in
59964 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59965@@ -558,6 +561,9 @@ err:
59966
59967 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59968 {
59969+ if (gr_is_taskstats_denied(current->pid))
59970+ return -EACCES;
59971+
59972 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59973 return cmd_attr_register_cpumask(info);
59974 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59975diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
59976--- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
59977+++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
59978@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59979 * then clear the broadcast bit.
59980 */
59981 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59982- int cpu = smp_processor_id();
59983+ cpu = smp_processor_id();
59984
59985 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59986 tick_broadcast_clear_oneshot(cpu);
59987diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
59988--- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
59989+++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
59990@@ -14,6 +14,7 @@
59991 #include <linux/init.h>
59992 #include <linux/mm.h>
59993 #include <linux/sched.h>
59994+#include <linux/grsecurity.h>
59995 #include <linux/syscore_ops.h>
59996 #include <linux/clocksource.h>
59997 #include <linux/jiffies.h>
59998@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59999 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60000 return -EINVAL;
60001
60002+ gr_log_timechange();
60003+
60004 write_seqlock_irqsave(&xtime_lock, flags);
60005
60006 timekeeping_forward_now();
60007diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60008--- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60009+++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60010@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60011
60012 static void print_name_offset(struct seq_file *m, void *sym)
60013 {
60014+#ifdef CONFIG_GRKERNSEC_HIDESYM
60015+ SEQ_printf(m, "<%p>", NULL);
60016+#else
60017 char symname[KSYM_NAME_LEN];
60018
60019 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60020 SEQ_printf(m, "<%pK>", sym);
60021 else
60022 SEQ_printf(m, "%s", symname);
60023+#endif
60024 }
60025
60026 static void
60027@@ -112,7 +116,11 @@ next_one:
60028 static void
60029 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60030 {
60031+#ifdef CONFIG_GRKERNSEC_HIDESYM
60032+ SEQ_printf(m, " .base: %p\n", NULL);
60033+#else
60034 SEQ_printf(m, " .base: %pK\n", base);
60035+#endif
60036 SEQ_printf(m, " .index: %d\n",
60037 base->index);
60038 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60039@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60040 {
60041 struct proc_dir_entry *pe;
60042
60043+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60044+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60045+#else
60046 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60047+#endif
60048 if (!pe)
60049 return -ENOMEM;
60050 return 0;
60051diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60052--- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60053+++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60054@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60055 static unsigned long nr_entries;
60056 static struct entry entries[MAX_ENTRIES];
60057
60058-static atomic_t overflow_count;
60059+static atomic_unchecked_t overflow_count;
60060
60061 /*
60062 * The entries are in a hash-table, for fast lookup:
60063@@ -140,7 +140,7 @@ static void reset_entries(void)
60064 nr_entries = 0;
60065 memset(entries, 0, sizeof(entries));
60066 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60067- atomic_set(&overflow_count, 0);
60068+ atomic_set_unchecked(&overflow_count, 0);
60069 }
60070
60071 static struct entry *alloc_entry(void)
60072@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60073 if (likely(entry))
60074 entry->count++;
60075 else
60076- atomic_inc(&overflow_count);
60077+ atomic_inc_unchecked(&overflow_count);
60078
60079 out_unlock:
60080 raw_spin_unlock_irqrestore(lock, flags);
60081@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60082
60083 static void print_name_offset(struct seq_file *m, unsigned long addr)
60084 {
60085+#ifdef CONFIG_GRKERNSEC_HIDESYM
60086+ seq_printf(m, "<%p>", NULL);
60087+#else
60088 char symname[KSYM_NAME_LEN];
60089
60090 if (lookup_symbol_name(addr, symname) < 0)
60091 seq_printf(m, "<%p>", (void *)addr);
60092 else
60093 seq_printf(m, "%s", symname);
60094+#endif
60095 }
60096
60097 static int tstats_show(struct seq_file *m, void *v)
60098@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60099
60100 seq_puts(m, "Timer Stats Version: v0.2\n");
60101 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60102- if (atomic_read(&overflow_count))
60103+ if (atomic_read_unchecked(&overflow_count))
60104 seq_printf(m, "Overflow: %d entries\n",
60105- atomic_read(&overflow_count));
60106+ atomic_read_unchecked(&overflow_count));
60107
60108 for (i = 0; i < nr_entries; i++) {
60109 entry = entries + i;
60110@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60111 {
60112 struct proc_dir_entry *pe;
60113
60114+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60115+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60116+#else
60117 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60118+#endif
60119 if (!pe)
60120 return -ENOMEM;
60121 return 0;
60122diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60123--- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60124+++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60125@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60126 return error;
60127
60128 if (tz) {
60129+ /* we log in do_settimeofday called below, so don't log twice
60130+ */
60131+ if (!tv)
60132+ gr_log_timechange();
60133+
60134 /* SMP safe, global irq locking makes it work. */
60135 sys_tz = *tz;
60136 update_vsyscall_tz();
60137diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60138--- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60139+++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60140@@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60141 /*
60142 * This function runs timers and the timer-tq in bottom half context.
60143 */
60144-static void run_timer_softirq(struct softirq_action *h)
60145+static void run_timer_softirq(void)
60146 {
60147 struct tvec_base *base = __this_cpu_read(tvec_bases);
60148
60149diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60150--- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60151+++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60152@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60153 struct blk_trace *bt = filp->private_data;
60154 char buf[16];
60155
60156- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60157+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60158
60159 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60160 }
60161@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60162 return 1;
60163
60164 bt = buf->chan->private_data;
60165- atomic_inc(&bt->dropped);
60166+ atomic_inc_unchecked(&bt->dropped);
60167 return 0;
60168 }
60169
60170@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60171
60172 bt->dir = dir;
60173 bt->dev = dev;
60174- atomic_set(&bt->dropped, 0);
60175+ atomic_set_unchecked(&bt->dropped, 0);
60176
60177 ret = -EIO;
60178 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60179diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60180--- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60181+++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60182@@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60183
60184 ip = rec->ip;
60185
60186+ ret = ftrace_arch_code_modify_prepare();
60187+ FTRACE_WARN_ON(ret);
60188+ if (ret)
60189+ return 0;
60190+
60191 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60192+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60193 if (ret) {
60194 ftrace_bug(ret, ip);
60195 rec->flags |= FTRACE_FL_FAILED;
60196- return 0;
60197 }
60198- return 1;
60199+ return ret ? 0 : 1;
60200 }
60201
60202 /*
60203@@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60204
60205 int
60206 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60207- void *data)
60208+ void *data)
60209 {
60210 struct ftrace_func_probe *entry;
60211 struct ftrace_page *pg;
60212diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60213--- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60214+++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60215@@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60216 size_t rem;
60217 unsigned int i;
60218
60219+ pax_track_stack();
60220+
60221 if (splice_grow_spd(pipe, &spd))
60222 return -ENOMEM;
60223
60224@@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60225 int entries, size, i;
60226 size_t ret;
60227
60228+ pax_track_stack();
60229+
60230 if (splice_grow_spd(pipe, &spd))
60231 return -ENOMEM;
60232
60233@@ -3981,10 +3985,9 @@ static const struct file_operations trac
60234 };
60235 #endif
60236
60237-static struct dentry *d_tracer;
60238-
60239 struct dentry *tracing_init_dentry(void)
60240 {
60241+ static struct dentry *d_tracer;
60242 static int once;
60243
60244 if (d_tracer)
60245@@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60246 return d_tracer;
60247 }
60248
60249-static struct dentry *d_percpu;
60250-
60251 struct dentry *tracing_dentry_percpu(void)
60252 {
60253+ static struct dentry *d_percpu;
60254 static int once;
60255 struct dentry *d_tracer;
60256
60257diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60258--- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60259+++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60260@@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60261 struct ftrace_module_file_ops {
60262 struct list_head list;
60263 struct module *mod;
60264- struct file_operations id;
60265- struct file_operations enable;
60266- struct file_operations format;
60267- struct file_operations filter;
60268 };
60269
60270 static struct ftrace_module_file_ops *
60271@@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60272
60273 file_ops->mod = mod;
60274
60275- file_ops->id = ftrace_event_id_fops;
60276- file_ops->id.owner = mod;
60277-
60278- file_ops->enable = ftrace_enable_fops;
60279- file_ops->enable.owner = mod;
60280-
60281- file_ops->filter = ftrace_event_filter_fops;
60282- file_ops->filter.owner = mod;
60283-
60284- file_ops->format = ftrace_event_format_fops;
60285- file_ops->format.owner = mod;
60286+ pax_open_kernel();
60287+ *(void **)&mod->trace_id.owner = mod;
60288+ *(void **)&mod->trace_enable.owner = mod;
60289+ *(void **)&mod->trace_filter.owner = mod;
60290+ *(void **)&mod->trace_format.owner = mod;
60291+ pax_close_kernel();
60292
60293 list_add(&file_ops->list, &ftrace_module_file_list);
60294
60295@@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60296
60297 for_each_event(call, start, end) {
60298 __trace_add_event_call(*call, mod,
60299- &file_ops->id, &file_ops->enable,
60300- &file_ops->filter, &file_ops->format);
60301+ &mod->trace_id, &mod->trace_enable,
60302+ &mod->trace_filter, &mod->trace_format);
60303 }
60304 }
60305
60306diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60307--- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60308+++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60309@@ -24,7 +24,7 @@ struct header_iter {
60310 static struct trace_array *mmio_trace_array;
60311 static bool overrun_detected;
60312 static unsigned long prev_overruns;
60313-static atomic_t dropped_count;
60314+static atomic_unchecked_t dropped_count;
60315
60316 static void mmio_reset_data(struct trace_array *tr)
60317 {
60318@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60319
60320 static unsigned long count_overruns(struct trace_iterator *iter)
60321 {
60322- unsigned long cnt = atomic_xchg(&dropped_count, 0);
60323+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60324 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60325
60326 if (over > prev_overruns)
60327@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60328 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60329 sizeof(*entry), 0, pc);
60330 if (!event) {
60331- atomic_inc(&dropped_count);
60332+ atomic_inc_unchecked(&dropped_count);
60333 return;
60334 }
60335 entry = ring_buffer_event_data(event);
60336@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60337 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60338 sizeof(*entry), 0, pc);
60339 if (!event) {
60340- atomic_inc(&dropped_count);
60341+ atomic_inc_unchecked(&dropped_count);
60342 return;
60343 }
60344 entry = ring_buffer_event_data(event);
60345diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60346--- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60347+++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60348@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60349
60350 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60351 if (!IS_ERR(p)) {
60352- p = mangle_path(s->buffer + s->len, p, "\n");
60353+ p = mangle_path(s->buffer + s->len, p, "\n\\");
60354 if (p) {
60355 s->len = p - s->buffer;
60356 return 1;
60357diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60358--- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60359+++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60360@@ -50,7 +50,7 @@ static inline void check_stack(void)
60361 return;
60362
60363 /* we do not handle interrupt stacks yet */
60364- if (!object_is_on_stack(&this_size))
60365+ if (!object_starts_on_stack(&this_size))
60366 return;
60367
60368 local_irq_save(flags);
60369diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60370--- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60371+++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60372@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60373 int cpu;
60374 pid_t pid;
60375 /* Can be inserted from interrupt or user context, need to be atomic */
60376- atomic_t inserted;
60377+ atomic_unchecked_t inserted;
60378 /*
60379 * Don't need to be atomic, works are serialized in a single workqueue thread
60380 * on a single CPU.
60381@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60382 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60383 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60384 if (node->pid == wq_thread->pid) {
60385- atomic_inc(&node->inserted);
60386+ atomic_inc_unchecked(&node->inserted);
60387 goto found;
60388 }
60389 }
60390@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60391 tsk = get_pid_task(pid, PIDTYPE_PID);
60392 if (tsk) {
60393 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60394- atomic_read(&cws->inserted), cws->executed,
60395+ atomic_read_unchecked(&cws->inserted), cws->executed,
60396 tsk->comm);
60397 put_task_struct(tsk);
60398 }
60399diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60400--- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60401+++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60402@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60403 return BUG_TRAP_TYPE_NONE;
60404
60405 bug = find_bug(bugaddr);
60406+ if (!bug)
60407+ return BUG_TRAP_TYPE_NONE;
60408
60409 file = NULL;
60410 line = 0;
60411diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60412--- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60413+++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60414@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60415 if (limit > 4)
60416 return;
60417
60418- is_on_stack = object_is_on_stack(addr);
60419+ is_on_stack = object_starts_on_stack(addr);
60420 if (is_on_stack == onstack)
60421 return;
60422
60423diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60424--- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60425+++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60426@@ -862,7 +862,7 @@ out:
60427
60428 static void check_for_stack(struct device *dev, void *addr)
60429 {
60430- if (object_is_on_stack(addr))
60431+ if (object_starts_on_stack(addr))
60432 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60433 "stack [addr=%p]\n", addr);
60434 }
60435diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60436--- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60437+++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60438@@ -269,7 +269,7 @@ static void free(void *where)
60439 malloc_ptr = free_mem_ptr;
60440 }
60441 #else
60442-#define malloc(a) kmalloc(a, GFP_KERNEL)
60443+#define malloc(a) kmalloc((a), GFP_KERNEL)
60444 #define free(a) kfree(a)
60445 #endif
60446
60447diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60448--- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60449+++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60450@@ -1078,6 +1078,7 @@ config LATENCYTOP
60451 depends on DEBUG_KERNEL
60452 depends on STACKTRACE_SUPPORT
60453 depends on PROC_FS
60454+ depends on !GRKERNSEC_HIDESYM
60455 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60456 select KALLSYMS
60457 select KALLSYMS_ALL
60458diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60459--- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60460+++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60461@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60462 */
60463 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60464 {
60465- WARN_ON(release == NULL);
60466+ BUG_ON(release == NULL);
60467 WARN_ON(release == (void (*)(struct kref *))kfree);
60468
60469 if (atomic_dec_and_test(&kref->refcount)) {
60470diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60471--- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60472+++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60473@@ -80,7 +80,7 @@ struct radix_tree_preload {
60474 int nr;
60475 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60476 };
60477-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60478+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60479
60480 static inline void *ptr_to_indirect(void *ptr)
60481 {
60482diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60483--- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60484+++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60485@@ -16,6 +16,9 @@
60486 * - scnprintf and vscnprintf
60487 */
60488
60489+#ifdef CONFIG_GRKERNSEC_HIDESYM
60490+#define __INCLUDED_BY_HIDESYM 1
60491+#endif
60492 #include <stdarg.h>
60493 #include <linux/module.h>
60494 #include <linux/types.h>
60495@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60496 char sym[KSYM_SYMBOL_LEN];
60497 if (ext == 'B')
60498 sprint_backtrace(sym, value);
60499- else if (ext != 'f' && ext != 's')
60500+ else if (ext != 'f' && ext != 's' && ext != 'a')
60501 sprint_symbol(sym, value);
60502 else
60503 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60504@@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60505 return string(buf, end, uuid, spec);
60506 }
60507
60508+#ifdef CONFIG_GRKERNSEC_HIDESYM
60509+int kptr_restrict __read_mostly = 2;
60510+#else
60511 int kptr_restrict __read_mostly;
60512+#endif
60513
60514 /*
60515 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60516@@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60517 * - 'S' For symbolic direct pointers with offset
60518 * - 's' For symbolic direct pointers without offset
60519 * - 'B' For backtraced symbolic direct pointers with offset
60520+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60521+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60522 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60523 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60524 * - 'M' For a 6-byte MAC address, it prints the address in the
60525@@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60526 {
60527 if (!ptr && *fmt != 'K') {
60528 /*
60529- * Print (null) with the same width as a pointer so it makes
60530+ * Print (nil) with the same width as a pointer so it makes
60531 * tabular output look nice.
60532 */
60533 if (spec.field_width == -1)
60534 spec.field_width = 2 * sizeof(void *);
60535- return string(buf, end, "(null)", spec);
60536+ return string(buf, end, "(nil)", spec);
60537 }
60538
60539 switch (*fmt) {
60540@@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60541 /* Fallthrough */
60542 case 'S':
60543 case 's':
60544+#ifdef CONFIG_GRKERNSEC_HIDESYM
60545+ break;
60546+#else
60547+ return symbol_string(buf, end, ptr, spec, *fmt);
60548+#endif
60549+ case 'A':
60550+ case 'a':
60551 case 'B':
60552 return symbol_string(buf, end, ptr, spec, *fmt);
60553 case 'R':
60554@@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60555 typeof(type) value; \
60556 if (sizeof(type) == 8) { \
60557 args = PTR_ALIGN(args, sizeof(u32)); \
60558- *(u32 *)&value = *(u32 *)args; \
60559- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60560+ *(u32 *)&value = *(const u32 *)args; \
60561+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60562 } else { \
60563 args = PTR_ALIGN(args, sizeof(type)); \
60564- value = *(typeof(type) *)args; \
60565+ value = *(const typeof(type) *)args; \
60566 } \
60567 args += sizeof(type); \
60568 value; \
60569@@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60570 case FORMAT_TYPE_STR: {
60571 const char *str_arg = args;
60572 args += strlen(str_arg) + 1;
60573- str = string(str, end, (char *)str_arg, spec);
60574+ str = string(str, end, str_arg, spec);
60575 break;
60576 }
60577
60578diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60579--- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60580+++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60581@@ -0,0 +1 @@
60582+-grsec
60583diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60584--- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60585+++ linux-2.6.39.4/Makefile 2011-08-05 21:12:17.000000000 -0400
60586@@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60587
60588 HOSTCC = gcc
60589 HOSTCXX = g++
60590-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60591-HOSTCXXFLAGS = -O2
60592+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60593+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60594+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60595
60596 # Decide whether to build built-in, modular, or both.
60597 # Normally, just do built-in.
60598@@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60599 KBUILD_CPPFLAGS := -D__KERNEL__
60600
60601 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60602+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
60603 -fno-strict-aliasing -fno-common \
60604 -Werror-implicit-function-declaration \
60605 -Wno-format-security \
60606 -fno-delete-null-pointer-checks
60607+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60608 KBUILD_AFLAGS_KERNEL :=
60609 KBUILD_CFLAGS_KERNEL :=
60610 KBUILD_AFLAGS := -D__ASSEMBLY__
60611@@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60612 # Rules shared between *config targets and build targets
60613
60614 # Basic helpers built in scripts/
60615-PHONY += scripts_basic
60616-scripts_basic:
60617+PHONY += scripts_basic gcc-plugins
60618+scripts_basic: gcc-plugins
60619 $(Q)$(MAKE) $(build)=scripts/basic
60620 $(Q)rm -f .tmp_quiet_recordmcount
60621
60622@@ -548,6 +551,25 @@ else
60623 KBUILD_CFLAGS += -O2
60624 endif
60625
60626+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60627+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60628+ifdef CONFIG_PAX_MEMORY_STACKLEAK
60629+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60630+endif
60631+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60632+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60633+gcc-plugins:
60634+ $(Q)$(MAKE) $(build)=tools/gcc
60635+else
60636+gcc-plugins:
60637+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60638+ $(Q)echo "warning, your gcc installation does not support plugins, perhaps the necessary headers are missing?"
60639+else
60640+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60641+endif
60642+ $(Q)echo "PAX_MEMORY_STACKLEAK and other will be less secure"
60643+endif
60644+
60645 include $(srctree)/arch/$(SRCARCH)/Makefile
60646
60647 ifneq ($(CONFIG_FRAME_WARN),0)
60648@@ -685,7 +707,7 @@ export mod_strip_cmd
60649
60650
60651 ifeq ($(KBUILD_EXTMOD),)
60652-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60653+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60654
60655 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60656 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60657@@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60658 endif
60659
60660 # prepare2 creates a makefile if using a separate output directory
60661-prepare2: prepare3 outputmakefile
60662+prepare2: prepare3 outputmakefile gcc-plugins
60663
60664 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60665 include/config/auto.conf
60666@@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60667 $(call cmd,rmdirs)
60668 $(call cmd,rmfiles)
60669 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60670- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60671+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60672 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60673 -o -name '*.symtypes' -o -name 'modules.order' \
60674 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60675diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60676--- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60677+++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60678@@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60679 struct address_space *mapping = file->f_mapping;
60680
60681 if (!mapping->a_ops->readpage)
60682- return -ENOEXEC;
60683+ return -ENODEV;
60684 file_accessed(file);
60685 vma->vm_ops = &generic_file_vm_ops;
60686 vma->vm_flags |= VM_CAN_NONLINEAR;
60687@@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60688 *pos = i_size_read(inode);
60689
60690 if (limit != RLIM_INFINITY) {
60691+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60692 if (*pos >= limit) {
60693 send_sig(SIGXFSZ, current, 0);
60694 return -EFBIG;
60695diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60696--- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60697+++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60698@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60699 retry:
60700 vma = find_vma(mm, start);
60701
60702+#ifdef CONFIG_PAX_SEGMEXEC
60703+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60704+ goto out;
60705+#endif
60706+
60707 /*
60708 * Make sure the vma is shared, that it supports prefaulting,
60709 * and that the remapped range is valid and fully within
60710@@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60711 /*
60712 * drop PG_Mlocked flag for over-mapped range
60713 */
60714- unsigned int saved_flags = vma->vm_flags;
60715+ unsigned long saved_flags = vma->vm_flags;
60716 munlock_vma_pages_range(vma, start, start + size);
60717 vma->vm_flags = saved_flags;
60718 }
60719diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60720--- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60721+++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60722@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60723 * So no dangers, even with speculative execution.
60724 */
60725 page = pte_page(pkmap_page_table[i]);
60726+ pax_open_kernel();
60727 pte_clear(&init_mm, (unsigned long)page_address(page),
60728 &pkmap_page_table[i]);
60729-
60730+ pax_close_kernel();
60731 set_page_address(page, NULL);
60732 need_flush = 1;
60733 }
60734@@ -186,9 +187,11 @@ start:
60735 }
60736 }
60737 vaddr = PKMAP_ADDR(last_pkmap_nr);
60738+
60739+ pax_open_kernel();
60740 set_pte_at(&init_mm, vaddr,
60741 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60742-
60743+ pax_close_kernel();
60744 pkmap_count[last_pkmap_nr] = 1;
60745 set_page_address(page, (void *)vaddr);
60746
60747diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60748--- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60749+++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60750@@ -702,7 +702,7 @@ out:
60751 * run pte_offset_map on the pmd, if an huge pmd could
60752 * materialize from under us from a different thread.
60753 */
60754- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60755+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60756 return VM_FAULT_OOM;
60757 /* if an huge pmd materialized from under us just retry later */
60758 if (unlikely(pmd_trans_huge(*pmd)))
60759diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60760--- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60761+++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60762@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60763 return 1;
60764 }
60765
60766+#ifdef CONFIG_PAX_SEGMEXEC
60767+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60768+{
60769+ struct mm_struct *mm = vma->vm_mm;
60770+ struct vm_area_struct *vma_m;
60771+ unsigned long address_m;
60772+ pte_t *ptep_m;
60773+
60774+ vma_m = pax_find_mirror_vma(vma);
60775+ if (!vma_m)
60776+ return;
60777+
60778+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60779+ address_m = address + SEGMEXEC_TASK_SIZE;
60780+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60781+ get_page(page_m);
60782+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
60783+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60784+}
60785+#endif
60786+
60787 /*
60788 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60789 */
60790@@ -2440,6 +2461,11 @@ retry_avoidcopy:
60791 make_huge_pte(vma, new_page, 1));
60792 page_remove_rmap(old_page);
60793 hugepage_add_new_anon_rmap(new_page, vma, address);
60794+
60795+#ifdef CONFIG_PAX_SEGMEXEC
60796+ pax_mirror_huge_pte(vma, address, new_page);
60797+#endif
60798+
60799 /* Make the old page be freed below */
60800 new_page = old_page;
60801 mmu_notifier_invalidate_range_end(mm,
60802@@ -2591,6 +2617,10 @@ retry:
60803 && (vma->vm_flags & VM_SHARED)));
60804 set_huge_pte_at(mm, address, ptep, new_pte);
60805
60806+#ifdef CONFIG_PAX_SEGMEXEC
60807+ pax_mirror_huge_pte(vma, address, page);
60808+#endif
60809+
60810 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60811 /* Optimization, do the COW without a second fault */
60812 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60813@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60814 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60815 struct hstate *h = hstate_vma(vma);
60816
60817+#ifdef CONFIG_PAX_SEGMEXEC
60818+ struct vm_area_struct *vma_m;
60819+#endif
60820+
60821 ptep = huge_pte_offset(mm, address);
60822 if (ptep) {
60823 entry = huge_ptep_get(ptep);
60824@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60825 VM_FAULT_SET_HINDEX(h - hstates);
60826 }
60827
60828+#ifdef CONFIG_PAX_SEGMEXEC
60829+ vma_m = pax_find_mirror_vma(vma);
60830+ if (vma_m) {
60831+ unsigned long address_m;
60832+
60833+ if (vma->vm_start > vma_m->vm_start) {
60834+ address_m = address;
60835+ address -= SEGMEXEC_TASK_SIZE;
60836+ vma = vma_m;
60837+ h = hstate_vma(vma);
60838+ } else
60839+ address_m = address + SEGMEXEC_TASK_SIZE;
60840+
60841+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60842+ return VM_FAULT_OOM;
60843+ address_m &= HPAGE_MASK;
60844+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60845+ }
60846+#endif
60847+
60848 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60849 if (!ptep)
60850 return VM_FAULT_OOM;
60851diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
60852--- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
60853+++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
60854@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60855 * in mm/page_alloc.c
60856 */
60857 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60858+extern void free_compound_page(struct page *page);
60859 extern void prep_compound_page(struct page *page, unsigned long order);
60860 #ifdef CONFIG_MEMORY_FAILURE
60861 extern bool is_free_buddy_page(struct page *page);
60862diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
60863--- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
60864+++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
60865@@ -240,7 +240,7 @@ config KSM
60866 config DEFAULT_MMAP_MIN_ADDR
60867 int "Low address space to protect from user allocation"
60868 depends on MMU
60869- default 4096
60870+ default 65536
60871 help
60872 This is the portion of low virtual memory which should be protected
60873 from userspace allocation. Keeping a user from writing to low pages
60874diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
60875--- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
60876+++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
60877@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60878
60879 for (i = 0; i < object->trace_len; i++) {
60880 void *ptr = (void *)object->trace[i];
60881- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60882+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60883 }
60884 }
60885
60886diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
60887--- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
60888+++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
60889@@ -15,10 +15,10 @@
60890 * happens, handle that and return -EFAULT.
60891 */
60892
60893-long __weak probe_kernel_read(void *dst, void *src, size_t size)
60894+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
60895 __attribute__((alias("__probe_kernel_read")));
60896
60897-long __probe_kernel_read(void *dst, void *src, size_t size)
60898+long __probe_kernel_read(void *dst, const void *src, size_t size)
60899 {
60900 long ret;
60901 mm_segment_t old_fs = get_fs();
60902@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
60903 * Safely write to address @dst from the buffer at @src. If a kernel fault
60904 * happens, handle that and return -EFAULT.
60905 */
60906-long __weak probe_kernel_write(void *dst, void *src, size_t size)
60907+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
60908 __attribute__((alias("__probe_kernel_write")));
60909
60910-long __probe_kernel_write(void *dst, void *src, size_t size)
60911+long __probe_kernel_write(void *dst, const void *src, size_t size)
60912 {
60913 long ret;
60914 mm_segment_t old_fs = get_fs();
60915diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
60916--- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
60917+++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
60918@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60919 pgoff_t pgoff;
60920 unsigned long new_flags = vma->vm_flags;
60921
60922+#ifdef CONFIG_PAX_SEGMEXEC
60923+ struct vm_area_struct *vma_m;
60924+#endif
60925+
60926 switch (behavior) {
60927 case MADV_NORMAL:
60928 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60929@@ -110,6 +114,13 @@ success:
60930 /*
60931 * vm_flags is protected by the mmap_sem held in write mode.
60932 */
60933+
60934+#ifdef CONFIG_PAX_SEGMEXEC
60935+ vma_m = pax_find_mirror_vma(vma);
60936+ if (vma_m)
60937+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60938+#endif
60939+
60940 vma->vm_flags = new_flags;
60941
60942 out:
60943@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60944 struct vm_area_struct ** prev,
60945 unsigned long start, unsigned long end)
60946 {
60947+
60948+#ifdef CONFIG_PAX_SEGMEXEC
60949+ struct vm_area_struct *vma_m;
60950+#endif
60951+
60952 *prev = vma;
60953 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60954 return -EINVAL;
60955@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60956 zap_page_range(vma, start, end - start, &details);
60957 } else
60958 zap_page_range(vma, start, end - start, NULL);
60959+
60960+#ifdef CONFIG_PAX_SEGMEXEC
60961+ vma_m = pax_find_mirror_vma(vma);
60962+ if (vma_m) {
60963+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60964+ struct zap_details details = {
60965+ .nonlinear_vma = vma_m,
60966+ .last_index = ULONG_MAX,
60967+ };
60968+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60969+ } else
60970+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60971+ }
60972+#endif
60973+
60974 return 0;
60975 }
60976
60977@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60978 if (end < start)
60979 goto out;
60980
60981+#ifdef CONFIG_PAX_SEGMEXEC
60982+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60983+ if (end > SEGMEXEC_TASK_SIZE)
60984+ goto out;
60985+ } else
60986+#endif
60987+
60988+ if (end > TASK_SIZE)
60989+ goto out;
60990+
60991 error = 0;
60992 if (end == start)
60993 goto out;
60994diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
60995--- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
60996+++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
60997@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
60998 return;
60999
61000 pmd = pmd_offset(pud, start);
61001+
61002+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61003 pud_clear(pud);
61004 pmd_free_tlb(tlb, pmd, start);
61005+#endif
61006+
61007 }
61008
61009 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61010@@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61011 if (end - 1 > ceiling - 1)
61012 return;
61013
61014+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61015 pud = pud_offset(pgd, start);
61016 pgd_clear(pgd);
61017 pud_free_tlb(tlb, pud, start);
61018+#endif
61019+
61020 }
61021
61022 /*
61023@@ -1410,12 +1417,6 @@ no_page_table:
61024 return page;
61025 }
61026
61027-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61028-{
61029- return stack_guard_page_start(vma, addr) ||
61030- stack_guard_page_end(vma, addr+PAGE_SIZE);
61031-}
61032-
61033 /**
61034 * __get_user_pages() - pin user pages in memory
61035 * @tsk: task_struct of target task
61036@@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61037 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61038 i = 0;
61039
61040- do {
61041+ while (nr_pages) {
61042 struct vm_area_struct *vma;
61043
61044- vma = find_extend_vma(mm, start);
61045+ vma = find_vma(mm, start);
61046 if (!vma && in_gate_area(mm, start)) {
61047 unsigned long pg = start & PAGE_MASK;
61048 pgd_t *pgd;
61049@@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61050 goto next_page;
61051 }
61052
61053- if (!vma ||
61054+ if (!vma || start < vma->vm_start ||
61055 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61056 !(vm_flags & vma->vm_flags))
61057 return i ? : -EFAULT;
61058@@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61059 int ret;
61060 unsigned int fault_flags = 0;
61061
61062- /* For mlock, just skip the stack guard page. */
61063- if (foll_flags & FOLL_MLOCK) {
61064- if (stack_guard_page(vma, start))
61065- goto next_page;
61066- }
61067 if (foll_flags & FOLL_WRITE)
61068 fault_flags |= FAULT_FLAG_WRITE;
61069 if (nonblocking)
61070@@ -1644,7 +1640,7 @@ next_page:
61071 start += PAGE_SIZE;
61072 nr_pages--;
61073 } while (nr_pages && start < vma->vm_end);
61074- } while (nr_pages);
61075+ }
61076 return i;
61077 }
61078 EXPORT_SYMBOL(__get_user_pages);
61079@@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61080 page_add_file_rmap(page);
61081 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61082
61083+#ifdef CONFIG_PAX_SEGMEXEC
61084+ pax_mirror_file_pte(vma, addr, page, ptl);
61085+#endif
61086+
61087 retval = 0;
61088 pte_unmap_unlock(pte, ptl);
61089 return retval;
61090@@ -1829,10 +1829,22 @@ out:
61091 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61092 struct page *page)
61093 {
61094+
61095+#ifdef CONFIG_PAX_SEGMEXEC
61096+ struct vm_area_struct *vma_m;
61097+#endif
61098+
61099 if (addr < vma->vm_start || addr >= vma->vm_end)
61100 return -EFAULT;
61101 if (!page_count(page))
61102 return -EINVAL;
61103+
61104+#ifdef CONFIG_PAX_SEGMEXEC
61105+ vma_m = pax_find_mirror_vma(vma);
61106+ if (vma_m)
61107+ vma_m->vm_flags |= VM_INSERTPAGE;
61108+#endif
61109+
61110 vma->vm_flags |= VM_INSERTPAGE;
61111 return insert_page(vma, addr, page, vma->vm_page_prot);
61112 }
61113@@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61114 unsigned long pfn)
61115 {
61116 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61117+ BUG_ON(vma->vm_mirror);
61118
61119 if (addr < vma->vm_start || addr >= vma->vm_end)
61120 return -EFAULT;
61121@@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61122 copy_user_highpage(dst, src, va, vma);
61123 }
61124
61125+#ifdef CONFIG_PAX_SEGMEXEC
61126+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61127+{
61128+ struct mm_struct *mm = vma->vm_mm;
61129+ spinlock_t *ptl;
61130+ pte_t *pte, entry;
61131+
61132+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61133+ entry = *pte;
61134+ if (!pte_present(entry)) {
61135+ if (!pte_none(entry)) {
61136+ BUG_ON(pte_file(entry));
61137+ free_swap_and_cache(pte_to_swp_entry(entry));
61138+ pte_clear_not_present_full(mm, address, pte, 0);
61139+ }
61140+ } else {
61141+ struct page *page;
61142+
61143+ flush_cache_page(vma, address, pte_pfn(entry));
61144+ entry = ptep_clear_flush(vma, address, pte);
61145+ BUG_ON(pte_dirty(entry));
61146+ page = vm_normal_page(vma, address, entry);
61147+ if (page) {
61148+ update_hiwater_rss(mm);
61149+ if (PageAnon(page))
61150+ dec_mm_counter_fast(mm, MM_ANONPAGES);
61151+ else
61152+ dec_mm_counter_fast(mm, MM_FILEPAGES);
61153+ page_remove_rmap(page);
61154+ page_cache_release(page);
61155+ }
61156+ }
61157+ pte_unmap_unlock(pte, ptl);
61158+}
61159+
61160+/* PaX: if vma is mirrored, synchronize the mirror's PTE
61161+ *
61162+ * the ptl of the lower mapped page is held on entry and is not released on exit
61163+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61164+ */
61165+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61166+{
61167+ struct mm_struct *mm = vma->vm_mm;
61168+ unsigned long address_m;
61169+ spinlock_t *ptl_m;
61170+ struct vm_area_struct *vma_m;
61171+ pmd_t *pmd_m;
61172+ pte_t *pte_m, entry_m;
61173+
61174+ BUG_ON(!page_m || !PageAnon(page_m));
61175+
61176+ vma_m = pax_find_mirror_vma(vma);
61177+ if (!vma_m)
61178+ return;
61179+
61180+ BUG_ON(!PageLocked(page_m));
61181+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61182+ address_m = address + SEGMEXEC_TASK_SIZE;
61183+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61184+ pte_m = pte_offset_map(pmd_m, address_m);
61185+ ptl_m = pte_lockptr(mm, pmd_m);
61186+ if (ptl != ptl_m) {
61187+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61188+ if (!pte_none(*pte_m))
61189+ goto out;
61190+ }
61191+
61192+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61193+ page_cache_get(page_m);
61194+ page_add_anon_rmap(page_m, vma_m, address_m);
61195+ inc_mm_counter_fast(mm, MM_ANONPAGES);
61196+ set_pte_at(mm, address_m, pte_m, entry_m);
61197+ update_mmu_cache(vma_m, address_m, entry_m);
61198+out:
61199+ if (ptl != ptl_m)
61200+ spin_unlock(ptl_m);
61201+ pte_unmap(pte_m);
61202+ unlock_page(page_m);
61203+}
61204+
61205+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61206+{
61207+ struct mm_struct *mm = vma->vm_mm;
61208+ unsigned long address_m;
61209+ spinlock_t *ptl_m;
61210+ struct vm_area_struct *vma_m;
61211+ pmd_t *pmd_m;
61212+ pte_t *pte_m, entry_m;
61213+
61214+ BUG_ON(!page_m || PageAnon(page_m));
61215+
61216+ vma_m = pax_find_mirror_vma(vma);
61217+ if (!vma_m)
61218+ return;
61219+
61220+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61221+ address_m = address + SEGMEXEC_TASK_SIZE;
61222+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61223+ pte_m = pte_offset_map(pmd_m, address_m);
61224+ ptl_m = pte_lockptr(mm, pmd_m);
61225+ if (ptl != ptl_m) {
61226+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61227+ if (!pte_none(*pte_m))
61228+ goto out;
61229+ }
61230+
61231+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61232+ page_cache_get(page_m);
61233+ page_add_file_rmap(page_m);
61234+ inc_mm_counter_fast(mm, MM_FILEPAGES);
61235+ set_pte_at(mm, address_m, pte_m, entry_m);
61236+ update_mmu_cache(vma_m, address_m, entry_m);
61237+out:
61238+ if (ptl != ptl_m)
61239+ spin_unlock(ptl_m);
61240+ pte_unmap(pte_m);
61241+}
61242+
61243+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61244+{
61245+ struct mm_struct *mm = vma->vm_mm;
61246+ unsigned long address_m;
61247+ spinlock_t *ptl_m;
61248+ struct vm_area_struct *vma_m;
61249+ pmd_t *pmd_m;
61250+ pte_t *pte_m, entry_m;
61251+
61252+ vma_m = pax_find_mirror_vma(vma);
61253+ if (!vma_m)
61254+ return;
61255+
61256+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61257+ address_m = address + SEGMEXEC_TASK_SIZE;
61258+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61259+ pte_m = pte_offset_map(pmd_m, address_m);
61260+ ptl_m = pte_lockptr(mm, pmd_m);
61261+ if (ptl != ptl_m) {
61262+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61263+ if (!pte_none(*pte_m))
61264+ goto out;
61265+ }
61266+
61267+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61268+ set_pte_at(mm, address_m, pte_m, entry_m);
61269+out:
61270+ if (ptl != ptl_m)
61271+ spin_unlock(ptl_m);
61272+ pte_unmap(pte_m);
61273+}
61274+
61275+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61276+{
61277+ struct page *page_m;
61278+ pte_t entry;
61279+
61280+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61281+ goto out;
61282+
61283+ entry = *pte;
61284+ page_m = vm_normal_page(vma, address, entry);
61285+ if (!page_m)
61286+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61287+ else if (PageAnon(page_m)) {
61288+ if (pax_find_mirror_vma(vma)) {
61289+ pte_unmap_unlock(pte, ptl);
61290+ lock_page(page_m);
61291+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61292+ if (pte_same(entry, *pte))
61293+ pax_mirror_anon_pte(vma, address, page_m, ptl);
61294+ else
61295+ unlock_page(page_m);
61296+ }
61297+ } else
61298+ pax_mirror_file_pte(vma, address, page_m, ptl);
61299+
61300+out:
61301+ pte_unmap_unlock(pte, ptl);
61302+}
61303+#endif
61304+
61305 /*
61306 * This routine handles present pages, when users try to write
61307 * to a shared page. It is done by copying the page to a new address
61308@@ -2444,6 +2637,12 @@ gotten:
61309 */
61310 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61311 if (likely(pte_same(*page_table, orig_pte))) {
61312+
61313+#ifdef CONFIG_PAX_SEGMEXEC
61314+ if (pax_find_mirror_vma(vma))
61315+ BUG_ON(!trylock_page(new_page));
61316+#endif
61317+
61318 if (old_page) {
61319 if (!PageAnon(old_page)) {
61320 dec_mm_counter_fast(mm, MM_FILEPAGES);
61321@@ -2495,6 +2694,10 @@ gotten:
61322 page_remove_rmap(old_page);
61323 }
61324
61325+#ifdef CONFIG_PAX_SEGMEXEC
61326+ pax_mirror_anon_pte(vma, address, new_page, ptl);
61327+#endif
61328+
61329 /* Free the old page.. */
61330 new_page = old_page;
61331 ret |= VM_FAULT_WRITE;
61332@@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61333 swap_free(entry);
61334 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61335 try_to_free_swap(page);
61336+
61337+#ifdef CONFIG_PAX_SEGMEXEC
61338+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61339+#endif
61340+
61341 unlock_page(page);
61342 if (swapcache) {
61343 /*
61344@@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61345
61346 /* No need to invalidate - it was non-present before */
61347 update_mmu_cache(vma, address, page_table);
61348+
61349+#ifdef CONFIG_PAX_SEGMEXEC
61350+ pax_mirror_anon_pte(vma, address, page, ptl);
61351+#endif
61352+
61353 unlock:
61354 pte_unmap_unlock(page_table, ptl);
61355 out:
61356@@ -2947,40 +3160,6 @@ out_release:
61357 }
61358
61359 /*
61360- * This is like a special single-page "expand_{down|up}wards()",
61361- * except we must first make sure that 'address{-|+}PAGE_SIZE'
61362- * doesn't hit another vma.
61363- */
61364-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61365-{
61366- address &= PAGE_MASK;
61367- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61368- struct vm_area_struct *prev = vma->vm_prev;
61369-
61370- /*
61371- * Is there a mapping abutting this one below?
61372- *
61373- * That's only ok if it's the same stack mapping
61374- * that has gotten split..
61375- */
61376- if (prev && prev->vm_end == address)
61377- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61378-
61379- expand_stack(vma, address - PAGE_SIZE);
61380- }
61381- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61382- struct vm_area_struct *next = vma->vm_next;
61383-
61384- /* As VM_GROWSDOWN but s/below/above/ */
61385- if (next && next->vm_start == address + PAGE_SIZE)
61386- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61387-
61388- expand_upwards(vma, address + PAGE_SIZE);
61389- }
61390- return 0;
61391-}
61392-
61393-/*
61394 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61395 * but allow concurrent faults), and pte mapped but not yet locked.
61396 * We return with mmap_sem still held, but pte unmapped and unlocked.
61397@@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61398 unsigned long address, pte_t *page_table, pmd_t *pmd,
61399 unsigned int flags)
61400 {
61401- struct page *page;
61402+ struct page *page = NULL;
61403 spinlock_t *ptl;
61404 pte_t entry;
61405
61406- pte_unmap(page_table);
61407-
61408- /* Check if we need to add a guard page to the stack */
61409- if (check_stack_guard_page(vma, address) < 0)
61410- return VM_FAULT_SIGBUS;
61411-
61412- /* Use the zero-page for reads */
61413 if (!(flags & FAULT_FLAG_WRITE)) {
61414 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61415 vma->vm_page_prot));
61416- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61417+ ptl = pte_lockptr(mm, pmd);
61418+ spin_lock(ptl);
61419 if (!pte_none(*page_table))
61420 goto unlock;
61421 goto setpte;
61422 }
61423
61424 /* Allocate our own private page. */
61425+ pte_unmap(page_table);
61426+
61427 if (unlikely(anon_vma_prepare(vma)))
61428 goto oom;
61429 page = alloc_zeroed_user_highpage_movable(vma, address);
61430@@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61431 if (!pte_none(*page_table))
61432 goto release;
61433
61434+#ifdef CONFIG_PAX_SEGMEXEC
61435+ if (pax_find_mirror_vma(vma))
61436+ BUG_ON(!trylock_page(page));
61437+#endif
61438+
61439 inc_mm_counter_fast(mm, MM_ANONPAGES);
61440 page_add_new_anon_rmap(page, vma, address);
61441 setpte:
61442@@ -3035,6 +3215,12 @@ setpte:
61443
61444 /* No need to invalidate - it was non-present before */
61445 update_mmu_cache(vma, address, page_table);
61446+
61447+#ifdef CONFIG_PAX_SEGMEXEC
61448+ if (page)
61449+ pax_mirror_anon_pte(vma, address, page, ptl);
61450+#endif
61451+
61452 unlock:
61453 pte_unmap_unlock(page_table, ptl);
61454 return 0;
61455@@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61456 */
61457 /* Only go through if we didn't race with anybody else... */
61458 if (likely(pte_same(*page_table, orig_pte))) {
61459+
61460+#ifdef CONFIG_PAX_SEGMEXEC
61461+ if (anon && pax_find_mirror_vma(vma))
61462+ BUG_ON(!trylock_page(page));
61463+#endif
61464+
61465 flush_icache_page(vma, page);
61466 entry = mk_pte(page, vma->vm_page_prot);
61467 if (flags & FAULT_FLAG_WRITE)
61468@@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61469
61470 /* no need to invalidate: a not-present page won't be cached */
61471 update_mmu_cache(vma, address, page_table);
61472+
61473+#ifdef CONFIG_PAX_SEGMEXEC
61474+ if (anon)
61475+ pax_mirror_anon_pte(vma, address, page, ptl);
61476+ else
61477+ pax_mirror_file_pte(vma, address, page, ptl);
61478+#endif
61479+
61480 } else {
61481 if (charged)
61482 mem_cgroup_uncharge_page(page);
61483@@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61484 if (flags & FAULT_FLAG_WRITE)
61485 flush_tlb_fix_spurious_fault(vma, address);
61486 }
61487+
61488+#ifdef CONFIG_PAX_SEGMEXEC
61489+ pax_mirror_pte(vma, address, pte, pmd, ptl);
61490+ return 0;
61491+#endif
61492+
61493 unlock:
61494 pte_unmap_unlock(pte, ptl);
61495 return 0;
61496@@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61497 pmd_t *pmd;
61498 pte_t *pte;
61499
61500+#ifdef CONFIG_PAX_SEGMEXEC
61501+ struct vm_area_struct *vma_m;
61502+#endif
61503+
61504 __set_current_state(TASK_RUNNING);
61505
61506 count_vm_event(PGFAULT);
61507@@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61508 if (unlikely(is_vm_hugetlb_page(vma)))
61509 return hugetlb_fault(mm, vma, address, flags);
61510
61511+#ifdef CONFIG_PAX_SEGMEXEC
61512+ vma_m = pax_find_mirror_vma(vma);
61513+ if (vma_m) {
61514+ unsigned long address_m;
61515+ pgd_t *pgd_m;
61516+ pud_t *pud_m;
61517+ pmd_t *pmd_m;
61518+
61519+ if (vma->vm_start > vma_m->vm_start) {
61520+ address_m = address;
61521+ address -= SEGMEXEC_TASK_SIZE;
61522+ vma = vma_m;
61523+ } else
61524+ address_m = address + SEGMEXEC_TASK_SIZE;
61525+
61526+ pgd_m = pgd_offset(mm, address_m);
61527+ pud_m = pud_alloc(mm, pgd_m, address_m);
61528+ if (!pud_m)
61529+ return VM_FAULT_OOM;
61530+ pmd_m = pmd_alloc(mm, pud_m, address_m);
61531+ if (!pmd_m)
61532+ return VM_FAULT_OOM;
61533+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61534+ return VM_FAULT_OOM;
61535+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61536+ }
61537+#endif
61538+
61539 pgd = pgd_offset(mm, address);
61540 pud = pud_alloc(mm, pgd, address);
61541 if (!pud)
61542@@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61543 * run pte_offset_map on the pmd, if an huge pmd could
61544 * materialize from under us from a different thread.
61545 */
61546- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61547+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61548 return VM_FAULT_OOM;
61549 /* if an huge pmd materialized from under us just retry later */
61550 if (unlikely(pmd_trans_huge(*pmd)))
61551@@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61552 gate_vma.vm_start = FIXADDR_USER_START;
61553 gate_vma.vm_end = FIXADDR_USER_END;
61554 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61555- gate_vma.vm_page_prot = __P101;
61556+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61557 /*
61558 * Make sure the vDSO gets into every core dump.
61559 * Dumping its contents makes post-mortem fully interpretable later
61560diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61561--- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61562+++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61563@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61564
61565 int sysctl_memory_failure_recovery __read_mostly = 1;
61566
61567-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61568+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61569
61570 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61571
61572@@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61573 }
61574
61575 nr_pages = 1 << compound_trans_order(hpage);
61576- atomic_long_add(nr_pages, &mce_bad_pages);
61577+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61578
61579 /*
61580 * We need/can do nothing about count=0 pages.
61581@@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61582 if (!PageHWPoison(hpage)
61583 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61584 || (p != hpage && TestSetPageHWPoison(hpage))) {
61585- atomic_long_sub(nr_pages, &mce_bad_pages);
61586+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61587 return 0;
61588 }
61589 set_page_hwpoison_huge_page(hpage);
61590@@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61591 }
61592 if (hwpoison_filter(p)) {
61593 if (TestClearPageHWPoison(p))
61594- atomic_long_sub(nr_pages, &mce_bad_pages);
61595+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61596 unlock_page(hpage);
61597 put_page(hpage);
61598 return 0;
61599@@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61600 return 0;
61601 }
61602 if (TestClearPageHWPoison(p))
61603- atomic_long_sub(nr_pages, &mce_bad_pages);
61604+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61605 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61606 return 0;
61607 }
61608@@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61609 */
61610 if (TestClearPageHWPoison(page)) {
61611 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61612- atomic_long_sub(nr_pages, &mce_bad_pages);
61613+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61614 freeit = 1;
61615 if (PageHuge(page))
61616 clear_page_hwpoison_huge_page(page);
61617@@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61618 }
61619 done:
61620 if (!PageHWPoison(hpage))
61621- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61622+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61623 set_page_hwpoison_huge_page(hpage);
61624 dequeue_hwpoisoned_huge_page(hpage);
61625 /* keep elevated page count for bad page */
61626@@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61627 return ret;
61628
61629 done:
61630- atomic_long_add(1, &mce_bad_pages);
61631+ atomic_long_add_unchecked(1, &mce_bad_pages);
61632 SetPageHWPoison(page);
61633 /* keep elevated page count for bad page */
61634 return ret;
61635diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61636--- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61637+++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61638@@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61639 unsigned long vmstart;
61640 unsigned long vmend;
61641
61642+#ifdef CONFIG_PAX_SEGMEXEC
61643+ struct vm_area_struct *vma_m;
61644+#endif
61645+
61646 vma = find_vma_prev(mm, start, &prev);
61647 if (!vma || vma->vm_start > start)
61648 return -EFAULT;
61649@@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61650 err = policy_vma(vma, new_pol);
61651 if (err)
61652 goto out;
61653+
61654+#ifdef CONFIG_PAX_SEGMEXEC
61655+ vma_m = pax_find_mirror_vma(vma);
61656+ if (vma_m) {
61657+ err = policy_vma(vma_m, new_pol);
61658+ if (err)
61659+ goto out;
61660+ }
61661+#endif
61662+
61663 }
61664
61665 out:
61666@@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61667
61668 if (end < start)
61669 return -EINVAL;
61670+
61671+#ifdef CONFIG_PAX_SEGMEXEC
61672+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61673+ if (end > SEGMEXEC_TASK_SIZE)
61674+ return -EINVAL;
61675+ } else
61676+#endif
61677+
61678+ if (end > TASK_SIZE)
61679+ return -EINVAL;
61680+
61681 if (end == start)
61682 return 0;
61683
61684@@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61685 if (!mm)
61686 goto out;
61687
61688+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61689+ if (mm != current->mm &&
61690+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61691+ err = -EPERM;
61692+ goto out;
61693+ }
61694+#endif
61695+
61696 /*
61697 * Check if this process has the right to modify the specified
61698 * process. The right exists if the process has administrative
61699@@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61700 rcu_read_lock();
61701 tcred = __task_cred(task);
61702 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61703- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61704- !capable(CAP_SYS_NICE)) {
61705+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61706 rcu_read_unlock();
61707 err = -EPERM;
61708 goto out;
61709@@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61710
61711 if (file) {
61712 seq_printf(m, " file=");
61713- seq_path(m, &file->f_path, "\n\t= ");
61714+ seq_path(m, &file->f_path, "\n\t\\= ");
61715 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61716 seq_printf(m, " heap");
61717 } else if (vma->vm_start <= mm->start_stack &&
61718diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61719--- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61720+++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61721@@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61722 unsigned long chunk_start;
61723 int err;
61724
61725+ pax_track_stack();
61726+
61727 task_nodes = cpuset_mems_allowed(task);
61728
61729 err = -ENOMEM;
61730@@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61731 if (!mm)
61732 return -EINVAL;
61733
61734+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61735+ if (mm != current->mm &&
61736+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61737+ err = -EPERM;
61738+ goto out;
61739+ }
61740+#endif
61741+
61742 /*
61743 * Check if this process has the right to modify the specified
61744 * process. The right exists if the process has administrative
61745@@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61746 rcu_read_lock();
61747 tcred = __task_cred(task);
61748 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61749- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61750- !capable(CAP_SYS_NICE)) {
61751+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61752 rcu_read_unlock();
61753 err = -EPERM;
61754 goto out;
61755diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61756--- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61757+++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61758@@ -13,6 +13,7 @@
61759 #include <linux/pagemap.h>
61760 #include <linux/mempolicy.h>
61761 #include <linux/syscalls.h>
61762+#include <linux/security.h>
61763 #include <linux/sched.h>
61764 #include <linux/module.h>
61765 #include <linux/rmap.h>
61766@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61767 return -EINVAL;
61768 if (end == start)
61769 return 0;
61770+ if (end > TASK_SIZE)
61771+ return -EINVAL;
61772+
61773 vma = find_vma_prev(current->mm, start, &prev);
61774 if (!vma || vma->vm_start > start)
61775 return -ENOMEM;
61776@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61777 for (nstart = start ; ; ) {
61778 unsigned int newflags;
61779
61780+#ifdef CONFIG_PAX_SEGMEXEC
61781+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61782+ break;
61783+#endif
61784+
61785 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61786
61787 newflags = vma->vm_flags | VM_LOCKED;
61788@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61789 lock_limit >>= PAGE_SHIFT;
61790
61791 /* check against resource limits */
61792+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61793 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61794 error = do_mlock(start, len, 1);
61795 up_write(&current->mm->mmap_sem);
61796@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61797 static int do_mlockall(int flags)
61798 {
61799 struct vm_area_struct * vma, * prev = NULL;
61800- unsigned int def_flags = 0;
61801
61802 if (flags & MCL_FUTURE)
61803- def_flags = VM_LOCKED;
61804- current->mm->def_flags = def_flags;
61805+ current->mm->def_flags |= VM_LOCKED;
61806+ else
61807+ current->mm->def_flags &= ~VM_LOCKED;
61808 if (flags == MCL_FUTURE)
61809 goto out;
61810
61811 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61812- unsigned int newflags;
61813+ unsigned long newflags;
61814+
61815+#ifdef CONFIG_PAX_SEGMEXEC
61816+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61817+ break;
61818+#endif
61819
61820+ BUG_ON(vma->vm_end > TASK_SIZE);
61821 newflags = vma->vm_flags | VM_LOCKED;
61822 if (!(flags & MCL_CURRENT))
61823 newflags &= ~VM_LOCKED;
61824@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61825 lock_limit >>= PAGE_SHIFT;
61826
61827 ret = -ENOMEM;
61828+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61829 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61830 capable(CAP_IPC_LOCK))
61831 ret = do_mlockall(flags);
61832diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
61833--- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
61834+++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
61835@@ -46,6 +46,16 @@
61836 #define arch_rebalance_pgtables(addr, len) (addr)
61837 #endif
61838
61839+static inline void verify_mm_writelocked(struct mm_struct *mm)
61840+{
61841+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61842+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61843+ up_read(&mm->mmap_sem);
61844+ BUG();
61845+ }
61846+#endif
61847+}
61848+
61849 static void unmap_region(struct mm_struct *mm,
61850 struct vm_area_struct *vma, struct vm_area_struct *prev,
61851 unsigned long start, unsigned long end);
61852@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61853 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61854 *
61855 */
61856-pgprot_t protection_map[16] = {
61857+pgprot_t protection_map[16] __read_only = {
61858 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61859 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61860 };
61861
61862 pgprot_t vm_get_page_prot(unsigned long vm_flags)
61863 {
61864- return __pgprot(pgprot_val(protection_map[vm_flags &
61865+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61866 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61867 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61868+
61869+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61870+ if (!(__supported_pte_mask & _PAGE_NX) &&
61871+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61872+ (vm_flags & (VM_READ | VM_WRITE)))
61873+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61874+#endif
61875+
61876+ return prot;
61877 }
61878 EXPORT_SYMBOL(vm_get_page_prot);
61879
61880 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
61881 int sysctl_overcommit_ratio = 50; /* default is 50% */
61882 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61883+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61884 struct percpu_counter vm_committed_as;
61885
61886 /*
61887@@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
61888 struct vm_area_struct *next = vma->vm_next;
61889
61890 might_sleep();
61891+ BUG_ON(vma->vm_mirror);
61892 if (vma->vm_ops && vma->vm_ops->close)
61893 vma->vm_ops->close(vma);
61894 if (vma->vm_file) {
61895@@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61896 * not page aligned -Ram Gupta
61897 */
61898 rlim = rlimit(RLIMIT_DATA);
61899+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61900 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61901 (mm->end_data - mm->start_data) > rlim)
61902 goto out;
61903@@ -719,6 +741,12 @@ static int
61904 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61905 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61906 {
61907+
61908+#ifdef CONFIG_PAX_SEGMEXEC
61909+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61910+ return 0;
61911+#endif
61912+
61913 if (is_mergeable_vma(vma, file, vm_flags) &&
61914 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
61915 if (vma->vm_pgoff == vm_pgoff)
61916@@ -738,6 +766,12 @@ static int
61917 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61918 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61919 {
61920+
61921+#ifdef CONFIG_PAX_SEGMEXEC
61922+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61923+ return 0;
61924+#endif
61925+
61926 if (is_mergeable_vma(vma, file, vm_flags) &&
61927 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
61928 pgoff_t vm_pglen;
61929@@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
61930 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61931 struct vm_area_struct *prev, unsigned long addr,
61932 unsigned long end, unsigned long vm_flags,
61933- struct anon_vma *anon_vma, struct file *file,
61934+ struct anon_vma *anon_vma, struct file *file,
61935 pgoff_t pgoff, struct mempolicy *policy)
61936 {
61937 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61938 struct vm_area_struct *area, *next;
61939 int err;
61940
61941+#ifdef CONFIG_PAX_SEGMEXEC
61942+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61943+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61944+
61945+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61946+#endif
61947+
61948 /*
61949 * We later require that vma->vm_flags == vm_flags,
61950 * so this tests vma->vm_flags & VM_SPECIAL, too.
61951@@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
61952 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61953 next = next->vm_next;
61954
61955+#ifdef CONFIG_PAX_SEGMEXEC
61956+ if (prev)
61957+ prev_m = pax_find_mirror_vma(prev);
61958+ if (area)
61959+ area_m = pax_find_mirror_vma(area);
61960+ if (next)
61961+ next_m = pax_find_mirror_vma(next);
61962+#endif
61963+
61964 /*
61965 * Can it merge with the predecessor?
61966 */
61967@@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
61968 /* cases 1, 6 */
61969 err = vma_adjust(prev, prev->vm_start,
61970 next->vm_end, prev->vm_pgoff, NULL);
61971- } else /* cases 2, 5, 7 */
61972+
61973+#ifdef CONFIG_PAX_SEGMEXEC
61974+ if (!err && prev_m)
61975+ err = vma_adjust(prev_m, prev_m->vm_start,
61976+ next_m->vm_end, prev_m->vm_pgoff, NULL);
61977+#endif
61978+
61979+ } else { /* cases 2, 5, 7 */
61980 err = vma_adjust(prev, prev->vm_start,
61981 end, prev->vm_pgoff, NULL);
61982+
61983+#ifdef CONFIG_PAX_SEGMEXEC
61984+ if (!err && prev_m)
61985+ err = vma_adjust(prev_m, prev_m->vm_start,
61986+ end_m, prev_m->vm_pgoff, NULL);
61987+#endif
61988+
61989+ }
61990 if (err)
61991 return NULL;
61992 khugepaged_enter_vma_merge(prev);
61993@@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
61994 mpol_equal(policy, vma_policy(next)) &&
61995 can_vma_merge_before(next, vm_flags,
61996 anon_vma, file, pgoff+pglen)) {
61997- if (prev && addr < prev->vm_end) /* case 4 */
61998+ if (prev && addr < prev->vm_end) { /* case 4 */
61999 err = vma_adjust(prev, prev->vm_start,
62000 addr, prev->vm_pgoff, NULL);
62001- else /* cases 3, 8 */
62002+
62003+#ifdef CONFIG_PAX_SEGMEXEC
62004+ if (!err && prev_m)
62005+ err = vma_adjust(prev_m, prev_m->vm_start,
62006+ addr_m, prev_m->vm_pgoff, NULL);
62007+#endif
62008+
62009+ } else { /* cases 3, 8 */
62010 err = vma_adjust(area, addr, next->vm_end,
62011 next->vm_pgoff - pglen, NULL);
62012+
62013+#ifdef CONFIG_PAX_SEGMEXEC
62014+ if (!err && area_m)
62015+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
62016+ next_m->vm_pgoff - pglen, NULL);
62017+#endif
62018+
62019+ }
62020 if (err)
62021 return NULL;
62022 khugepaged_enter_vma_merge(area);
62023@@ -958,14 +1038,11 @@ none:
62024 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62025 struct file *file, long pages)
62026 {
62027- const unsigned long stack_flags
62028- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62029-
62030 if (file) {
62031 mm->shared_vm += pages;
62032 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62033 mm->exec_vm += pages;
62034- } else if (flags & stack_flags)
62035+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62036 mm->stack_vm += pages;
62037 if (flags & (VM_RESERVED|VM_IO))
62038 mm->reserved_vm += pages;
62039@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62040 * (the exception is when the underlying filesystem is noexec
62041 * mounted, in which case we dont add PROT_EXEC.)
62042 */
62043- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62044+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62045 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62046 prot |= PROT_EXEC;
62047
62048@@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62049 /* Obtain the address to map to. we verify (or select) it and ensure
62050 * that it represents a valid section of the address space.
62051 */
62052- addr = get_unmapped_area(file, addr, len, pgoff, flags);
62053+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62054 if (addr & ~PAGE_MASK)
62055 return addr;
62056
62057@@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62058 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62059 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62060
62061+#ifdef CONFIG_PAX_MPROTECT
62062+ if (mm->pax_flags & MF_PAX_MPROTECT) {
62063+#ifndef CONFIG_PAX_MPROTECT_COMPAT
62064+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62065+ gr_log_rwxmmap(file);
62066+
62067+#ifdef CONFIG_PAX_EMUPLT
62068+ vm_flags &= ~VM_EXEC;
62069+#else
62070+ return -EPERM;
62071+#endif
62072+
62073+ }
62074+
62075+ if (!(vm_flags & VM_EXEC))
62076+ vm_flags &= ~VM_MAYEXEC;
62077+#else
62078+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62079+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62080+#endif
62081+ else
62082+ vm_flags &= ~VM_MAYWRITE;
62083+ }
62084+#endif
62085+
62086+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62087+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62088+ vm_flags &= ~VM_PAGEEXEC;
62089+#endif
62090+
62091 if (flags & MAP_LOCKED)
62092 if (!can_do_mlock())
62093 return -EPERM;
62094@@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62095 locked += mm->locked_vm;
62096 lock_limit = rlimit(RLIMIT_MEMLOCK);
62097 lock_limit >>= PAGE_SHIFT;
62098+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62099 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62100 return -EAGAIN;
62101 }
62102@@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62103 if (error)
62104 return error;
62105
62106+ if (!gr_acl_handle_mmap(file, prot))
62107+ return -EACCES;
62108+
62109 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62110 }
62111 EXPORT_SYMBOL(do_mmap_pgoff);
62112@@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62113 */
62114 int vma_wants_writenotify(struct vm_area_struct *vma)
62115 {
62116- unsigned int vm_flags = vma->vm_flags;
62117+ unsigned long vm_flags = vma->vm_flags;
62118
62119 /* If it was private or non-writable, the write bit is already clear */
62120- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62121+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62122 return 0;
62123
62124 /* The backer wishes to know when pages are first written to? */
62125@@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62126 unsigned long charged = 0;
62127 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62128
62129+#ifdef CONFIG_PAX_SEGMEXEC
62130+ struct vm_area_struct *vma_m = NULL;
62131+#endif
62132+
62133+ /*
62134+ * mm->mmap_sem is required to protect against another thread
62135+ * changing the mappings in case we sleep.
62136+ */
62137+ verify_mm_writelocked(mm);
62138+
62139 /* Clear old maps */
62140 error = -ENOMEM;
62141-munmap_back:
62142 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62143 if (vma && vma->vm_start < addr + len) {
62144 if (do_munmap(mm, addr, len))
62145 return -ENOMEM;
62146- goto munmap_back;
62147+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62148+ BUG_ON(vma && vma->vm_start < addr + len);
62149 }
62150
62151 /* Check against address space limit. */
62152@@ -1295,6 +1416,16 @@ munmap_back:
62153 goto unacct_error;
62154 }
62155
62156+#ifdef CONFIG_PAX_SEGMEXEC
62157+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62158+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62159+ if (!vma_m) {
62160+ error = -ENOMEM;
62161+ goto free_vma;
62162+ }
62163+ }
62164+#endif
62165+
62166 vma->vm_mm = mm;
62167 vma->vm_start = addr;
62168 vma->vm_end = addr + len;
62169@@ -1318,6 +1449,19 @@ munmap_back:
62170 error = file->f_op->mmap(file, vma);
62171 if (error)
62172 goto unmap_and_free_vma;
62173+
62174+#ifdef CONFIG_PAX_SEGMEXEC
62175+ if (vma_m && (vm_flags & VM_EXECUTABLE))
62176+ added_exe_file_vma(mm);
62177+#endif
62178+
62179+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62180+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62181+ vma->vm_flags |= VM_PAGEEXEC;
62182+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62183+ }
62184+#endif
62185+
62186 if (vm_flags & VM_EXECUTABLE)
62187 added_exe_file_vma(mm);
62188
62189@@ -1353,6 +1497,11 @@ munmap_back:
62190 vma_link(mm, vma, prev, rb_link, rb_parent);
62191 file = vma->vm_file;
62192
62193+#ifdef CONFIG_PAX_SEGMEXEC
62194+ if (vma_m)
62195+ BUG_ON(pax_mirror_vma(vma_m, vma));
62196+#endif
62197+
62198 /* Once vma denies write, undo our temporary denial count */
62199 if (correct_wcount)
62200 atomic_inc(&inode->i_writecount);
62201@@ -1361,6 +1510,7 @@ out:
62202
62203 mm->total_vm += len >> PAGE_SHIFT;
62204 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62205+ track_exec_limit(mm, addr, addr + len, vm_flags);
62206 if (vm_flags & VM_LOCKED) {
62207 if (!mlock_vma_pages_range(vma, addr, addr + len))
62208 mm->locked_vm += (len >> PAGE_SHIFT);
62209@@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62210 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62211 charged = 0;
62212 free_vma:
62213+
62214+#ifdef CONFIG_PAX_SEGMEXEC
62215+ if (vma_m)
62216+ kmem_cache_free(vm_area_cachep, vma_m);
62217+#endif
62218+
62219 kmem_cache_free(vm_area_cachep, vma);
62220 unacct_error:
62221 if (charged)
62222@@ -1385,6 +1541,44 @@ unacct_error:
62223 return error;
62224 }
62225
62226+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62227+{
62228+ if (!vma) {
62229+#ifdef CONFIG_STACK_GROWSUP
62230+ if (addr > sysctl_heap_stack_gap)
62231+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62232+ else
62233+ vma = find_vma(current->mm, 0);
62234+ if (vma && (vma->vm_flags & VM_GROWSUP))
62235+ return false;
62236+#endif
62237+ return true;
62238+ }
62239+
62240+ if (addr + len > vma->vm_start)
62241+ return false;
62242+
62243+ if (vma->vm_flags & VM_GROWSDOWN)
62244+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62245+#ifdef CONFIG_STACK_GROWSUP
62246+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62247+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62248+#endif
62249+
62250+ return true;
62251+}
62252+
62253+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62254+{
62255+ if (vma->vm_start < len)
62256+ return -ENOMEM;
62257+ if (!(vma->vm_flags & VM_GROWSDOWN))
62258+ return vma->vm_start - len;
62259+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
62260+ return vma->vm_start - len - sysctl_heap_stack_gap;
62261+ return -ENOMEM;
62262+}
62263+
62264 /* Get an address range which is currently unmapped.
62265 * For shmat() with addr=0.
62266 *
62267@@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62268 if (flags & MAP_FIXED)
62269 return addr;
62270
62271+#ifdef CONFIG_PAX_RANDMMAP
62272+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62273+#endif
62274+
62275 if (addr) {
62276 addr = PAGE_ALIGN(addr);
62277- vma = find_vma(mm, addr);
62278- if (TASK_SIZE - len >= addr &&
62279- (!vma || addr + len <= vma->vm_start))
62280- return addr;
62281+ if (TASK_SIZE - len >= addr) {
62282+ vma = find_vma(mm, addr);
62283+ if (check_heap_stack_gap(vma, addr, len))
62284+ return addr;
62285+ }
62286 }
62287 if (len > mm->cached_hole_size) {
62288- start_addr = addr = mm->free_area_cache;
62289+ start_addr = addr = mm->free_area_cache;
62290 } else {
62291- start_addr = addr = TASK_UNMAPPED_BASE;
62292- mm->cached_hole_size = 0;
62293+ start_addr = addr = mm->mmap_base;
62294+ mm->cached_hole_size = 0;
62295 }
62296
62297 full_search:
62298@@ -1433,34 +1632,40 @@ full_search:
62299 * Start a new search - just in case we missed
62300 * some holes.
62301 */
62302- if (start_addr != TASK_UNMAPPED_BASE) {
62303- addr = TASK_UNMAPPED_BASE;
62304- start_addr = addr;
62305+ if (start_addr != mm->mmap_base) {
62306+ start_addr = addr = mm->mmap_base;
62307 mm->cached_hole_size = 0;
62308 goto full_search;
62309 }
62310 return -ENOMEM;
62311 }
62312- if (!vma || addr + len <= vma->vm_start) {
62313- /*
62314- * Remember the place where we stopped the search:
62315- */
62316- mm->free_area_cache = addr + len;
62317- return addr;
62318- }
62319+ if (check_heap_stack_gap(vma, addr, len))
62320+ break;
62321 if (addr + mm->cached_hole_size < vma->vm_start)
62322 mm->cached_hole_size = vma->vm_start - addr;
62323 addr = vma->vm_end;
62324 }
62325+
62326+ /*
62327+ * Remember the place where we stopped the search:
62328+ */
62329+ mm->free_area_cache = addr + len;
62330+ return addr;
62331 }
62332 #endif
62333
62334 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62335 {
62336+
62337+#ifdef CONFIG_PAX_SEGMEXEC
62338+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62339+ return;
62340+#endif
62341+
62342 /*
62343 * Is this a new hole at the lowest possible address?
62344 */
62345- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62346+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62347 mm->free_area_cache = addr;
62348 mm->cached_hole_size = ~0UL;
62349 }
62350@@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62351 {
62352 struct vm_area_struct *vma;
62353 struct mm_struct *mm = current->mm;
62354- unsigned long addr = addr0;
62355+ unsigned long base = mm->mmap_base, addr = addr0;
62356
62357 /* requested length too big for entire address space */
62358 if (len > TASK_SIZE)
62359@@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62360 if (flags & MAP_FIXED)
62361 return addr;
62362
62363+#ifdef CONFIG_PAX_RANDMMAP
62364+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62365+#endif
62366+
62367 /* requesting a specific address */
62368 if (addr) {
62369 addr = PAGE_ALIGN(addr);
62370- vma = find_vma(mm, addr);
62371- if (TASK_SIZE - len >= addr &&
62372- (!vma || addr + len <= vma->vm_start))
62373- return addr;
62374+ if (TASK_SIZE - len >= addr) {
62375+ vma = find_vma(mm, addr);
62376+ if (check_heap_stack_gap(vma, addr, len))
62377+ return addr;
62378+ }
62379 }
62380
62381 /* check if free_area_cache is useful for us */
62382@@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62383 /* make sure it can fit in the remaining address space */
62384 if (addr > len) {
62385 vma = find_vma(mm, addr-len);
62386- if (!vma || addr <= vma->vm_start)
62387+ if (check_heap_stack_gap(vma, addr - len, len))
62388 /* remember the address as a hint for next time */
62389 return (mm->free_area_cache = addr-len);
62390 }
62391@@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62392 * return with success:
62393 */
62394 vma = find_vma(mm, addr);
62395- if (!vma || addr+len <= vma->vm_start)
62396+ if (check_heap_stack_gap(vma, addr, len))
62397 /* remember the address as a hint for next time */
62398 return (mm->free_area_cache = addr);
62399
62400@@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62401 mm->cached_hole_size = vma->vm_start - addr;
62402
62403 /* try just below the current vma->vm_start */
62404- addr = vma->vm_start-len;
62405- } while (len < vma->vm_start);
62406+ addr = skip_heap_stack_gap(vma, len);
62407+ } while (!IS_ERR_VALUE(addr));
62408
62409 bottomup:
62410 /*
62411@@ -1544,13 +1754,21 @@ bottomup:
62412 * can happen with large stack limits and large mmap()
62413 * allocations.
62414 */
62415+ mm->mmap_base = TASK_UNMAPPED_BASE;
62416+
62417+#ifdef CONFIG_PAX_RANDMMAP
62418+ if (mm->pax_flags & MF_PAX_RANDMMAP)
62419+ mm->mmap_base += mm->delta_mmap;
62420+#endif
62421+
62422+ mm->free_area_cache = mm->mmap_base;
62423 mm->cached_hole_size = ~0UL;
62424- mm->free_area_cache = TASK_UNMAPPED_BASE;
62425 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62426 /*
62427 * Restore the topdown base:
62428 */
62429- mm->free_area_cache = mm->mmap_base;
62430+ mm->mmap_base = base;
62431+ mm->free_area_cache = base;
62432 mm->cached_hole_size = ~0UL;
62433
62434 return addr;
62435@@ -1559,6 +1777,12 @@ bottomup:
62436
62437 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62438 {
62439+
62440+#ifdef CONFIG_PAX_SEGMEXEC
62441+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62442+ return;
62443+#endif
62444+
62445 /*
62446 * Is this a new hole at the highest possible address?
62447 */
62448@@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62449 mm->free_area_cache = addr;
62450
62451 /* dont allow allocations above current base */
62452- if (mm->free_area_cache > mm->mmap_base)
62453+ if (mm->free_area_cache > mm->mmap_base) {
62454 mm->free_area_cache = mm->mmap_base;
62455+ mm->cached_hole_size = ~0UL;
62456+ }
62457 }
62458
62459 unsigned long
62460@@ -1675,6 +1901,28 @@ out:
62461 return prev ? prev->vm_next : vma;
62462 }
62463
62464+#ifdef CONFIG_PAX_SEGMEXEC
62465+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62466+{
62467+ struct vm_area_struct *vma_m;
62468+
62469+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62470+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62471+ BUG_ON(vma->vm_mirror);
62472+ return NULL;
62473+ }
62474+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62475+ vma_m = vma->vm_mirror;
62476+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62477+ BUG_ON(vma->vm_file != vma_m->vm_file);
62478+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62479+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62480+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62481+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62482+ return vma_m;
62483+}
62484+#endif
62485+
62486 /*
62487 * Verify that the stack growth is acceptable and
62488 * update accounting. This is shared with both the
62489@@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62490 return -ENOMEM;
62491
62492 /* Stack limit test */
62493+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
62494 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62495 return -ENOMEM;
62496
62497@@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62498 locked = mm->locked_vm + grow;
62499 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62500 limit >>= PAGE_SHIFT;
62501+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62502 if (locked > limit && !capable(CAP_IPC_LOCK))
62503 return -ENOMEM;
62504 }
62505@@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62506 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62507 * vma is the last one with address > vma->vm_end. Have to extend vma.
62508 */
62509+#ifndef CONFIG_IA64
62510+static
62511+#endif
62512 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62513 {
62514 int error;
62515+ bool locknext;
62516
62517 if (!(vma->vm_flags & VM_GROWSUP))
62518 return -EFAULT;
62519
62520+ /* Also guard against wrapping around to address 0. */
62521+ if (address < PAGE_ALIGN(address+1))
62522+ address = PAGE_ALIGN(address+1);
62523+ else
62524+ return -ENOMEM;
62525+
62526 /*
62527 * We must make sure the anon_vma is allocated
62528 * so that the anon_vma locking is not a noop.
62529 */
62530 if (unlikely(anon_vma_prepare(vma)))
62531 return -ENOMEM;
62532+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62533+ if (locknext && anon_vma_prepare(vma->vm_next))
62534+ return -ENOMEM;
62535 vma_lock_anon_vma(vma);
62536+ if (locknext)
62537+ vma_lock_anon_vma(vma->vm_next);
62538
62539 /*
62540 * vma->vm_start/vm_end cannot change under us because the caller
62541 * is required to hold the mmap_sem in read mode. We need the
62542- * anon_vma lock to serialize against concurrent expand_stacks.
62543- * Also guard against wrapping around to address 0.
62544+ * anon_vma locks to serialize against concurrent expand_stacks
62545+ * and expand_upwards.
62546 */
62547- if (address < PAGE_ALIGN(address+4))
62548- address = PAGE_ALIGN(address+4);
62549- else {
62550- vma_unlock_anon_vma(vma);
62551- return -ENOMEM;
62552- }
62553 error = 0;
62554
62555 /* Somebody else might have raced and expanded it already */
62556- if (address > vma->vm_end) {
62557+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62558+ error = -ENOMEM;
62559+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62560 unsigned long size, grow;
62561
62562 size = address - vma->vm_start;
62563@@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62564 }
62565 }
62566 }
62567+ if (locknext)
62568+ vma_unlock_anon_vma(vma->vm_next);
62569 vma_unlock_anon_vma(vma);
62570 khugepaged_enter_vma_merge(vma);
62571 return error;
62572@@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62573 unsigned long address)
62574 {
62575 int error;
62576+ bool lockprev = false;
62577+ struct vm_area_struct *prev;
62578
62579 /*
62580 * We must make sure the anon_vma is allocated
62581@@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62582 if (error)
62583 return error;
62584
62585+ prev = vma->vm_prev;
62586+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62587+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62588+#endif
62589+ if (lockprev && anon_vma_prepare(prev))
62590+ return -ENOMEM;
62591+ if (lockprev)
62592+ vma_lock_anon_vma(prev);
62593+
62594 vma_lock_anon_vma(vma);
62595
62596 /*
62597@@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62598 */
62599
62600 /* Somebody else might have raced and expanded it already */
62601- if (address < vma->vm_start) {
62602+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62603+ error = -ENOMEM;
62604+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62605 unsigned long size, grow;
62606
62607+#ifdef CONFIG_PAX_SEGMEXEC
62608+ struct vm_area_struct *vma_m;
62609+
62610+ vma_m = pax_find_mirror_vma(vma);
62611+#endif
62612+
62613 size = vma->vm_end - address;
62614 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62615
62616@@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62617 if (!error) {
62618 vma->vm_start = address;
62619 vma->vm_pgoff -= grow;
62620+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62621+
62622+#ifdef CONFIG_PAX_SEGMEXEC
62623+ if (vma_m) {
62624+ vma_m->vm_start -= grow << PAGE_SHIFT;
62625+ vma_m->vm_pgoff -= grow;
62626+ }
62627+#endif
62628+
62629 perf_event_mmap(vma);
62630 }
62631 }
62632 }
62633 vma_unlock_anon_vma(vma);
62634+ if (lockprev)
62635+ vma_unlock_anon_vma(prev);
62636 khugepaged_enter_vma_merge(vma);
62637 return error;
62638 }
62639@@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62640 do {
62641 long nrpages = vma_pages(vma);
62642
62643+#ifdef CONFIG_PAX_SEGMEXEC
62644+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62645+ vma = remove_vma(vma);
62646+ continue;
62647+ }
62648+#endif
62649+
62650 mm->total_vm -= nrpages;
62651 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62652 vma = remove_vma(vma);
62653@@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62654 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62655 vma->vm_prev = NULL;
62656 do {
62657+
62658+#ifdef CONFIG_PAX_SEGMEXEC
62659+ if (vma->vm_mirror) {
62660+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62661+ vma->vm_mirror->vm_mirror = NULL;
62662+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
62663+ vma->vm_mirror = NULL;
62664+ }
62665+#endif
62666+
62667 rb_erase(&vma->vm_rb, &mm->mm_rb);
62668 mm->map_count--;
62669 tail_vma = vma;
62670@@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62671 struct vm_area_struct *new;
62672 int err = -ENOMEM;
62673
62674+#ifdef CONFIG_PAX_SEGMEXEC
62675+ struct vm_area_struct *vma_m, *new_m = NULL;
62676+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62677+#endif
62678+
62679 if (is_vm_hugetlb_page(vma) && (addr &
62680 ~(huge_page_mask(hstate_vma(vma)))))
62681 return -EINVAL;
62682
62683+#ifdef CONFIG_PAX_SEGMEXEC
62684+ vma_m = pax_find_mirror_vma(vma);
62685+#endif
62686+
62687 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62688 if (!new)
62689 goto out_err;
62690
62691+#ifdef CONFIG_PAX_SEGMEXEC
62692+ if (vma_m) {
62693+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62694+ if (!new_m) {
62695+ kmem_cache_free(vm_area_cachep, new);
62696+ goto out_err;
62697+ }
62698+ }
62699+#endif
62700+
62701 /* most fields are the same, copy all, and then fixup */
62702 *new = *vma;
62703
62704@@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62705 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62706 }
62707
62708+#ifdef CONFIG_PAX_SEGMEXEC
62709+ if (vma_m) {
62710+ *new_m = *vma_m;
62711+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
62712+ new_m->vm_mirror = new;
62713+ new->vm_mirror = new_m;
62714+
62715+ if (new_below)
62716+ new_m->vm_end = addr_m;
62717+ else {
62718+ new_m->vm_start = addr_m;
62719+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62720+ }
62721+ }
62722+#endif
62723+
62724 pol = mpol_dup(vma_policy(vma));
62725 if (IS_ERR(pol)) {
62726 err = PTR_ERR(pol);
62727@@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62728 else
62729 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62730
62731+#ifdef CONFIG_PAX_SEGMEXEC
62732+ if (!err && vma_m) {
62733+ if (anon_vma_clone(new_m, vma_m))
62734+ goto out_free_mpol;
62735+
62736+ mpol_get(pol);
62737+ vma_set_policy(new_m, pol);
62738+
62739+ if (new_m->vm_file) {
62740+ get_file(new_m->vm_file);
62741+ if (vma_m->vm_flags & VM_EXECUTABLE)
62742+ added_exe_file_vma(mm);
62743+ }
62744+
62745+ if (new_m->vm_ops && new_m->vm_ops->open)
62746+ new_m->vm_ops->open(new_m);
62747+
62748+ if (new_below)
62749+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62750+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62751+ else
62752+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62753+
62754+ if (err) {
62755+ if (new_m->vm_ops && new_m->vm_ops->close)
62756+ new_m->vm_ops->close(new_m);
62757+ if (new_m->vm_file) {
62758+ if (vma_m->vm_flags & VM_EXECUTABLE)
62759+ removed_exe_file_vma(mm);
62760+ fput(new_m->vm_file);
62761+ }
62762+ mpol_put(pol);
62763+ }
62764+ }
62765+#endif
62766+
62767 /* Success. */
62768 if (!err)
62769 return 0;
62770@@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62771 removed_exe_file_vma(mm);
62772 fput(new->vm_file);
62773 }
62774- unlink_anon_vmas(new);
62775 out_free_mpol:
62776 mpol_put(pol);
62777 out_free_vma:
62778+
62779+#ifdef CONFIG_PAX_SEGMEXEC
62780+ if (new_m) {
62781+ unlink_anon_vmas(new_m);
62782+ kmem_cache_free(vm_area_cachep, new_m);
62783+ }
62784+#endif
62785+
62786+ unlink_anon_vmas(new);
62787 kmem_cache_free(vm_area_cachep, new);
62788 out_err:
62789 return err;
62790@@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62791 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62792 unsigned long addr, int new_below)
62793 {
62794+
62795+#ifdef CONFIG_PAX_SEGMEXEC
62796+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62797+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62798+ if (mm->map_count >= sysctl_max_map_count-1)
62799+ return -ENOMEM;
62800+ } else
62801+#endif
62802+
62803 if (mm->map_count >= sysctl_max_map_count)
62804 return -ENOMEM;
62805
62806@@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62807 * work. This now handles partial unmappings.
62808 * Jeremy Fitzhardinge <jeremy@goop.org>
62809 */
62810+#ifdef CONFIG_PAX_SEGMEXEC
62811 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62812 {
62813+ int ret = __do_munmap(mm, start, len);
62814+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62815+ return ret;
62816+
62817+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62818+}
62819+
62820+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62821+#else
62822+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62823+#endif
62824+{
62825 unsigned long end;
62826 struct vm_area_struct *vma, *prev, *last;
62827
62828+ /*
62829+ * mm->mmap_sem is required to protect against another thread
62830+ * changing the mappings in case we sleep.
62831+ */
62832+ verify_mm_writelocked(mm);
62833+
62834 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62835 return -EINVAL;
62836
62837@@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
62838 /* Fix up all other VM information */
62839 remove_vma_list(mm, vma);
62840
62841+ track_exec_limit(mm, start, end, 0UL);
62842+
62843 return 0;
62844 }
62845
62846@@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62847
62848 profile_munmap(addr);
62849
62850+#ifdef CONFIG_PAX_SEGMEXEC
62851+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62852+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62853+ return -EINVAL;
62854+#endif
62855+
62856 down_write(&mm->mmap_sem);
62857 ret = do_munmap(mm, addr, len);
62858 up_write(&mm->mmap_sem);
62859 return ret;
62860 }
62861
62862-static inline void verify_mm_writelocked(struct mm_struct *mm)
62863-{
62864-#ifdef CONFIG_DEBUG_VM
62865- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62866- WARN_ON(1);
62867- up_read(&mm->mmap_sem);
62868- }
62869-#endif
62870-}
62871-
62872 /*
62873 * this is really a simplified "do_mmap". it only handles
62874 * anonymous maps. eventually we may be able to do some
62875@@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
62876 struct rb_node ** rb_link, * rb_parent;
62877 pgoff_t pgoff = addr >> PAGE_SHIFT;
62878 int error;
62879+ unsigned long charged;
62880
62881 len = PAGE_ALIGN(len);
62882 if (!len)
62883@@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
62884
62885 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62886
62887+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62888+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62889+ flags &= ~VM_EXEC;
62890+
62891+#ifdef CONFIG_PAX_MPROTECT
62892+ if (mm->pax_flags & MF_PAX_MPROTECT)
62893+ flags &= ~VM_MAYEXEC;
62894+#endif
62895+
62896+ }
62897+#endif
62898+
62899 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62900 if (error & ~PAGE_MASK)
62901 return error;
62902
62903+ charged = len >> PAGE_SHIFT;
62904+
62905 /*
62906 * mlock MCL_FUTURE?
62907 */
62908 if (mm->def_flags & VM_LOCKED) {
62909 unsigned long locked, lock_limit;
62910- locked = len >> PAGE_SHIFT;
62911+ locked = charged;
62912 locked += mm->locked_vm;
62913 lock_limit = rlimit(RLIMIT_MEMLOCK);
62914 lock_limit >>= PAGE_SHIFT;
62915@@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
62916 /*
62917 * Clear old maps. this also does some error checking for us
62918 */
62919- munmap_back:
62920 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62921 if (vma && vma->vm_start < addr + len) {
62922 if (do_munmap(mm, addr, len))
62923 return -ENOMEM;
62924- goto munmap_back;
62925+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62926+ BUG_ON(vma && vma->vm_start < addr + len);
62927 }
62928
62929 /* Check against address space limits *after* clearing old maps... */
62930- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62931+ if (!may_expand_vm(mm, charged))
62932 return -ENOMEM;
62933
62934 if (mm->map_count > sysctl_max_map_count)
62935 return -ENOMEM;
62936
62937- if (security_vm_enough_memory(len >> PAGE_SHIFT))
62938+ if (security_vm_enough_memory(charged))
62939 return -ENOMEM;
62940
62941 /* Can we just expand an old private anonymous mapping? */
62942@@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
62943 */
62944 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62945 if (!vma) {
62946- vm_unacct_memory(len >> PAGE_SHIFT);
62947+ vm_unacct_memory(charged);
62948 return -ENOMEM;
62949 }
62950
62951@@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
62952 vma_link(mm, vma, prev, rb_link, rb_parent);
62953 out:
62954 perf_event_mmap(vma);
62955- mm->total_vm += len >> PAGE_SHIFT;
62956+ mm->total_vm += charged;
62957 if (flags & VM_LOCKED) {
62958 if (!mlock_vma_pages_range(vma, addr, addr + len))
62959- mm->locked_vm += (len >> PAGE_SHIFT);
62960+ mm->locked_vm += charged;
62961 }
62962+ track_exec_limit(mm, addr, addr + len, flags);
62963 return addr;
62964 }
62965
62966@@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
62967 * Walk the list again, actually closing and freeing it,
62968 * with preemption enabled, without holding any MM locks.
62969 */
62970- while (vma)
62971+ while (vma) {
62972+ vma->vm_mirror = NULL;
62973 vma = remove_vma(vma);
62974+ }
62975
62976 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62977 }
62978@@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
62979 struct vm_area_struct * __vma, * prev;
62980 struct rb_node ** rb_link, * rb_parent;
62981
62982+#ifdef CONFIG_PAX_SEGMEXEC
62983+ struct vm_area_struct *vma_m = NULL;
62984+#endif
62985+
62986+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62987+ return -EPERM;
62988+
62989 /*
62990 * The vm_pgoff of a purely anonymous vma should be irrelevant
62991 * until its first write fault, when page's anon_vma and index
62992@@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
62993 if ((vma->vm_flags & VM_ACCOUNT) &&
62994 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62995 return -ENOMEM;
62996+
62997+#ifdef CONFIG_PAX_SEGMEXEC
62998+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62999+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63000+ if (!vma_m)
63001+ return -ENOMEM;
63002+ }
63003+#endif
63004+
63005 vma_link(mm, vma, prev, rb_link, rb_parent);
63006+
63007+#ifdef CONFIG_PAX_SEGMEXEC
63008+ if (vma_m)
63009+ BUG_ON(pax_mirror_vma(vma_m, vma));
63010+#endif
63011+
63012 return 0;
63013 }
63014
63015@@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63016 struct rb_node **rb_link, *rb_parent;
63017 struct mempolicy *pol;
63018
63019+ BUG_ON(vma->vm_mirror);
63020+
63021 /*
63022 * If anonymous vma has not yet been faulted, update new pgoff
63023 * to match new location, to increase its chance of merging.
63024@@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63025 return NULL;
63026 }
63027
63028+#ifdef CONFIG_PAX_SEGMEXEC
63029+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63030+{
63031+ struct vm_area_struct *prev_m;
63032+ struct rb_node **rb_link_m, *rb_parent_m;
63033+ struct mempolicy *pol_m;
63034+
63035+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63036+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63037+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63038+ *vma_m = *vma;
63039+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63040+ if (anon_vma_clone(vma_m, vma))
63041+ return -ENOMEM;
63042+ pol_m = vma_policy(vma_m);
63043+ mpol_get(pol_m);
63044+ vma_set_policy(vma_m, pol_m);
63045+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63046+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63047+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63048+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63049+ if (vma_m->vm_file)
63050+ get_file(vma_m->vm_file);
63051+ if (vma_m->vm_ops && vma_m->vm_ops->open)
63052+ vma_m->vm_ops->open(vma_m);
63053+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63054+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63055+ vma_m->vm_mirror = vma;
63056+ vma->vm_mirror = vma_m;
63057+ return 0;
63058+}
63059+#endif
63060+
63061 /*
63062 * Return true if the calling process may expand its vm space by the passed
63063 * number of pages
63064@@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63065 unsigned long lim;
63066
63067 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63068-
63069+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63070 if (cur + npages > lim)
63071 return 0;
63072 return 1;
63073@@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63074 vma->vm_start = addr;
63075 vma->vm_end = addr + len;
63076
63077+#ifdef CONFIG_PAX_MPROTECT
63078+ if (mm->pax_flags & MF_PAX_MPROTECT) {
63079+#ifndef CONFIG_PAX_MPROTECT_COMPAT
63080+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63081+ return -EPERM;
63082+ if (!(vm_flags & VM_EXEC))
63083+ vm_flags &= ~VM_MAYEXEC;
63084+#else
63085+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63086+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63087+#endif
63088+ else
63089+ vm_flags &= ~VM_MAYWRITE;
63090+ }
63091+#endif
63092+
63093 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63094 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63095
63096diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63097--- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63098+++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63099@@ -23,10 +23,16 @@
63100 #include <linux/mmu_notifier.h>
63101 #include <linux/migrate.h>
63102 #include <linux/perf_event.h>
63103+
63104+#ifdef CONFIG_PAX_MPROTECT
63105+#include <linux/elf.h>
63106+#endif
63107+
63108 #include <asm/uaccess.h>
63109 #include <asm/pgtable.h>
63110 #include <asm/cacheflush.h>
63111 #include <asm/tlbflush.h>
63112+#include <asm/mmu_context.h>
63113
63114 #ifndef pgprot_modify
63115 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63116@@ -141,6 +147,48 @@ static void change_protection(struct vm_
63117 flush_tlb_range(vma, start, end);
63118 }
63119
63120+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63121+/* called while holding the mmap semaphor for writing except stack expansion */
63122+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63123+{
63124+ unsigned long oldlimit, newlimit = 0UL;
63125+
63126+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63127+ return;
63128+
63129+ spin_lock(&mm->page_table_lock);
63130+ oldlimit = mm->context.user_cs_limit;
63131+ if ((prot & VM_EXEC) && oldlimit < end)
63132+ /* USER_CS limit moved up */
63133+ newlimit = end;
63134+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63135+ /* USER_CS limit moved down */
63136+ newlimit = start;
63137+
63138+ if (newlimit) {
63139+ mm->context.user_cs_limit = newlimit;
63140+
63141+#ifdef CONFIG_SMP
63142+ wmb();
63143+ cpus_clear(mm->context.cpu_user_cs_mask);
63144+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63145+#endif
63146+
63147+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63148+ }
63149+ spin_unlock(&mm->page_table_lock);
63150+ if (newlimit == end) {
63151+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
63152+
63153+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
63154+ if (is_vm_hugetlb_page(vma))
63155+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63156+ else
63157+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63158+ }
63159+}
63160+#endif
63161+
63162 int
63163 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63164 unsigned long start, unsigned long end, unsigned long newflags)
63165@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63166 int error;
63167 int dirty_accountable = 0;
63168
63169+#ifdef CONFIG_PAX_SEGMEXEC
63170+ struct vm_area_struct *vma_m = NULL;
63171+ unsigned long start_m, end_m;
63172+
63173+ start_m = start + SEGMEXEC_TASK_SIZE;
63174+ end_m = end + SEGMEXEC_TASK_SIZE;
63175+#endif
63176+
63177 if (newflags == oldflags) {
63178 *pprev = vma;
63179 return 0;
63180 }
63181
63182+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63183+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63184+
63185+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63186+ return -ENOMEM;
63187+
63188+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63189+ return -ENOMEM;
63190+ }
63191+
63192 /*
63193 * If we make a private mapping writable we increase our commit;
63194 * but (without finer accounting) cannot reduce our commit if we
63195@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63196 }
63197 }
63198
63199+#ifdef CONFIG_PAX_SEGMEXEC
63200+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63201+ if (start != vma->vm_start) {
63202+ error = split_vma(mm, vma, start, 1);
63203+ if (error)
63204+ goto fail;
63205+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63206+ *pprev = (*pprev)->vm_next;
63207+ }
63208+
63209+ if (end != vma->vm_end) {
63210+ error = split_vma(mm, vma, end, 0);
63211+ if (error)
63212+ goto fail;
63213+ }
63214+
63215+ if (pax_find_mirror_vma(vma)) {
63216+ error = __do_munmap(mm, start_m, end_m - start_m);
63217+ if (error)
63218+ goto fail;
63219+ } else {
63220+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63221+ if (!vma_m) {
63222+ error = -ENOMEM;
63223+ goto fail;
63224+ }
63225+ vma->vm_flags = newflags;
63226+ error = pax_mirror_vma(vma_m, vma);
63227+ if (error) {
63228+ vma->vm_flags = oldflags;
63229+ goto fail;
63230+ }
63231+ }
63232+ }
63233+#endif
63234+
63235 /*
63236 * First try to merge with previous and/or next vma.
63237 */
63238@@ -204,9 +306,21 @@ success:
63239 * vm_flags and vm_page_prot are protected by the mmap_sem
63240 * held in write mode.
63241 */
63242+
63243+#ifdef CONFIG_PAX_SEGMEXEC
63244+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63245+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63246+#endif
63247+
63248 vma->vm_flags = newflags;
63249+
63250+#ifdef CONFIG_PAX_MPROTECT
63251+ if (mm->binfmt && mm->binfmt->handle_mprotect)
63252+ mm->binfmt->handle_mprotect(vma, newflags);
63253+#endif
63254+
63255 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63256- vm_get_page_prot(newflags));
63257+ vm_get_page_prot(vma->vm_flags));
63258
63259 if (vma_wants_writenotify(vma)) {
63260 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63261@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63262 end = start + len;
63263 if (end <= start)
63264 return -ENOMEM;
63265+
63266+#ifdef CONFIG_PAX_SEGMEXEC
63267+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63268+ if (end > SEGMEXEC_TASK_SIZE)
63269+ return -EINVAL;
63270+ } else
63271+#endif
63272+
63273+ if (end > TASK_SIZE)
63274+ return -EINVAL;
63275+
63276 if (!arch_validate_prot(prot))
63277 return -EINVAL;
63278
63279@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63280 /*
63281 * Does the application expect PROT_READ to imply PROT_EXEC:
63282 */
63283- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63284+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63285 prot |= PROT_EXEC;
63286
63287 vm_flags = calc_vm_prot_bits(prot);
63288@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63289 if (start > vma->vm_start)
63290 prev = vma;
63291
63292+#ifdef CONFIG_PAX_MPROTECT
63293+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63294+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
63295+#endif
63296+
63297 for (nstart = start ; ; ) {
63298 unsigned long newflags;
63299
63300@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63301
63302 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63303 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63304+ if (prot & (PROT_WRITE | PROT_EXEC))
63305+ gr_log_rwxmprotect(vma->vm_file);
63306+
63307+ error = -EACCES;
63308+ goto out;
63309+ }
63310+
63311+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63312 error = -EACCES;
63313 goto out;
63314 }
63315@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63316 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63317 if (error)
63318 goto out;
63319+
63320+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
63321+
63322 nstart = tmp;
63323
63324 if (nstart < prev->vm_end)
63325diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63326--- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63327+++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63328@@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63329 continue;
63330 pte = ptep_clear_flush(vma, old_addr, old_pte);
63331 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63332+
63333+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63334+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63335+ pte = pte_exprotect(pte);
63336+#endif
63337+
63338 set_pte_at(mm, new_addr, new_pte, pte);
63339 }
63340
63341@@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63342 if (is_vm_hugetlb_page(vma))
63343 goto Einval;
63344
63345+#ifdef CONFIG_PAX_SEGMEXEC
63346+ if (pax_find_mirror_vma(vma))
63347+ goto Einval;
63348+#endif
63349+
63350 /* We can't remap across vm area boundaries */
63351 if (old_len > vma->vm_end - addr)
63352 goto Efault;
63353@@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63354 unsigned long ret = -EINVAL;
63355 unsigned long charged = 0;
63356 unsigned long map_flags;
63357+ unsigned long pax_task_size = TASK_SIZE;
63358
63359 if (new_addr & ~PAGE_MASK)
63360 goto out;
63361
63362- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63363+#ifdef CONFIG_PAX_SEGMEXEC
63364+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63365+ pax_task_size = SEGMEXEC_TASK_SIZE;
63366+#endif
63367+
63368+ pax_task_size -= PAGE_SIZE;
63369+
63370+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63371 goto out;
63372
63373 /* Check if the location we're moving into overlaps the
63374 * old location at all, and fail if it does.
63375 */
63376- if ((new_addr <= addr) && (new_addr+new_len) > addr)
63377- goto out;
63378-
63379- if ((addr <= new_addr) && (addr+old_len) > new_addr)
63380+ if (addr + old_len > new_addr && new_addr + new_len > addr)
63381 goto out;
63382
63383 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63384@@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63385 struct vm_area_struct *vma;
63386 unsigned long ret = -EINVAL;
63387 unsigned long charged = 0;
63388+ unsigned long pax_task_size = TASK_SIZE;
63389
63390 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63391 goto out;
63392@@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63393 if (!new_len)
63394 goto out;
63395
63396+#ifdef CONFIG_PAX_SEGMEXEC
63397+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63398+ pax_task_size = SEGMEXEC_TASK_SIZE;
63399+#endif
63400+
63401+ pax_task_size -= PAGE_SIZE;
63402+
63403+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63404+ old_len > pax_task_size || addr > pax_task_size-old_len)
63405+ goto out;
63406+
63407 if (flags & MREMAP_FIXED) {
63408 if (flags & MREMAP_MAYMOVE)
63409 ret = mremap_to(addr, old_len, new_addr, new_len);
63410@@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63411 addr + new_len);
63412 }
63413 ret = addr;
63414+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63415 goto out;
63416 }
63417 }
63418@@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63419 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63420 if (ret)
63421 goto out;
63422+
63423+ map_flags = vma->vm_flags;
63424 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63425+ if (!(ret & ~PAGE_MASK)) {
63426+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63427+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63428+ }
63429 }
63430 out:
63431 if (ret & ~PAGE_MASK)
63432diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63433--- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63434+++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63435@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63436 unsigned long __init free_all_memory_core_early(int nodeid)
63437 {
63438 int i;
63439- u64 start, end;
63440+ u64 start, end, startrange, endrange;
63441 unsigned long count = 0;
63442- struct range *range = NULL;
63443+ struct range *range = NULL, rangerange = { 0, 0 };
63444 int nr_range;
63445
63446 nr_range = get_free_all_memory_range(&range, nodeid);
63447+ startrange = __pa(range) >> PAGE_SHIFT;
63448+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63449
63450 for (i = 0; i < nr_range; i++) {
63451 start = range[i].start;
63452 end = range[i].end;
63453+ if (start <= endrange && startrange < end) {
63454+ BUG_ON(rangerange.start | rangerange.end);
63455+ rangerange = range[i];
63456+ continue;
63457+ }
63458 count += end - start;
63459 __free_pages_memory(start, end);
63460 }
63461+ start = rangerange.start;
63462+ end = rangerange.end;
63463+ count += end - start;
63464+ __free_pages_memory(start, end);
63465
63466 return count;
63467 }
63468diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63469--- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63470+++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63471@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63472 int sysctl_overcommit_ratio = 50; /* default is 50% */
63473 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63474 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63475-int heap_stack_gap = 0;
63476
63477 atomic_long_t mmap_pages_allocated;
63478
63479@@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63480 EXPORT_SYMBOL(find_vma);
63481
63482 /*
63483- * find a VMA
63484- * - we don't extend stack VMAs under NOMMU conditions
63485- */
63486-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63487-{
63488- return find_vma(mm, addr);
63489-}
63490-
63491-/*
63492 * expand a stack to a given address
63493 * - not supported under NOMMU conditions
63494 */
63495@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63496
63497 /* most fields are the same, copy all, and then fixup */
63498 *new = *vma;
63499+ INIT_LIST_HEAD(&new->anon_vma_chain);
63500 *region = *vma->vm_region;
63501 new->vm_region = region;
63502
63503diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63504--- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63505+++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63506@@ -337,7 +337,7 @@ out:
63507 * This usage means that zero-order pages may not be compound.
63508 */
63509
63510-static void free_compound_page(struct page *page)
63511+void free_compound_page(struct page *page)
63512 {
63513 __free_pages_ok(page, compound_order(page));
63514 }
63515@@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63516 int i;
63517 int bad = 0;
63518
63519+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63520+ unsigned long index = 1UL << order;
63521+#endif
63522+
63523 trace_mm_page_free_direct(page, order);
63524 kmemcheck_free_shadow(page, order);
63525
63526@@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63527 debug_check_no_obj_freed(page_address(page),
63528 PAGE_SIZE << order);
63529 }
63530+
63531+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63532+ for (; index; --index)
63533+ sanitize_highpage(page + index - 1);
63534+#endif
63535+
63536 arch_free_page(page, order);
63537 kernel_map_pages(page, 1 << order, 0);
63538
63539@@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63540 arch_alloc_page(page, order);
63541 kernel_map_pages(page, 1 << order, 1);
63542
63543+#ifndef CONFIG_PAX_MEMORY_SANITIZE
63544 if (gfp_flags & __GFP_ZERO)
63545 prep_zero_page(page, order, gfp_flags);
63546+#endif
63547
63548 if (order && (gfp_flags & __GFP_COMP))
63549 prep_compound_page(page, order);
63550@@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63551 int cpu;
63552 struct zone *zone;
63553
63554+ pax_track_stack();
63555+
63556 for_each_populated_zone(zone) {
63557 if (skip_free_areas_zone(filter, zone))
63558 continue;
63559diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63560--- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63561+++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63562@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63563 static unsigned int pcpu_last_unit_cpu __read_mostly;
63564
63565 /* the address of the first chunk which starts with the kernel static area */
63566-void *pcpu_base_addr __read_mostly;
63567+void *pcpu_base_addr __read_only;
63568 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63569
63570 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63571diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63572--- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63573+++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63574@@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63575 struct anon_vma *anon_vma = vma->anon_vma;
63576 struct anon_vma_chain *avc;
63577
63578+#ifdef CONFIG_PAX_SEGMEXEC
63579+ struct anon_vma_chain *avc_m = NULL;
63580+#endif
63581+
63582 might_sleep();
63583 if (unlikely(!anon_vma)) {
63584 struct mm_struct *mm = vma->vm_mm;
63585@@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63586 if (!avc)
63587 goto out_enomem;
63588
63589+#ifdef CONFIG_PAX_SEGMEXEC
63590+ avc_m = anon_vma_chain_alloc();
63591+ if (!avc_m)
63592+ goto out_enomem_free_avc;
63593+#endif
63594+
63595 anon_vma = find_mergeable_anon_vma(vma);
63596 allocated = NULL;
63597 if (!anon_vma) {
63598@@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63599 /* page_table_lock to protect against threads */
63600 spin_lock(&mm->page_table_lock);
63601 if (likely(!vma->anon_vma)) {
63602+
63603+#ifdef CONFIG_PAX_SEGMEXEC
63604+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63605+
63606+ if (vma_m) {
63607+ BUG_ON(vma_m->anon_vma);
63608+ vma_m->anon_vma = anon_vma;
63609+ avc_m->anon_vma = anon_vma;
63610+ avc_m->vma = vma;
63611+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63612+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
63613+ avc_m = NULL;
63614+ }
63615+#endif
63616+
63617 vma->anon_vma = anon_vma;
63618 avc->anon_vma = anon_vma;
63619 avc->vma = vma;
63620@@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63621
63622 if (unlikely(allocated))
63623 put_anon_vma(allocated);
63624+
63625+#ifdef CONFIG_PAX_SEGMEXEC
63626+ if (unlikely(avc_m))
63627+ anon_vma_chain_free(avc_m);
63628+#endif
63629+
63630 if (unlikely(avc))
63631 anon_vma_chain_free(avc);
63632 }
63633 return 0;
63634
63635 out_enomem_free_avc:
63636+
63637+#ifdef CONFIG_PAX_SEGMEXEC
63638+ if (avc_m)
63639+ anon_vma_chain_free(avc_m);
63640+#endif
63641+
63642 anon_vma_chain_free(avc);
63643 out_enomem:
63644 return -ENOMEM;
63645@@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63646 * Attach the anon_vmas from src to dst.
63647 * Returns 0 on success, -ENOMEM on failure.
63648 */
63649-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63650+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63651 {
63652 struct anon_vma_chain *avc, *pavc;
63653
63654@@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63655 * the corresponding VMA in the parent process is attached to.
63656 * Returns 0 on success, non-zero on failure.
63657 */
63658-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63659+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63660 {
63661 struct anon_vma_chain *avc;
63662 struct anon_vma *anon_vma;
63663diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63664--- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63665+++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63666@@ -31,7 +31,7 @@
63667 #include <linux/percpu_counter.h>
63668 #include <linux/swap.h>
63669
63670-static struct vfsmount *shm_mnt;
63671+struct vfsmount *shm_mnt;
63672
63673 #ifdef CONFIG_SHMEM
63674 /*
63675@@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63676 goto unlock;
63677 }
63678 entry = shmem_swp_entry(info, index, NULL);
63679+ if (!entry)
63680+ goto unlock;
63681 if (entry->val) {
63682 /*
63683 * The more uptodate page coming down from a stacked
63684@@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63685 struct vm_area_struct pvma;
63686 struct page *page;
63687
63688+ pax_track_stack();
63689+
63690 spol = mpol_cond_copy(&mpol,
63691 mpol_shared_policy_lookup(&info->policy, idx));
63692
63693@@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63694
63695 info = SHMEM_I(inode);
63696 inode->i_size = len-1;
63697- if (len <= (char *)inode - (char *)info) {
63698+ if (len <= (char *)inode - (char *)info && len <= 64) {
63699 /* do it inline */
63700 memcpy(info, symname, len);
63701 inode->i_op = &shmem_symlink_inline_operations;
63702@@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63703 int err = -ENOMEM;
63704
63705 /* Round up to L1_CACHE_BYTES to resist false sharing */
63706- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63707- L1_CACHE_BYTES), GFP_KERNEL);
63708+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63709 if (!sbinfo)
63710 return -ENOMEM;
63711
63712diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63713--- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63714+++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63715@@ -150,7 +150,7 @@
63716
63717 /* Legal flag mask for kmem_cache_create(). */
63718 #if DEBUG
63719-# define CREATE_MASK (SLAB_RED_ZONE | \
63720+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63721 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63722 SLAB_CACHE_DMA | \
63723 SLAB_STORE_USER | \
63724@@ -158,7 +158,7 @@
63725 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63726 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63727 #else
63728-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63729+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63730 SLAB_CACHE_DMA | \
63731 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63732 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63733@@ -287,7 +287,7 @@ struct kmem_list3 {
63734 * Need this for bootstrapping a per node allocator.
63735 */
63736 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63737-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63738+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63739 #define CACHE_CACHE 0
63740 #define SIZE_AC MAX_NUMNODES
63741 #define SIZE_L3 (2 * MAX_NUMNODES)
63742@@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63743 if ((x)->max_freeable < i) \
63744 (x)->max_freeable = i; \
63745 } while (0)
63746-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63747-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63748-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63749-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63750+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63751+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63752+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63753+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63754 #else
63755 #define STATS_INC_ACTIVE(x) do { } while (0)
63756 #define STATS_DEC_ACTIVE(x) do { } while (0)
63757@@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63758 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63759 */
63760 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63761- const struct slab *slab, void *obj)
63762+ const struct slab *slab, const void *obj)
63763 {
63764 u32 offset = (obj - slab->s_mem);
63765 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63766@@ -563,7 +563,7 @@ struct cache_names {
63767 static struct cache_names __initdata cache_names[] = {
63768 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63769 #include <linux/kmalloc_sizes.h>
63770- {NULL,}
63771+ {NULL}
63772 #undef CACHE
63773 };
63774
63775@@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63776 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63777 sizes[INDEX_AC].cs_size,
63778 ARCH_KMALLOC_MINALIGN,
63779- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63780+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63781 NULL);
63782
63783 if (INDEX_AC != INDEX_L3) {
63784@@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63785 kmem_cache_create(names[INDEX_L3].name,
63786 sizes[INDEX_L3].cs_size,
63787 ARCH_KMALLOC_MINALIGN,
63788- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63789+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63790 NULL);
63791 }
63792
63793@@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63794 sizes->cs_cachep = kmem_cache_create(names->name,
63795 sizes->cs_size,
63796 ARCH_KMALLOC_MINALIGN,
63797- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63798+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63799 NULL);
63800 }
63801 #ifdef CONFIG_ZONE_DMA
63802@@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63803 }
63804 /* cpu stats */
63805 {
63806- unsigned long allochit = atomic_read(&cachep->allochit);
63807- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63808- unsigned long freehit = atomic_read(&cachep->freehit);
63809- unsigned long freemiss = atomic_read(&cachep->freemiss);
63810+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63811+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63812+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63813+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63814
63815 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63816 allochit, allocmiss, freehit, freemiss);
63817@@ -4530,15 +4530,66 @@ static const struct file_operations proc
63818
63819 static int __init slab_proc_init(void)
63820 {
63821- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63822+ mode_t gr_mode = S_IRUGO;
63823+
63824+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63825+ gr_mode = S_IRUSR;
63826+#endif
63827+
63828+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63829 #ifdef CONFIG_DEBUG_SLAB_LEAK
63830- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63831+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63832 #endif
63833 return 0;
63834 }
63835 module_init(slab_proc_init);
63836 #endif
63837
63838+void check_object_size(const void *ptr, unsigned long n, bool to)
63839+{
63840+
63841+#ifdef CONFIG_PAX_USERCOPY
63842+ struct page *page;
63843+ struct kmem_cache *cachep = NULL;
63844+ struct slab *slabp;
63845+ unsigned int objnr;
63846+ unsigned long offset;
63847+
63848+ if (!n)
63849+ return;
63850+
63851+ if (ZERO_OR_NULL_PTR(ptr))
63852+ goto report;
63853+
63854+ if (!virt_addr_valid(ptr))
63855+ return;
63856+
63857+ page = virt_to_head_page(ptr);
63858+
63859+ if (!PageSlab(page)) {
63860+ if (object_is_on_stack(ptr, n) == -1)
63861+ goto report;
63862+ return;
63863+ }
63864+
63865+ cachep = page_get_cache(page);
63866+ if (!(cachep->flags & SLAB_USERCOPY))
63867+ goto report;
63868+
63869+ slabp = page_get_slab(page);
63870+ objnr = obj_to_index(cachep, slabp, ptr);
63871+ BUG_ON(objnr >= cachep->num);
63872+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63873+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63874+ return;
63875+
63876+report:
63877+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63878+#endif
63879+
63880+}
63881+EXPORT_SYMBOL(check_object_size);
63882+
63883 /**
63884 * ksize - get the actual amount of memory allocated for a given object
63885 * @objp: Pointer to the object
63886diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
63887--- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
63888+++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
63889@@ -29,7 +29,7 @@
63890 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63891 * alloc_pages() directly, allocating compound pages so the page order
63892 * does not have to be separately tracked, and also stores the exact
63893- * allocation size in page->private so that it can be used to accurately
63894+ * allocation size in slob_page->size so that it can be used to accurately
63895 * provide ksize(). These objects are detected in kfree() because slob_page()
63896 * is false for them.
63897 *
63898@@ -58,6 +58,7 @@
63899 */
63900
63901 #include <linux/kernel.h>
63902+#include <linux/sched.h>
63903 #include <linux/slab.h>
63904 #include <linux/mm.h>
63905 #include <linux/swap.h> /* struct reclaim_state */
63906@@ -102,7 +103,8 @@ struct slob_page {
63907 unsigned long flags; /* mandatory */
63908 atomic_t _count; /* mandatory */
63909 slobidx_t units; /* free units left in page */
63910- unsigned long pad[2];
63911+ unsigned long pad[1];
63912+ unsigned long size; /* size when >=PAGE_SIZE */
63913 slob_t *free; /* first free slob_t in page */
63914 struct list_head list; /* linked list of free pages */
63915 };
63916@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63917 */
63918 static inline int is_slob_page(struct slob_page *sp)
63919 {
63920- return PageSlab((struct page *)sp);
63921+ return PageSlab((struct page *)sp) && !sp->size;
63922 }
63923
63924 static inline void set_slob_page(struct slob_page *sp)
63925@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63926
63927 static inline struct slob_page *slob_page(const void *addr)
63928 {
63929- return (struct slob_page *)virt_to_page(addr);
63930+ return (struct slob_page *)virt_to_head_page(addr);
63931 }
63932
63933 /*
63934@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63935 /*
63936 * Return the size of a slob block.
63937 */
63938-static slobidx_t slob_units(slob_t *s)
63939+static slobidx_t slob_units(const slob_t *s)
63940 {
63941 if (s->units > 0)
63942 return s->units;
63943@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63944 /*
63945 * Return the next free slob block pointer after this one.
63946 */
63947-static slob_t *slob_next(slob_t *s)
63948+static slob_t *slob_next(const slob_t *s)
63949 {
63950 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63951 slobidx_t next;
63952@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63953 /*
63954 * Returns true if s is the last free block in its page.
63955 */
63956-static int slob_last(slob_t *s)
63957+static int slob_last(const slob_t *s)
63958 {
63959 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63960 }
63961@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63962 if (!page)
63963 return NULL;
63964
63965+ set_slob_page(page);
63966 return page_address(page);
63967 }
63968
63969@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63970 if (!b)
63971 return NULL;
63972 sp = slob_page(b);
63973- set_slob_page(sp);
63974
63975 spin_lock_irqsave(&slob_lock, flags);
63976 sp->units = SLOB_UNITS(PAGE_SIZE);
63977 sp->free = b;
63978+ sp->size = 0;
63979 INIT_LIST_HEAD(&sp->list);
63980 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63981 set_slob_page_free(sp, slob_list);
63982@@ -476,10 +479,9 @@ out:
63983 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63984 */
63985
63986-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63987+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63988 {
63989- unsigned int *m;
63990- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63991+ slob_t *m;
63992 void *ret;
63993
63994 lockdep_trace_alloc(gfp);
63995@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63996
63997 if (!m)
63998 return NULL;
63999- *m = size;
64000+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64001+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64002+ m[0].units = size;
64003+ m[1].units = align;
64004 ret = (void *)m + align;
64005
64006 trace_kmalloc_node(_RET_IP_, ret,
64007@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64008 gfp |= __GFP_COMP;
64009 ret = slob_new_pages(gfp, order, node);
64010 if (ret) {
64011- struct page *page;
64012- page = virt_to_page(ret);
64013- page->private = size;
64014+ struct slob_page *sp;
64015+ sp = slob_page(ret);
64016+ sp->size = size;
64017 }
64018
64019 trace_kmalloc_node(_RET_IP_, ret,
64020 size, PAGE_SIZE << order, gfp, node);
64021 }
64022
64023- kmemleak_alloc(ret, size, 1, gfp);
64024+ return ret;
64025+}
64026+
64027+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64028+{
64029+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64030+ void *ret = __kmalloc_node_align(size, gfp, node, align);
64031+
64032+ if (!ZERO_OR_NULL_PTR(ret))
64033+ kmemleak_alloc(ret, size, 1, gfp);
64034 return ret;
64035 }
64036 EXPORT_SYMBOL(__kmalloc_node);
64037@@ -531,13 +545,88 @@ void kfree(const void *block)
64038 sp = slob_page(block);
64039 if (is_slob_page(sp)) {
64040 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64041- unsigned int *m = (unsigned int *)(block - align);
64042- slob_free(m, *m + align);
64043- } else
64044+ slob_t *m = (slob_t *)(block - align);
64045+ slob_free(m, m[0].units + align);
64046+ } else {
64047+ clear_slob_page(sp);
64048+ free_slob_page(sp);
64049+ sp->size = 0;
64050 put_page(&sp->page);
64051+ }
64052 }
64053 EXPORT_SYMBOL(kfree);
64054
64055+void check_object_size(const void *ptr, unsigned long n, bool to)
64056+{
64057+
64058+#ifdef CONFIG_PAX_USERCOPY
64059+ struct slob_page *sp;
64060+ const slob_t *free;
64061+ const void *base;
64062+ unsigned long flags;
64063+
64064+ if (!n)
64065+ return;
64066+
64067+ if (ZERO_OR_NULL_PTR(ptr))
64068+ goto report;
64069+
64070+ if (!virt_addr_valid(ptr))
64071+ return;
64072+
64073+ sp = slob_page(ptr);
64074+ if (!PageSlab((struct page*)sp)) {
64075+ if (object_is_on_stack(ptr, n) == -1)
64076+ goto report;
64077+ return;
64078+ }
64079+
64080+ if (sp->size) {
64081+ base = page_address(&sp->page);
64082+ if (base <= ptr && n <= sp->size - (ptr - base))
64083+ return;
64084+ goto report;
64085+ }
64086+
64087+ /* some tricky double walking to find the chunk */
64088+ spin_lock_irqsave(&slob_lock, flags);
64089+ base = (void *)((unsigned long)ptr & PAGE_MASK);
64090+ free = sp->free;
64091+
64092+ while (!slob_last(free) && (void *)free <= ptr) {
64093+ base = free + slob_units(free);
64094+ free = slob_next(free);
64095+ }
64096+
64097+ while (base < (void *)free) {
64098+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64099+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
64100+ int offset;
64101+
64102+ if (ptr < base + align)
64103+ break;
64104+
64105+ offset = ptr - base - align;
64106+ if (offset >= m) {
64107+ base += size;
64108+ continue;
64109+ }
64110+
64111+ if (n > m - offset)
64112+ break;
64113+
64114+ spin_unlock_irqrestore(&slob_lock, flags);
64115+ return;
64116+ }
64117+
64118+ spin_unlock_irqrestore(&slob_lock, flags);
64119+report:
64120+ pax_report_usercopy(ptr, n, to, NULL);
64121+#endif
64122+
64123+}
64124+EXPORT_SYMBOL(check_object_size);
64125+
64126 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64127 size_t ksize(const void *block)
64128 {
64129@@ -550,10 +639,10 @@ size_t ksize(const void *block)
64130 sp = slob_page(block);
64131 if (is_slob_page(sp)) {
64132 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64133- unsigned int *m = (unsigned int *)(block - align);
64134- return SLOB_UNITS(*m) * SLOB_UNIT;
64135+ slob_t *m = (slob_t *)(block - align);
64136+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64137 } else
64138- return sp->page.private;
64139+ return sp->size;
64140 }
64141 EXPORT_SYMBOL(ksize);
64142
64143@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64144 {
64145 struct kmem_cache *c;
64146
64147+#ifdef CONFIG_PAX_USERCOPY
64148+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
64149+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64150+#else
64151 c = slob_alloc(sizeof(struct kmem_cache),
64152 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64153+#endif
64154
64155 if (c) {
64156 c->name = name;
64157@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64158 {
64159 void *b;
64160
64161+#ifdef CONFIG_PAX_USERCOPY
64162+ b = __kmalloc_node_align(c->size, flags, node, c->align);
64163+#else
64164 if (c->size < PAGE_SIZE) {
64165 b = slob_alloc(c->size, flags, c->align, node);
64166 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64167 SLOB_UNITS(c->size) * SLOB_UNIT,
64168 flags, node);
64169 } else {
64170+ struct slob_page *sp;
64171+
64172 b = slob_new_pages(flags, get_order(c->size), node);
64173+ sp = slob_page(b);
64174+ sp->size = c->size;
64175 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64176 PAGE_SIZE << get_order(c->size),
64177 flags, node);
64178 }
64179+#endif
64180
64181 if (c->ctor)
64182 c->ctor(b);
64183@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64184
64185 static void __kmem_cache_free(void *b, int size)
64186 {
64187- if (size < PAGE_SIZE)
64188+ struct slob_page *sp = slob_page(b);
64189+
64190+ if (is_slob_page(sp))
64191 slob_free(b, size);
64192- else
64193+ else {
64194+ clear_slob_page(sp);
64195+ free_slob_page(sp);
64196+ sp->size = 0;
64197 slob_free_pages(b, get_order(size));
64198+ }
64199 }
64200
64201 static void kmem_rcu_free(struct rcu_head *head)
64202@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64203
64204 void kmem_cache_free(struct kmem_cache *c, void *b)
64205 {
64206+ int size = c->size;
64207+
64208+#ifdef CONFIG_PAX_USERCOPY
64209+ if (size + c->align < PAGE_SIZE) {
64210+ size += c->align;
64211+ b -= c->align;
64212+ }
64213+#endif
64214+
64215 kmemleak_free_recursive(b, c->flags);
64216 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64217 struct slob_rcu *slob_rcu;
64218- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64219- slob_rcu->size = c->size;
64220+ slob_rcu = b + (size - sizeof(struct slob_rcu));
64221+ slob_rcu->size = size;
64222 call_rcu(&slob_rcu->head, kmem_rcu_free);
64223 } else {
64224- __kmem_cache_free(b, c->size);
64225+ __kmem_cache_free(b, size);
64226 }
64227
64228+#ifdef CONFIG_PAX_USERCOPY
64229+ trace_kfree(_RET_IP_, b);
64230+#else
64231 trace_kmem_cache_free(_RET_IP_, b);
64232+#endif
64233+
64234 }
64235 EXPORT_SYMBOL(kmem_cache_free);
64236
64237diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64238--- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64239+++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64240@@ -431,7 +431,7 @@ static void print_track(const char *s, s
64241 if (!t->addr)
64242 return;
64243
64244- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64245+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64246 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64247 }
64248
64249@@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64250
64251 page = virt_to_head_page(x);
64252
64253+ BUG_ON(!PageSlab(page));
64254+
64255 slab_free(s, page, x, _RET_IP_);
64256
64257 trace_kmem_cache_free(_RET_IP_, x);
64258@@ -2216,7 +2218,7 @@ static int slub_min_objects;
64259 * Merge control. If this is set then no merging of slab caches will occur.
64260 * (Could be removed. This was introduced to pacify the merge skeptics.)
64261 */
64262-static int slub_nomerge;
64263+static int slub_nomerge = 1;
64264
64265 /*
64266 * Calculate the order of allocation given an slab object size.
64267@@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64268 * list to avoid pounding the page allocator excessively.
64269 */
64270 set_min_partial(s, ilog2(s->size));
64271- s->refcount = 1;
64272+ atomic_set(&s->refcount, 1);
64273 #ifdef CONFIG_NUMA
64274 s->remote_node_defrag_ratio = 1000;
64275 #endif
64276@@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64277 void kmem_cache_destroy(struct kmem_cache *s)
64278 {
64279 down_write(&slub_lock);
64280- s->refcount--;
64281- if (!s->refcount) {
64282+ if (atomic_dec_and_test(&s->refcount)) {
64283 list_del(&s->list);
64284 if (kmem_cache_close(s)) {
64285 printk(KERN_ERR "SLUB %s: %s called for cache that "
64286@@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64287 EXPORT_SYMBOL(__kmalloc_node);
64288 #endif
64289
64290+void check_object_size(const void *ptr, unsigned long n, bool to)
64291+{
64292+
64293+#ifdef CONFIG_PAX_USERCOPY
64294+ struct page *page;
64295+ struct kmem_cache *s = NULL;
64296+ unsigned long offset;
64297+
64298+ if (!n)
64299+ return;
64300+
64301+ if (ZERO_OR_NULL_PTR(ptr))
64302+ goto report;
64303+
64304+ if (!virt_addr_valid(ptr))
64305+ return;
64306+
64307+ page = virt_to_head_page(ptr);
64308+
64309+ if (!PageSlab(page)) {
64310+ if (object_is_on_stack(ptr, n) == -1)
64311+ goto report;
64312+ return;
64313+ }
64314+
64315+ s = page->slab;
64316+ if (!(s->flags & SLAB_USERCOPY))
64317+ goto report;
64318+
64319+ offset = (ptr - page_address(page)) % s->size;
64320+ if (offset <= s->objsize && n <= s->objsize - offset)
64321+ return;
64322+
64323+report:
64324+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64325+#endif
64326+
64327+}
64328+EXPORT_SYMBOL(check_object_size);
64329+
64330 size_t ksize(const void *object)
64331 {
64332 struct page *page;
64333@@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64334 int node;
64335
64336 list_add(&s->list, &slab_caches);
64337- s->refcount = -1;
64338+ atomic_set(&s->refcount, -1);
64339
64340 for_each_node_state(node, N_NORMAL_MEMORY) {
64341 struct kmem_cache_node *n = get_node(s, node);
64342@@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64343
64344 /* Caches that are not of the two-to-the-power-of size */
64345 if (KMALLOC_MIN_SIZE <= 32) {
64346- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64347+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64348 caches++;
64349 }
64350
64351 if (KMALLOC_MIN_SIZE <= 64) {
64352- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64353+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64354 caches++;
64355 }
64356
64357 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64358- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64359+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64360 caches++;
64361 }
64362
64363@@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64364 /*
64365 * We may have set a slab to be unmergeable during bootstrap.
64366 */
64367- if (s->refcount < 0)
64368+ if (atomic_read(&s->refcount) < 0)
64369 return 1;
64370
64371 return 0;
64372@@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64373 down_write(&slub_lock);
64374 s = find_mergeable(size, align, flags, name, ctor);
64375 if (s) {
64376- s->refcount++;
64377+ atomic_inc(&s->refcount);
64378 /*
64379 * Adjust the object sizes so that we clear
64380 * the complete object on kzalloc.
64381@@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64382 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64383
64384 if (sysfs_slab_alias(s, name)) {
64385- s->refcount--;
64386+ atomic_dec(&s->refcount);
64387 goto err;
64388 }
64389 up_write(&slub_lock);
64390@@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64391
64392 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64393 {
64394- return sprintf(buf, "%d\n", s->refcount - 1);
64395+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64396 }
64397 SLAB_ATTR_RO(aliases);
64398
64399@@ -4945,7 +4986,13 @@ static const struct file_operations proc
64400
64401 static int __init slab_proc_init(void)
64402 {
64403- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64404+ mode_t gr_mode = S_IRUGO;
64405+
64406+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64407+ gr_mode = S_IRUSR;
64408+#endif
64409+
64410+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64411 return 0;
64412 }
64413 module_init(slab_proc_init);
64414diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64415--- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64416+++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64417@@ -31,6 +31,7 @@
64418 #include <linux/backing-dev.h>
64419 #include <linux/memcontrol.h>
64420 #include <linux/gfp.h>
64421+#include <linux/hugetlb.h>
64422
64423 #include "internal.h"
64424
64425@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64426
64427 __page_cache_release(page);
64428 dtor = get_compound_page_dtor(page);
64429+ if (!PageHuge(page))
64430+ BUG_ON(dtor != free_compound_page);
64431 (*dtor)(page);
64432 }
64433
64434diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64435--- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64436+++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64437@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64438
64439 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64440 /* Activity counter to indicate that a swapon or swapoff has occurred */
64441-static atomic_t proc_poll_event = ATOMIC_INIT(0);
64442+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64443
64444 static inline unsigned char swap_count(unsigned char ent)
64445 {
64446@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64447 }
64448 filp_close(swap_file, NULL);
64449 err = 0;
64450- atomic_inc(&proc_poll_event);
64451+ atomic_inc_unchecked(&proc_poll_event);
64452 wake_up_interruptible(&proc_poll_wait);
64453
64454 out_dput:
64455@@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64456
64457 poll_wait(file, &proc_poll_wait, wait);
64458
64459- if (s->event != atomic_read(&proc_poll_event)) {
64460- s->event = atomic_read(&proc_poll_event);
64461+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64462+ s->event = atomic_read_unchecked(&proc_poll_event);
64463 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64464 }
64465
64466@@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64467 }
64468
64469 s->seq.private = s;
64470- s->event = atomic_read(&proc_poll_event);
64471+ s->event = atomic_read_unchecked(&proc_poll_event);
64472 return ret;
64473 }
64474
64475@@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64476 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64477
64478 mutex_unlock(&swapon_mutex);
64479- atomic_inc(&proc_poll_event);
64480+ atomic_inc_unchecked(&proc_poll_event);
64481 wake_up_interruptible(&proc_poll_wait);
64482
64483 if (S_ISREG(inode->i_mode))
64484diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64485--- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64486+++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64487@@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64488 * allocated buffer. Use this if you don't want to free the buffer immediately
64489 * like, for example, with RCU.
64490 */
64491+#undef __krealloc
64492 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64493 {
64494 void *ret;
64495@@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64496 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64497 * %NULL pointer, the object pointed to is freed.
64498 */
64499+#undef krealloc
64500 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64501 {
64502 void *ret;
64503@@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64504 void arch_pick_mmap_layout(struct mm_struct *mm)
64505 {
64506 mm->mmap_base = TASK_UNMAPPED_BASE;
64507+
64508+#ifdef CONFIG_PAX_RANDMMAP
64509+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64510+ mm->mmap_base += mm->delta_mmap;
64511+#endif
64512+
64513 mm->get_unmapped_area = arch_get_unmapped_area;
64514 mm->unmap_area = arch_unmap_area;
64515 }
64516diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64517--- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64518+++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64519@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64520
64521 pte = pte_offset_kernel(pmd, addr);
64522 do {
64523- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64524- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64525+
64526+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64527+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64528+ BUG_ON(!pte_exec(*pte));
64529+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64530+ continue;
64531+ }
64532+#endif
64533+
64534+ {
64535+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64536+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64537+ }
64538 } while (pte++, addr += PAGE_SIZE, addr != end);
64539 }
64540
64541@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64542 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64543 {
64544 pte_t *pte;
64545+ int ret = -ENOMEM;
64546
64547 /*
64548 * nr is a running index into the array which helps higher level
64549@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64550 pte = pte_alloc_kernel(pmd, addr);
64551 if (!pte)
64552 return -ENOMEM;
64553+
64554+ pax_open_kernel();
64555 do {
64556 struct page *page = pages[*nr];
64557
64558- if (WARN_ON(!pte_none(*pte)))
64559- return -EBUSY;
64560- if (WARN_ON(!page))
64561- return -ENOMEM;
64562+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64563+ if (pgprot_val(prot) & _PAGE_NX)
64564+#endif
64565+
64566+ if (WARN_ON(!pte_none(*pte))) {
64567+ ret = -EBUSY;
64568+ goto out;
64569+ }
64570+ if (WARN_ON(!page)) {
64571+ ret = -ENOMEM;
64572+ goto out;
64573+ }
64574 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64575 (*nr)++;
64576 } while (pte++, addr += PAGE_SIZE, addr != end);
64577- return 0;
64578+ ret = 0;
64579+out:
64580+ pax_close_kernel();
64581+ return ret;
64582 }
64583
64584 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64585@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64586 * and fall back on vmalloc() if that fails. Others
64587 * just put it in the vmalloc space.
64588 */
64589-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64590+#ifdef CONFIG_MODULES
64591+#ifdef MODULES_VADDR
64592 unsigned long addr = (unsigned long)x;
64593 if (addr >= MODULES_VADDR && addr < MODULES_END)
64594 return 1;
64595 #endif
64596+
64597+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64598+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64599+ return 1;
64600+#endif
64601+
64602+#endif
64603+
64604 return is_vmalloc_addr(x);
64605 }
64606
64607@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64608
64609 if (!pgd_none(*pgd)) {
64610 pud_t *pud = pud_offset(pgd, addr);
64611+#ifdef CONFIG_X86
64612+ if (!pud_large(*pud))
64613+#endif
64614 if (!pud_none(*pud)) {
64615 pmd_t *pmd = pmd_offset(pud, addr);
64616+#ifdef CONFIG_X86
64617+ if (!pmd_large(*pmd))
64618+#endif
64619 if (!pmd_none(*pmd)) {
64620 pte_t *ptep, pte;
64621
64622@@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64623 struct vm_struct *area;
64624
64625 BUG_ON(in_interrupt());
64626+
64627+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64628+ if (flags & VM_KERNEXEC) {
64629+ if (start != VMALLOC_START || end != VMALLOC_END)
64630+ return NULL;
64631+ start = (unsigned long)MODULES_EXEC_VADDR;
64632+ end = (unsigned long)MODULES_EXEC_END;
64633+ }
64634+#endif
64635+
64636 if (flags & VM_IOREMAP) {
64637 int bit = fls(size);
64638
64639@@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64640 if (count > totalram_pages)
64641 return NULL;
64642
64643+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64644+ if (!(pgprot_val(prot) & _PAGE_NX))
64645+ flags |= VM_KERNEXEC;
64646+#endif
64647+
64648 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64649 __builtin_return_address(0));
64650 if (!area)
64651@@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64652 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64653 return NULL;
64654
64655+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64656+ if (!(pgprot_val(prot) & _PAGE_NX))
64657+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64658+ node, gfp_mask, caller);
64659+ else
64660+#endif
64661+
64662 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64663 gfp_mask, caller);
64664
64665@@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64666 gfp_mask, prot, node, caller);
64667 }
64668
64669+#undef __vmalloc
64670 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64671 {
64672 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64673@@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64674 * For tight control over page level allocator and protection flags
64675 * use __vmalloc() instead.
64676 */
64677+#undef vmalloc
64678 void *vmalloc(unsigned long size)
64679 {
64680 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64681@@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64682 * For tight control over page level allocator and protection flags
64683 * use __vmalloc() instead.
64684 */
64685+#undef vzalloc
64686 void *vzalloc(unsigned long size)
64687 {
64688 return __vmalloc_node_flags(size, -1,
64689@@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64690 * The resulting memory area is zeroed so it can be mapped to userspace
64691 * without leaking data.
64692 */
64693+#undef vmalloc_user
64694 void *vmalloc_user(unsigned long size)
64695 {
64696 struct vm_struct *area;
64697@@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64698 * For tight control over page level allocator and protection flags
64699 * use __vmalloc() instead.
64700 */
64701+#undef vmalloc_node
64702 void *vmalloc_node(unsigned long size, int node)
64703 {
64704 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64705@@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64706 * For tight control over page level allocator and protection flags
64707 * use __vmalloc_node() instead.
64708 */
64709+#undef vzalloc_node
64710 void *vzalloc_node(unsigned long size, int node)
64711 {
64712 return __vmalloc_node_flags(size, node,
64713@@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64714 * For tight control over page level allocator and protection flags
64715 * use __vmalloc() instead.
64716 */
64717-
64718+#undef vmalloc_exec
64719 void *vmalloc_exec(unsigned long size)
64720 {
64721- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64722+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64723 -1, __builtin_return_address(0));
64724 }
64725
64726@@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64727 * Allocate enough 32bit PA addressable pages to cover @size from the
64728 * page level allocator and map them into contiguous kernel virtual space.
64729 */
64730+#undef vmalloc_32
64731 void *vmalloc_32(unsigned long size)
64732 {
64733 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64734@@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64735 * The resulting memory area is 32bit addressable and zeroed so it can be
64736 * mapped to userspace without leaking data.
64737 */
64738+#undef vmalloc_32_user
64739 void *vmalloc_32_user(unsigned long size)
64740 {
64741 struct vm_struct *area;
64742@@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64743 unsigned long uaddr = vma->vm_start;
64744 unsigned long usize = vma->vm_end - vma->vm_start;
64745
64746+ BUG_ON(vma->vm_mirror);
64747+
64748 if ((PAGE_SIZE-1) & (unsigned long)addr)
64749 return -EINVAL;
64750
64751diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64752--- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64753+++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64754@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64755 *
64756 * vm_stat contains the global counters
64757 */
64758-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64759+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64760 EXPORT_SYMBOL(vm_stat);
64761
64762 #ifdef CONFIG_SMP
64763@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64764 v = p->vm_stat_diff[i];
64765 p->vm_stat_diff[i] = 0;
64766 local_irq_restore(flags);
64767- atomic_long_add(v, &zone->vm_stat[i]);
64768+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64769 global_diff[i] += v;
64770 #ifdef CONFIG_NUMA
64771 /* 3 seconds idle till flush */
64772@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64773
64774 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64775 if (global_diff[i])
64776- atomic_long_add(global_diff[i], &vm_stat[i]);
64777+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64778 }
64779
64780 #endif
64781@@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64782 start_cpu_timer(cpu);
64783 #endif
64784 #ifdef CONFIG_PROC_FS
64785- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64786- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64787- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64788- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64789+ {
64790+ mode_t gr_mode = S_IRUGO;
64791+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64792+ gr_mode = S_IRUSR;
64793+#endif
64794+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64795+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64796+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64797+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64798+#else
64799+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64800+#endif
64801+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64802+ }
64803 #endif
64804 return 0;
64805 }
64806diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64807--- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64808+++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64809@@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64810 err = -EPERM;
64811 if (!capable(CAP_NET_ADMIN))
64812 break;
64813- if ((args.u.name_type >= 0) &&
64814- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64815+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64816 struct vlan_net *vn;
64817
64818 vn = net_generic(net, vlan_net_id);
64819diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64820--- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64821+++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64822@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64823 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64824 return 1;
64825 atm_return(vcc, truesize);
64826- atomic_inc(&vcc->stats->rx_drop);
64827+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64828 return 0;
64829 }
64830 EXPORT_SYMBOL(atm_charge);
64831@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64832 }
64833 }
64834 atm_return(vcc, guess);
64835- atomic_inc(&vcc->stats->rx_drop);
64836+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64837 return NULL;
64838 }
64839 EXPORT_SYMBOL(atm_alloc_charge);
64840@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64841
64842 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64843 {
64844-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64845+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64846 __SONET_ITEMS
64847 #undef __HANDLE_ITEM
64848 }
64849@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64850
64851 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64852 {
64853-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64854+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64855 __SONET_ITEMS
64856 #undef __HANDLE_ITEM
64857 }
64858diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
64859--- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
64860+++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
64861@@ -48,7 +48,7 @@ struct lane2_ops {
64862 const u8 *tlvs, u32 sizeoftlvs);
64863 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64864 const u8 *tlvs, u32 sizeoftlvs);
64865-};
64866+} __no_const;
64867
64868 /*
64869 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64870diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
64871--- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
64872+++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
64873@@ -33,7 +33,7 @@ struct mpoa_client {
64874 struct mpc_parameters parameters; /* parameters for this client */
64875
64876 const struct net_device_ops *old_ops;
64877- struct net_device_ops new_ops;
64878+ net_device_ops_no_const new_ops;
64879 };
64880
64881
64882diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
64883--- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
64884+++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
64885@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64886 struct timeval now;
64887 struct k_message msg;
64888
64889+ pax_track_stack();
64890+
64891 do_gettimeofday(&now);
64892
64893 read_lock_bh(&client->ingress_lock);
64894diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
64895--- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
64896+++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
64897@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64898 const struct k_atm_aal_stats *stats)
64899 {
64900 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64901- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64902- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64903- atomic_read(&stats->rx_drop));
64904+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64905+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64906+ atomic_read_unchecked(&stats->rx_drop));
64907 }
64908
64909 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64910@@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
64911 {
64912 struct sock *sk = sk_atm(vcc);
64913
64914+#ifdef CONFIG_GRKERNSEC_HIDESYM
64915+ seq_printf(seq, "%p ", NULL);
64916+#else
64917 seq_printf(seq, "%p ", vcc);
64918+#endif
64919+
64920 if (!vcc->dev)
64921 seq_printf(seq, "Unassigned ");
64922 else
64923@@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
64924 {
64925 if (!vcc->dev)
64926 seq_printf(seq, sizeof(void *) == 4 ?
64927+#ifdef CONFIG_GRKERNSEC_HIDESYM
64928+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
64929+#else
64930 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
64931+#endif
64932 else
64933 seq_printf(seq, "%3d %3d %5d ",
64934 vcc->dev->number, vcc->vpi, vcc->vci);
64935diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
64936--- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
64937+++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
64938@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64939 static void copy_aal_stats(struct k_atm_aal_stats *from,
64940 struct atm_aal_stats *to)
64941 {
64942-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64943+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64944 __AAL_STAT_ITEMS
64945 #undef __HANDLE_ITEM
64946 }
64947@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64948 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64949 struct atm_aal_stats *to)
64950 {
64951-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64952+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64953 __AAL_STAT_ITEMS
64954 #undef __HANDLE_ITEM
64955 }
64956diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
64957--- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
64958+++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
64959@@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
64960 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64961 dev_add_pack(&hard_iface->batman_adv_ptype);
64962
64963- atomic_set(&hard_iface->seqno, 1);
64964- atomic_set(&hard_iface->frag_seqno, 1);
64965+ atomic_set_unchecked(&hard_iface->seqno, 1);
64966+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64967 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64968 hard_iface->net_dev->name);
64969
64970diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
64971--- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
64972+++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
64973@@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
64974 return;
64975
64976 /* could be changed by schedule_own_packet() */
64977- if_incoming_seqno = atomic_read(&if_incoming->seqno);
64978+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64979
64980 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64981
64982diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
64983--- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
64984+++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
64985@@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
64986
64987 /* change sequence number to network order */
64988 batman_packet->seqno =
64989- htonl((uint32_t)atomic_read(&hard_iface->seqno));
64990+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64991
64992 if (vis_server == VIS_TYPE_SERVER_SYNC)
64993 batman_packet->flags |= VIS_SERVER;
64994@@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
64995 else
64996 batman_packet->gw_flags = 0;
64997
64998- atomic_inc(&hard_iface->seqno);
64999+ atomic_inc_unchecked(&hard_iface->seqno);
65000
65001 slide_own_bcast_window(hard_iface);
65002 send_time = own_send_time(bat_priv);
65003diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65004--- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65005+++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65006@@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65007
65008 /* set broadcast sequence number */
65009 bcast_packet->seqno =
65010- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65011+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65012
65013 add_bcast_packet_to_list(bat_priv, skb);
65014
65015@@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65016 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65017
65018 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65019- atomic_set(&bat_priv->bcast_seqno, 1);
65020+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65021 atomic_set(&bat_priv->hna_local_changed, 0);
65022
65023 bat_priv->primary_if = NULL;
65024diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65025--- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65026+++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65027@@ -38,8 +38,8 @@ struct hard_iface {
65028 int16_t if_num;
65029 char if_status;
65030 struct net_device *net_dev;
65031- atomic_t seqno;
65032- atomic_t frag_seqno;
65033+ atomic_unchecked_t seqno;
65034+ atomic_unchecked_t frag_seqno;
65035 unsigned char *packet_buff;
65036 int packet_len;
65037 struct kobject *hardif_obj;
65038@@ -141,7 +141,7 @@ struct bat_priv {
65039 atomic_t orig_interval; /* uint */
65040 atomic_t hop_penalty; /* uint */
65041 atomic_t log_level; /* uint */
65042- atomic_t bcast_seqno;
65043+ atomic_unchecked_t bcast_seqno;
65044 atomic_t bcast_queue_left;
65045 atomic_t batman_queue_left;
65046 char num_ifaces;
65047diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65048--- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65049+++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65050@@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65051 frag1->flags = UNI_FRAG_HEAD | large_tail;
65052 frag2->flags = large_tail;
65053
65054- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65055+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65056 frag1->seqno = htons(seqno - 1);
65057 frag2->seqno = htons(seqno);
65058
65059diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65060--- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65061+++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65062@@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65063
65064 /* Reject if config buffer is too small. */
65065 len = cmd_len - sizeof(*req);
65066- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65067+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65068 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65069 l2cap_build_conf_rsp(sk, rsp,
65070 L2CAP_CONF_REJECT, flags), rsp);
65071diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65072--- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65073+++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65074@@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65075 break;
65076 }
65077
65078+ memset(&cinfo, 0, sizeof(cinfo));
65079 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65080 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65081
65082diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65083--- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65084+++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65085@@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65086
65087 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65088
65089+ memset(&cinfo, 0, sizeof(cinfo));
65090 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65091 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65092
65093diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65094--- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65095+++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65096@@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65097 nexthdr = ip6h->nexthdr;
65098 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65099
65100- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65101+ if (nexthdr != IPPROTO_ICMPV6)
65102 return 0;
65103
65104 /* Okay, we found ICMPv6 header */
65105diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65106--- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65107+++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65108@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65109 tmp.valid_hooks = t->table->valid_hooks;
65110 }
65111 mutex_unlock(&ebt_mutex);
65112- if (copy_to_user(user, &tmp, *len) != 0){
65113+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65114 BUGPRINT("c2u Didn't work\n");
65115 ret = -EFAULT;
65116 break;
65117@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65118 int ret;
65119 void __user *pos;
65120
65121+ pax_track_stack();
65122+
65123 memset(&tinfo, 0, sizeof(tinfo));
65124
65125 if (cmd == EBT_SO_GET_ENTRIES) {
65126diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65127--- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65128+++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65129@@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65130 #ifdef CONFIG_DEBUG_FS
65131 struct debug_fs_counter {
65132 atomic_t caif_nr_socks;
65133- atomic_t num_connect_req;
65134- atomic_t num_connect_resp;
65135- atomic_t num_connect_fail_resp;
65136- atomic_t num_disconnect;
65137- atomic_t num_remote_shutdown_ind;
65138- atomic_t num_tx_flow_off_ind;
65139- atomic_t num_tx_flow_on_ind;
65140- atomic_t num_rx_flow_off;
65141- atomic_t num_rx_flow_on;
65142+ atomic_unchecked_t num_connect_req;
65143+ atomic_unchecked_t num_connect_resp;
65144+ atomic_unchecked_t num_connect_fail_resp;
65145+ atomic_unchecked_t num_disconnect;
65146+ atomic_unchecked_t num_remote_shutdown_ind;
65147+ atomic_unchecked_t num_tx_flow_off_ind;
65148+ atomic_unchecked_t num_tx_flow_on_ind;
65149+ atomic_unchecked_t num_rx_flow_off;
65150+ atomic_unchecked_t num_rx_flow_on;
65151 };
65152 static struct debug_fs_counter cnt;
65153 #define dbfs_atomic_inc(v) atomic_inc(v)
65154+#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65155 #define dbfs_atomic_dec(v) atomic_dec(v)
65156 #else
65157 #define dbfs_atomic_inc(v)
65158@@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65159 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65160 sk_rcvbuf_lowwater(cf_sk));
65161 set_rx_flow_off(cf_sk);
65162- dbfs_atomic_inc(&cnt.num_rx_flow_off);
65163+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65164 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65165 }
65166
65167@@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65168 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65169 set_rx_flow_off(cf_sk);
65170 pr_debug("sending flow OFF due to rmem_schedule\n");
65171- dbfs_atomic_inc(&cnt.num_rx_flow_off);
65172+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65173 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65174 }
65175 skb->dev = NULL;
65176@@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65177 switch (flow) {
65178 case CAIF_CTRLCMD_FLOW_ON_IND:
65179 /* OK from modem to start sending again */
65180- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65181+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65182 set_tx_flow_on(cf_sk);
65183 cf_sk->sk.sk_state_change(&cf_sk->sk);
65184 break;
65185
65186 case CAIF_CTRLCMD_FLOW_OFF_IND:
65187 /* Modem asks us to shut up */
65188- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65189+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65190 set_tx_flow_off(cf_sk);
65191 cf_sk->sk.sk_state_change(&cf_sk->sk);
65192 break;
65193
65194 case CAIF_CTRLCMD_INIT_RSP:
65195 /* We're now connected */
65196- dbfs_atomic_inc(&cnt.num_connect_resp);
65197+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65198 cf_sk->sk.sk_state = CAIF_CONNECTED;
65199 set_tx_flow_on(cf_sk);
65200 cf_sk->sk.sk_state_change(&cf_sk->sk);
65201@@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65202
65203 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65204 /* Connect request failed */
65205- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65206+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65207 cf_sk->sk.sk_err = ECONNREFUSED;
65208 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65209 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65210@@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65211
65212 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65213 /* Modem has closed this connection, or device is down. */
65214- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65215+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65216 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65217 cf_sk->sk.sk_err = ECONNRESET;
65218 set_rx_flow_on(cf_sk);
65219@@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65220 return;
65221
65222 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65223- dbfs_atomic_inc(&cnt.num_rx_flow_on);
65224+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65225 set_rx_flow_on(cf_sk);
65226 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65227 }
65228@@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65229 /*ifindex = id of the interface.*/
65230 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65231
65232- dbfs_atomic_inc(&cnt.num_connect_req);
65233+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65234 cf_sk->layer.receive = caif_sktrecv_cb;
65235 err = caif_connect_client(&cf_sk->conn_req,
65236 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65237@@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65238 spin_unlock(&sk->sk_receive_queue.lock);
65239 sock->sk = NULL;
65240
65241- dbfs_atomic_inc(&cnt.num_disconnect);
65242+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65243
65244 if (cf_sk->debugfs_socket_dir != NULL)
65245 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65246diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65247--- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65248+++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65249@@ -9,6 +9,7 @@
65250 #include <linux/stddef.h>
65251 #include <linux/spinlock.h>
65252 #include <linux/slab.h>
65253+#include <linux/sched.h>
65254 #include <net/caif/caif_layer.h>
65255 #include <net/caif/cfpkt.h>
65256 #include <net/caif/cfctrl.h>
65257@@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65258 dev_info.id = 0xff;
65259 memset(this, 0, sizeof(*this));
65260 cfsrvl_init(&this->serv, 0, &dev_info, false);
65261- atomic_set(&this->req_seq_no, 1);
65262- atomic_set(&this->rsp_seq_no, 1);
65263+ atomic_set_unchecked(&this->req_seq_no, 1);
65264+ atomic_set_unchecked(&this->rsp_seq_no, 1);
65265 this->serv.layer.receive = cfctrl_recv;
65266 sprintf(this->serv.layer.name, "ctrl");
65267 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65268@@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65269 struct cfctrl_request_info *req)
65270 {
65271 spin_lock(&ctrl->info_list_lock);
65272- atomic_inc(&ctrl->req_seq_no);
65273- req->sequence_no = atomic_read(&ctrl->req_seq_no);
65274+ atomic_inc_unchecked(&ctrl->req_seq_no);
65275+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65276 list_add_tail(&req->list, &ctrl->list);
65277 spin_unlock(&ctrl->info_list_lock);
65278 }
65279@@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65280 if (p != first)
65281 pr_warn("Requests are not received in order\n");
65282
65283- atomic_set(&ctrl->rsp_seq_no,
65284+ atomic_set_unchecked(&ctrl->rsp_seq_no,
65285 p->sequence_no);
65286 list_del(&p->list);
65287 goto out;
65288@@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65289 struct cfctrl *cfctrl = container_obj(layer);
65290 struct cfctrl_request_info rsp, *req;
65291
65292+ pax_track_stack();
65293
65294 cfpkt_extr_head(pkt, &cmdrsp, 1);
65295 cmd = cmdrsp & CFCTRL_CMD_MASK;
65296diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65297--- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65298+++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65299@@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65300 struct bcm_sock *bo = bcm_sk(sk);
65301 struct bcm_op *op;
65302
65303+#ifdef CONFIG_GRKERNSEC_HIDESYM
65304+ seq_printf(m, ">>> socket %p", NULL);
65305+ seq_printf(m, " / sk %p", NULL);
65306+ seq_printf(m, " / bo %p", NULL);
65307+#else
65308 seq_printf(m, ">>> socket %p", sk->sk_socket);
65309 seq_printf(m, " / sk %p", sk);
65310 seq_printf(m, " / bo %p", bo);
65311+#endif
65312 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65313 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65314 seq_printf(m, " <<<\n");
65315diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65316--- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65317+++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65318@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65319 }
65320
65321 kfree_skb(skb);
65322- atomic_inc(&sk->sk_drops);
65323+ atomic_inc_unchecked(&sk->sk_drops);
65324 sk_mem_reclaim_partial(sk);
65325
65326 return err;
65327diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65328--- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65329+++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65330@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65331 if (no_module && capable(CAP_NET_ADMIN))
65332 no_module = request_module("netdev-%s", name);
65333 if (no_module && capable(CAP_SYS_MODULE)) {
65334+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65335+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
65336+#else
65337 if (!request_module("%s", name))
65338 pr_err("Loading kernel module for a network device "
65339 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65340 "instead\n", name);
65341+#endif
65342 }
65343 }
65344 EXPORT_SYMBOL(dev_load);
65345@@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65346
65347 struct dev_gso_cb {
65348 void (*destructor)(struct sk_buff *skb);
65349-};
65350+} __no_const;
65351
65352 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65353
65354@@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65355 }
65356 EXPORT_SYMBOL(netif_rx_ni);
65357
65358-static void net_tx_action(struct softirq_action *h)
65359+static void net_tx_action(void)
65360 {
65361 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65362
65363@@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65364 }
65365 EXPORT_SYMBOL(netif_napi_del);
65366
65367-static void net_rx_action(struct softirq_action *h)
65368+static void net_rx_action(void)
65369 {
65370 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65371 unsigned long time_limit = jiffies + 2;
65372diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65373--- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65374+++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65375@@ -60,7 +60,7 @@ struct flow_cache {
65376 struct timer_list rnd_timer;
65377 };
65378
65379-atomic_t flow_cache_genid = ATOMIC_INIT(0);
65380+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65381 EXPORT_SYMBOL(flow_cache_genid);
65382 static struct flow_cache flow_cache_global;
65383 static struct kmem_cache *flow_cachep __read_mostly;
65384@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65385
65386 static int flow_entry_valid(struct flow_cache_entry *fle)
65387 {
65388- if (atomic_read(&flow_cache_genid) != fle->genid)
65389+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65390 return 0;
65391 if (fle->object && !fle->object->ops->check(fle->object))
65392 return 0;
65393@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65394 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65395 fcp->hash_count++;
65396 }
65397- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65398+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65399 flo = fle->object;
65400 if (!flo)
65401 goto ret_object;
65402@@ -274,7 +274,7 @@ nocache:
65403 }
65404 flo = resolver(net, key, family, dir, flo, ctx);
65405 if (fle) {
65406- fle->genid = atomic_read(&flow_cache_genid);
65407+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
65408 if (!IS_ERR(flo))
65409 fle->object = flo;
65410 else
65411diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65412--- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65413+++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65414@@ -56,7 +56,7 @@
65415 struct rtnl_link {
65416 rtnl_doit_func doit;
65417 rtnl_dumpit_func dumpit;
65418-};
65419+} __no_const;
65420
65421 static DEFINE_MUTEX(rtnl_mutex);
65422
65423diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65424--- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65425+++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65426@@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65427 struct sock *sk = skb->sk;
65428 int ret = 0;
65429
65430+ pax_track_stack();
65431+
65432 if (splice_grow_spd(pipe, &spd))
65433 return -ENOMEM;
65434
65435diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65436--- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65437+++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65438@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65439 */
65440 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65441 (unsigned)sk->sk_rcvbuf) {
65442- atomic_inc(&sk->sk_drops);
65443+ atomic_inc_unchecked(&sk->sk_drops);
65444 return -ENOMEM;
65445 }
65446
65447@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65448 return err;
65449
65450 if (!sk_rmem_schedule(sk, skb->truesize)) {
65451- atomic_inc(&sk->sk_drops);
65452+ atomic_inc_unchecked(&sk->sk_drops);
65453 return -ENOBUFS;
65454 }
65455
65456@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65457 skb_dst_force(skb);
65458
65459 spin_lock_irqsave(&list->lock, flags);
65460- skb->dropcount = atomic_read(&sk->sk_drops);
65461+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65462 __skb_queue_tail(list, skb);
65463 spin_unlock_irqrestore(&list->lock, flags);
65464
65465@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65466 skb->dev = NULL;
65467
65468 if (sk_rcvqueues_full(sk, skb)) {
65469- atomic_inc(&sk->sk_drops);
65470+ atomic_inc_unchecked(&sk->sk_drops);
65471 goto discard_and_relse;
65472 }
65473 if (nested)
65474@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65475 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65476 } else if (sk_add_backlog(sk, skb)) {
65477 bh_unlock_sock(sk);
65478- atomic_inc(&sk->sk_drops);
65479+ atomic_inc_unchecked(&sk->sk_drops);
65480 goto discard_and_relse;
65481 }
65482
65483@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65484 return -ENOTCONN;
65485 if (lv < len)
65486 return -EINVAL;
65487- if (copy_to_user(optval, address, len))
65488+ if (len > sizeof(address) || copy_to_user(optval, address, len))
65489 return -EFAULT;
65490 goto lenout;
65491 }
65492@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65493
65494 if (len > lv)
65495 len = lv;
65496- if (copy_to_user(optval, &v, len))
65497+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
65498 return -EFAULT;
65499 lenout:
65500 if (put_user(len, optlen))
65501@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65502 */
65503 smp_wmb();
65504 atomic_set(&sk->sk_refcnt, 1);
65505- atomic_set(&sk->sk_drops, 0);
65506+ atomic_set_unchecked(&sk->sk_drops, 0);
65507 }
65508 EXPORT_SYMBOL(sock_init_data);
65509
65510diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65511--- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65512+++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65513@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65514
65515 if (len > *lenp) len = *lenp;
65516
65517- if (copy_to_user(buffer, addr, len))
65518+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
65519 return -EFAULT;
65520
65521 *lenp = len;
65522@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65523
65524 if (len > *lenp) len = *lenp;
65525
65526- if (copy_to_user(buffer, devname, len))
65527+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
65528 return -EFAULT;
65529
65530 *lenp = len;
65531diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65532--- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65533+++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65534@@ -4,7 +4,7 @@
65535
65536 config ECONET
65537 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65538- depends on EXPERIMENTAL && INET
65539+ depends on EXPERIMENTAL && INET && BROKEN
65540 ---help---
65541 Econet is a fairly old and slow networking protocol mainly used by
65542 Acorn computers to access file and print servers. It uses native
65543diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65544--- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65545+++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65546@@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65547 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65548 fib_sync_up(dev);
65549 #endif
65550- atomic_inc(&net->ipv4.dev_addr_genid);
65551+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65552 rt_cache_flush(dev_net(dev), -1);
65553 break;
65554 case NETDEV_DOWN:
65555 fib_del_ifaddr(ifa, NULL);
65556- atomic_inc(&net->ipv4.dev_addr_genid);
65557+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65558 if (ifa->ifa_dev->ifa_list == NULL) {
65559 /* Last address was deleted from this interface.
65560 * Disable IP.
65561@@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65562 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65563 fib_sync_up(dev);
65564 #endif
65565- atomic_inc(&net->ipv4.dev_addr_genid);
65566+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65567 rt_cache_flush(dev_net(dev), -1);
65568 break;
65569 case NETDEV_DOWN:
65570diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65571--- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65572+++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65573@@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65574 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65575 nh->nh_gw,
65576 nh->nh_parent->fib_scope);
65577- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65578+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65579
65580 return nh->nh_saddr;
65581 }
65582diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65583--- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65584+++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65585@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65586 r->idiag_retrans = 0;
65587
65588 r->id.idiag_if = sk->sk_bound_dev_if;
65589+
65590+#ifdef CONFIG_GRKERNSEC_HIDESYM
65591+ r->id.idiag_cookie[0] = 0;
65592+ r->id.idiag_cookie[1] = 0;
65593+#else
65594 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65595 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65596+#endif
65597
65598 r->id.idiag_sport = inet->inet_sport;
65599 r->id.idiag_dport = inet->inet_dport;
65600@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65601 r->idiag_family = tw->tw_family;
65602 r->idiag_retrans = 0;
65603 r->id.idiag_if = tw->tw_bound_dev_if;
65604+
65605+#ifdef CONFIG_GRKERNSEC_HIDESYM
65606+ r->id.idiag_cookie[0] = 0;
65607+ r->id.idiag_cookie[1] = 0;
65608+#else
65609 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65610 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65611+#endif
65612+
65613 r->id.idiag_sport = tw->tw_sport;
65614 r->id.idiag_dport = tw->tw_dport;
65615 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65616@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65617 if (sk == NULL)
65618 goto unlock;
65619
65620+#ifndef CONFIG_GRKERNSEC_HIDESYM
65621 err = -ESTALE;
65622 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65623 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65624 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65625 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65626 goto out;
65627+#endif
65628
65629 err = -ENOMEM;
65630 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65631@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65632 r->idiag_retrans = req->retrans;
65633
65634 r->id.idiag_if = sk->sk_bound_dev_if;
65635+
65636+#ifdef CONFIG_GRKERNSEC_HIDESYM
65637+ r->id.idiag_cookie[0] = 0;
65638+ r->id.idiag_cookie[1] = 0;
65639+#else
65640 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65641 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65642+#endif
65643
65644 tmo = req->expires - jiffies;
65645 if (tmo < 0)
65646diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65647--- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65648+++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65649@@ -18,11 +18,14 @@
65650 #include <linux/sched.h>
65651 #include <linux/slab.h>
65652 #include <linux/wait.h>
65653+#include <linux/security.h>
65654
65655 #include <net/inet_connection_sock.h>
65656 #include <net/inet_hashtables.h>
65657 #include <net/ip.h>
65658
65659+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65660+
65661 /*
65662 * Allocate and initialize a new local port bind bucket.
65663 * The bindhash mutex for snum's hash chain must be held here.
65664@@ -529,6 +532,8 @@ ok:
65665 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65666 spin_unlock(&head->lock);
65667
65668+ gr_update_task_in_ip_table(current, inet_sk(sk));
65669+
65670 if (tw) {
65671 inet_twsk_deschedule(tw, death_row);
65672 while (twrefcnt) {
65673diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65674--- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65675+++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65676@@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65677 unsigned int sequence;
65678 int invalidated, newrefcnt = 0;
65679
65680+ pax_track_stack();
65681+
65682 /* Look up for the address quickly, lockless.
65683 * Because of a concurrent writer, we might not find an existing entry.
65684 */
65685@@ -516,8 +518,8 @@ found: /* The existing node has been fo
65686 if (p) {
65687 p->daddr = *daddr;
65688 atomic_set(&p->refcnt, 1);
65689- atomic_set(&p->rid, 0);
65690- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65691+ atomic_set_unchecked(&p->rid, 0);
65692+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65693 p->tcp_ts_stamp = 0;
65694 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65695 p->rate_tokens = 0;
65696diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65697--- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65698+++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65699@@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65700 return 0;
65701
65702 start = qp->rid;
65703- end = atomic_inc_return(&peer->rid);
65704+ end = atomic_inc_return_unchecked(&peer->rid);
65705 qp->rid = end;
65706
65707 rc = qp->q.fragments && (end - start) > max;
65708diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65709--- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65710+++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65711@@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65712 int val;
65713 int len;
65714
65715+ pax_track_stack();
65716+
65717 if (level != SOL_IP)
65718 return -EOPNOTSUPP;
65719
65720diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65721--- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65722+++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65723@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65724
65725 *len = 0;
65726
65727- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65728+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65729 if (*octets == NULL) {
65730 if (net_ratelimit())
65731 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65732diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65733--- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65734+++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-05 19:44:37.000000000 -0400
65735@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65736 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65737 {
65738 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65739- atomic_inc(&sk->sk_drops);
65740+ atomic_inc_unchecked(&sk->sk_drops);
65741 kfree_skb(skb);
65742 return NET_RX_DROP;
65743 }
65744@@ -730,15 +730,19 @@ static int raw_init(struct sock *sk)
65745
65746 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65747 {
65748+ struct icmp_filter filter;
65749+
65750 if (optlen > sizeof(struct icmp_filter))
65751 optlen = sizeof(struct icmp_filter);
65752- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65753+ if (copy_from_user(&filter, optval, optlen))
65754 return -EFAULT;
65755+ memcpy(&raw_sk(sk)->filter, &filter, sizeof(filter));
65756 return 0;
65757 }
65758
65759 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65760 {
65761+ struct icmp_filter filter;
65762 int len, ret = -EFAULT;
65763
65764 if (get_user(len, optlen))
65765@@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65766 if (len > sizeof(struct icmp_filter))
65767 len = sizeof(struct icmp_filter);
65768 ret = -EFAULT;
65769+ memcpy(&filter, &raw_sk(sk)->filter, len);
65770 if (put_user(len, optlen) ||
65771- copy_to_user(optval, &raw_sk(sk)->filter, len))
65772+ copy_to_user(optval, &filter, len))
65773 goto out;
65774 ret = 0;
65775 out: return ret;
65776@@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65777 sk_wmem_alloc_get(sp),
65778 sk_rmem_alloc_get(sp),
65779 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65780- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65781+ atomic_read(&sp->sk_refcnt),
65782+#ifdef CONFIG_GRKERNSEC_HIDESYM
65783+ NULL,
65784+#else
65785+ sp,
65786+#endif
65787+ atomic_read_unchecked(&sp->sk_drops));
65788 }
65789
65790 static int raw_seq_show(struct seq_file *seq, void *v)
65791diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65792--- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65793+++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65794@@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65795
65796 static inline int rt_genid(struct net *net)
65797 {
65798- return atomic_read(&net->ipv4.rt_genid);
65799+ return atomic_read_unchecked(&net->ipv4.rt_genid);
65800 }
65801
65802 #ifdef CONFIG_PROC_FS
65803@@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65804 unsigned char shuffle;
65805
65806 get_random_bytes(&shuffle, sizeof(shuffle));
65807- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65808+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65809 }
65810
65811 /*
65812@@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65813 rt->peer->pmtu_expires - jiffies : 0;
65814 if (rt->peer) {
65815 inet_peer_refcheck(rt->peer);
65816- id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65817+ id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65818 if (rt->peer->tcp_ts_stamp) {
65819 ts = rt->peer->tcp_ts;
65820 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65821diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65822--- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65823+++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65824@@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
65825 int val;
65826 int err = 0;
65827
65828+ pax_track_stack();
65829+
65830 /* These are data/string values, all the others are ints */
65831 switch (optname) {
65832 case TCP_CONGESTION: {
65833@@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
65834 struct tcp_sock *tp = tcp_sk(sk);
65835 int val, len;
65836
65837+ pax_track_stack();
65838+
65839 if (get_user(len, optlen))
65840 return -EFAULT;
65841
65842diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
65843--- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
65844+++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
65845@@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65846 int sysctl_tcp_low_latency __read_mostly;
65847 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65848
65849+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65850+extern int grsec_enable_blackhole;
65851+#endif
65852
65853 #ifdef CONFIG_TCP_MD5SIG
65854 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65855@@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65856 return 0;
65857
65858 reset:
65859+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65860+ if (!grsec_enable_blackhole)
65861+#endif
65862 tcp_v4_send_reset(rsk, skb);
65863 discard:
65864 kfree_skb(skb);
65865@@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65866 TCP_SKB_CB(skb)->sacked = 0;
65867
65868 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65869- if (!sk)
65870+ if (!sk) {
65871+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65872+ ret = 1;
65873+#endif
65874 goto no_tcp_socket;
65875-
65876+ }
65877 process:
65878- if (sk->sk_state == TCP_TIME_WAIT)
65879+ if (sk->sk_state == TCP_TIME_WAIT) {
65880+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65881+ ret = 2;
65882+#endif
65883 goto do_time_wait;
65884+ }
65885
65886 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65887 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65888@@ -1711,6 +1724,10 @@ no_tcp_socket:
65889 bad_packet:
65890 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65891 } else {
65892+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65893+ if (!grsec_enable_blackhole || (ret == 1 &&
65894+ (skb->dev->flags & IFF_LOOPBACK)))
65895+#endif
65896 tcp_v4_send_reset(NULL, skb);
65897 }
65898
65899@@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
65900 0, /* non standard timer */
65901 0, /* open_requests have no inode */
65902 atomic_read(&sk->sk_refcnt),
65903+#ifdef CONFIG_GRKERNSEC_HIDESYM
65904+ NULL,
65905+#else
65906 req,
65907+#endif
65908 len);
65909 }
65910
65911@@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
65912 sock_i_uid(sk),
65913 icsk->icsk_probes_out,
65914 sock_i_ino(sk),
65915- atomic_read(&sk->sk_refcnt), sk,
65916+ atomic_read(&sk->sk_refcnt),
65917+#ifdef CONFIG_GRKERNSEC_HIDESYM
65918+ NULL,
65919+#else
65920+ sk,
65921+#endif
65922 jiffies_to_clock_t(icsk->icsk_rto),
65923 jiffies_to_clock_t(icsk->icsk_ack.ato),
65924 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65925@@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
65926 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
65927 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65928 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65929- atomic_read(&tw->tw_refcnt), tw, len);
65930+ atomic_read(&tw->tw_refcnt),
65931+#ifdef CONFIG_GRKERNSEC_HIDESYM
65932+ NULL,
65933+#else
65934+ tw,
65935+#endif
65936+ len);
65937 }
65938
65939 #define TMPSZ 150
65940diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
65941--- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
65942+++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
65943@@ -27,6 +27,10 @@
65944 #include <net/inet_common.h>
65945 #include <net/xfrm.h>
65946
65947+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65948+extern int grsec_enable_blackhole;
65949+#endif
65950+
65951 int sysctl_tcp_syncookies __read_mostly = 1;
65952 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65953
65954@@ -745,6 +749,10 @@ listen_overflow:
65955
65956 embryonic_reset:
65957 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65958+
65959+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65960+ if (!grsec_enable_blackhole)
65961+#endif
65962 if (!(flg & TCP_FLAG_RST))
65963 req->rsk_ops->send_reset(sk, skb);
65964
65965diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
65966--- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
65967+++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
65968@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65969 int mss;
65970 int s_data_desired = 0;
65971
65972+ pax_track_stack();
65973+
65974 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65975 s_data_desired = cvp->s_data_desired;
65976 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65977diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
65978--- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
65979+++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
65980@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65981 if (cnt + width >= len)
65982 break;
65983
65984- if (copy_to_user(buf + cnt, tbuf, width))
65985+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65986 return -EFAULT;
65987 cnt += width;
65988 }
65989diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
65990--- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
65991+++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
65992@@ -22,6 +22,10 @@
65993 #include <linux/gfp.h>
65994 #include <net/tcp.h>
65995
65996+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65997+extern int grsec_lastack_retries;
65998+#endif
65999+
66000 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66001 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66002 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66003@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66004 }
66005 }
66006
66007+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66008+ if ((sk->sk_state == TCP_LAST_ACK) &&
66009+ (grsec_lastack_retries > 0) &&
66010+ (grsec_lastack_retries < retry_until))
66011+ retry_until = grsec_lastack_retries;
66012+#endif
66013+
66014 if (retransmits_timed_out(sk, retry_until,
66015 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66016 /* Has it gone just too far? */
66017diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66018--- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66019+++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66020@@ -86,6 +86,7 @@
66021 #include <linux/types.h>
66022 #include <linux/fcntl.h>
66023 #include <linux/module.h>
66024+#include <linux/security.h>
66025 #include <linux/socket.h>
66026 #include <linux/sockios.h>
66027 #include <linux/igmp.h>
66028@@ -107,6 +108,10 @@
66029 #include <net/xfrm.h>
66030 #include "udp_impl.h"
66031
66032+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66033+extern int grsec_enable_blackhole;
66034+#endif
66035+
66036 struct udp_table udp_table __read_mostly;
66037 EXPORT_SYMBOL(udp_table);
66038
66039@@ -564,6 +569,9 @@ found:
66040 return s;
66041 }
66042
66043+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66044+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66045+
66046 /*
66047 * This routine is called by the ICMP module when it gets some
66048 * sort of error condition. If err < 0 then the socket should
66049@@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66050 dport = usin->sin_port;
66051 if (dport == 0)
66052 return -EINVAL;
66053+
66054+ err = gr_search_udp_sendmsg(sk, usin);
66055+ if (err)
66056+ return err;
66057 } else {
66058 if (sk->sk_state != TCP_ESTABLISHED)
66059 return -EDESTADDRREQ;
66060+
66061+ err = gr_search_udp_sendmsg(sk, NULL);
66062+ if (err)
66063+ return err;
66064+
66065 daddr = inet->inet_daddr;
66066 dport = inet->inet_dport;
66067 /* Open fast path for connected socket.
66068@@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66069 udp_lib_checksum_complete(skb)) {
66070 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66071 IS_UDPLITE(sk));
66072- atomic_inc(&sk->sk_drops);
66073+ atomic_inc_unchecked(&sk->sk_drops);
66074 __skb_unlink(skb, rcvq);
66075 __skb_queue_tail(&list_kill, skb);
66076 }
66077@@ -1176,6 +1193,10 @@ try_again:
66078 if (!skb)
66079 goto out;
66080
66081+ err = gr_search_udp_recvmsg(sk, skb);
66082+ if (err)
66083+ goto out_free;
66084+
66085 ulen = skb->len - sizeof(struct udphdr);
66086 if (len > ulen)
66087 len = ulen;
66088@@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66089
66090 drop:
66091 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66092- atomic_inc(&sk->sk_drops);
66093+ atomic_inc_unchecked(&sk->sk_drops);
66094 kfree_skb(skb);
66095 return -1;
66096 }
66097@@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66098 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66099
66100 if (!skb1) {
66101- atomic_inc(&sk->sk_drops);
66102+ atomic_inc_unchecked(&sk->sk_drops);
66103 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66104 IS_UDPLITE(sk));
66105 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66106@@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66107 goto csum_error;
66108
66109 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66110+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66111+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66112+#endif
66113 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66114
66115 /*
66116@@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66117 sk_wmem_alloc_get(sp),
66118 sk_rmem_alloc_get(sp),
66119 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66120- atomic_read(&sp->sk_refcnt), sp,
66121- atomic_read(&sp->sk_drops), len);
66122+ atomic_read(&sp->sk_refcnt),
66123+#ifdef CONFIG_GRKERNSEC_HIDESYM
66124+ NULL,
66125+#else
66126+ sp,
66127+#endif
66128+ atomic_read_unchecked(&sp->sk_drops), len);
66129 }
66130
66131 int udp4_seq_show(struct seq_file *seq, void *v)
66132diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66133--- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66134+++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66135@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66136 #ifdef CONFIG_XFRM
66137 {
66138 struct rt6_info *rt = (struct rt6_info *)dst;
66139- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66140+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66141 }
66142 #endif
66143 }
66144@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66145 #ifdef CONFIG_XFRM
66146 if (dst) {
66147 struct rt6_info *rt = (struct rt6_info *)dst;
66148- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66149+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66150 __sk_dst_reset(sk);
66151 dst = NULL;
66152 }
66153diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66154--- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66155+++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66156@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66157 int val, valbool;
66158 int retv = -ENOPROTOOPT;
66159
66160+ pax_track_stack();
66161+
66162 if (optval == NULL)
66163 val=0;
66164 else {
66165@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66166 int len;
66167 int val;
66168
66169+ pax_track_stack();
66170+
66171 if (ip6_mroute_opt(optname))
66172 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66173
66174diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66175--- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66176+++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-05 19:44:37.000000000 -0400
66177@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66178 {
66179 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66180 skb_checksum_complete(skb)) {
66181- atomic_inc(&sk->sk_drops);
66182+ atomic_inc_unchecked(&sk->sk_drops);
66183 kfree_skb(skb);
66184 return NET_RX_DROP;
66185 }
66186@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66187 struct raw6_sock *rp = raw6_sk(sk);
66188
66189 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66190- atomic_inc(&sk->sk_drops);
66191+ atomic_inc_unchecked(&sk->sk_drops);
66192 kfree_skb(skb);
66193 return NET_RX_DROP;
66194 }
66195@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66196
66197 if (inet->hdrincl) {
66198 if (skb_checksum_complete(skb)) {
66199- atomic_inc(&sk->sk_drops);
66200+ atomic_inc_unchecked(&sk->sk_drops);
66201 kfree_skb(skb);
66202 return NET_RX_DROP;
66203 }
66204@@ -601,7 +601,7 @@ out:
66205 return err;
66206 }
66207
66208-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66209+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66210 struct flowi6 *fl6, struct dst_entry **dstp,
66211 unsigned int flags)
66212 {
66213@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66214 u16 proto;
66215 int err;
66216
66217+ pax_track_stack();
66218+
66219 /* Rough check on arithmetic overflow,
66220 better check is made in ip6_append_data().
66221 */
66222@@ -909,12 +911,15 @@ do_confirm:
66223 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66224 char __user *optval, int optlen)
66225 {
66226+ struct icmp6_filter filter;
66227+
66228 switch (optname) {
66229 case ICMPV6_FILTER:
66230 if (optlen > sizeof(struct icmp6_filter))
66231 optlen = sizeof(struct icmp6_filter);
66232- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66233+ if (copy_from_user(&filter, optval, optlen))
66234 return -EFAULT;
66235+ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
66236 return 0;
66237 default:
66238 return -ENOPROTOOPT;
66239@@ -926,6 +931,7 @@ static int rawv6_seticmpfilter(struct so
66240 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
66241 char __user *optval, int __user *optlen)
66242 {
66243+ struct icmp6_filter filter;
66244 int len;
66245
66246 switch (optname) {
66247@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66248 len = sizeof(struct icmp6_filter);
66249 if (put_user(len, optlen))
66250 return -EFAULT;
66251- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66252+ memcpy(&filter, &raw6_sk(sk)->filter, len);
66253+ if (copy_to_user(optval, &filter, len))
66254 return -EFAULT;
66255 return 0;
66256 default:
66257@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66258 0, 0L, 0,
66259 sock_i_uid(sp), 0,
66260 sock_i_ino(sp),
66261- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66262+ atomic_read(&sp->sk_refcnt),
66263+#ifdef CONFIG_GRKERNSEC_HIDESYM
66264+ NULL,
66265+#else
66266+ sp,
66267+#endif
66268+ atomic_read_unchecked(&sp->sk_drops));
66269 }
66270
66271 static int raw6_seq_show(struct seq_file *seq, void *v)
66272diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66273--- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66274+++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66275@@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66276 }
66277 #endif
66278
66279+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66280+extern int grsec_enable_blackhole;
66281+#endif
66282+
66283 static void tcp_v6_hash(struct sock *sk)
66284 {
66285 if (sk->sk_state != TCP_CLOSE) {
66286@@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66287 return 0;
66288
66289 reset:
66290+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66291+ if (!grsec_enable_blackhole)
66292+#endif
66293 tcp_v6_send_reset(sk, skb);
66294 discard:
66295 if (opt_skb)
66296@@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66297 TCP_SKB_CB(skb)->sacked = 0;
66298
66299 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66300- if (!sk)
66301+ if (!sk) {
66302+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66303+ ret = 1;
66304+#endif
66305 goto no_tcp_socket;
66306+ }
66307
66308 process:
66309- if (sk->sk_state == TCP_TIME_WAIT)
66310+ if (sk->sk_state == TCP_TIME_WAIT) {
66311+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66312+ ret = 2;
66313+#endif
66314 goto do_time_wait;
66315+ }
66316
66317 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66318 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66319@@ -1792,6 +1807,10 @@ no_tcp_socket:
66320 bad_packet:
66321 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66322 } else {
66323+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66324+ if (!grsec_enable_blackhole || (ret == 1 &&
66325+ (skb->dev->flags & IFF_LOOPBACK)))
66326+#endif
66327 tcp_v6_send_reset(NULL, skb);
66328 }
66329
66330@@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66331 uid,
66332 0, /* non standard timer */
66333 0, /* open_requests have no inode */
66334- 0, req);
66335+ 0,
66336+#ifdef CONFIG_GRKERNSEC_HIDESYM
66337+ NULL
66338+#else
66339+ req
66340+#endif
66341+ );
66342 }
66343
66344 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66345@@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66346 sock_i_uid(sp),
66347 icsk->icsk_probes_out,
66348 sock_i_ino(sp),
66349- atomic_read(&sp->sk_refcnt), sp,
66350+ atomic_read(&sp->sk_refcnt),
66351+#ifdef CONFIG_GRKERNSEC_HIDESYM
66352+ NULL,
66353+#else
66354+ sp,
66355+#endif
66356 jiffies_to_clock_t(icsk->icsk_rto),
66357 jiffies_to_clock_t(icsk->icsk_ack.ato),
66358 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66359@@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66360 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66361 tw->tw_substate, 0, 0,
66362 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66363- atomic_read(&tw->tw_refcnt), tw);
66364+ atomic_read(&tw->tw_refcnt),
66365+#ifdef CONFIG_GRKERNSEC_HIDESYM
66366+ NULL
66367+#else
66368+ tw
66369+#endif
66370+ );
66371 }
66372
66373 static int tcp6_seq_show(struct seq_file *seq, void *v)
66374diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66375--- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66376+++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66377@@ -50,6 +50,10 @@
66378 #include <linux/seq_file.h>
66379 #include "udp_impl.h"
66380
66381+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66382+extern int grsec_enable_blackhole;
66383+#endif
66384+
66385 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66386 {
66387 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66388@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66389
66390 return 0;
66391 drop:
66392- atomic_inc(&sk->sk_drops);
66393+ atomic_inc_unchecked(&sk->sk_drops);
66394 drop_no_sk_drops_inc:
66395 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66396 kfree_skb(skb);
66397@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66398 continue;
66399 }
66400 drop:
66401- atomic_inc(&sk->sk_drops);
66402+ atomic_inc_unchecked(&sk->sk_drops);
66403 UDP6_INC_STATS_BH(sock_net(sk),
66404 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66405 UDP6_INC_STATS_BH(sock_net(sk),
66406@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66407 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66408 proto == IPPROTO_UDPLITE);
66409
66410+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66411+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66412+#endif
66413 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66414
66415 kfree_skb(skb);
66416@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66417 if (!sock_owned_by_user(sk))
66418 udpv6_queue_rcv_skb(sk, skb);
66419 else if (sk_add_backlog(sk, skb)) {
66420- atomic_inc(&sk->sk_drops);
66421+ atomic_inc_unchecked(&sk->sk_drops);
66422 bh_unlock_sock(sk);
66423 sock_put(sk);
66424 goto discard;
66425@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66426 0, 0L, 0,
66427 sock_i_uid(sp), 0,
66428 sock_i_ino(sp),
66429- atomic_read(&sp->sk_refcnt), sp,
66430- atomic_read(&sp->sk_drops));
66431+ atomic_read(&sp->sk_refcnt),
66432+#ifdef CONFIG_GRKERNSEC_HIDESYM
66433+ NULL,
66434+#else
66435+ sp,
66436+#endif
66437+ atomic_read_unchecked(&sp->sk_drops));
66438 }
66439
66440 int udp6_seq_show(struct seq_file *seq, void *v)
66441diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66442--- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66443+++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66444@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66445 add_wait_queue(&self->open_wait, &wait);
66446
66447 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66448- __FILE__,__LINE__, tty->driver->name, self->open_count );
66449+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66450
66451 /* As far as I can see, we protect open_count - Jean II */
66452 spin_lock_irqsave(&self->spinlock, flags);
66453 if (!tty_hung_up_p(filp)) {
66454 extra_count = 1;
66455- self->open_count--;
66456+ local_dec(&self->open_count);
66457 }
66458 spin_unlock_irqrestore(&self->spinlock, flags);
66459- self->blocked_open++;
66460+ local_inc(&self->blocked_open);
66461
66462 while (1) {
66463 if (tty->termios->c_cflag & CBAUD) {
66464@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66465 }
66466
66467 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66468- __FILE__,__LINE__, tty->driver->name, self->open_count );
66469+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66470
66471 schedule();
66472 }
66473@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66474 if (extra_count) {
66475 /* ++ is not atomic, so this should be protected - Jean II */
66476 spin_lock_irqsave(&self->spinlock, flags);
66477- self->open_count++;
66478+ local_inc(&self->open_count);
66479 spin_unlock_irqrestore(&self->spinlock, flags);
66480 }
66481- self->blocked_open--;
66482+ local_dec(&self->blocked_open);
66483
66484 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66485- __FILE__,__LINE__, tty->driver->name, self->open_count);
66486+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66487
66488 if (!retval)
66489 self->flags |= ASYNC_NORMAL_ACTIVE;
66490@@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66491 }
66492 /* ++ is not atomic, so this should be protected - Jean II */
66493 spin_lock_irqsave(&self->spinlock, flags);
66494- self->open_count++;
66495+ local_inc(&self->open_count);
66496
66497 tty->driver_data = self;
66498 self->tty = tty;
66499 spin_unlock_irqrestore(&self->spinlock, flags);
66500
66501 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66502- self->line, self->open_count);
66503+ self->line, local_read(&self->open_count));
66504
66505 /* Not really used by us, but lets do it anyway */
66506 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66507@@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66508 return;
66509 }
66510
66511- if ((tty->count == 1) && (self->open_count != 1)) {
66512+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66513 /*
66514 * Uh, oh. tty->count is 1, which means that the tty
66515 * structure will be freed. state->count should always
66516@@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66517 */
66518 IRDA_DEBUG(0, "%s(), bad serial port count; "
66519 "tty->count is 1, state->count is %d\n", __func__ ,
66520- self->open_count);
66521- self->open_count = 1;
66522+ local_read(&self->open_count));
66523+ local_set(&self->open_count, 1);
66524 }
66525
66526- if (--self->open_count < 0) {
66527+ if (local_dec_return(&self->open_count) < 0) {
66528 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66529- __func__, self->line, self->open_count);
66530- self->open_count = 0;
66531+ __func__, self->line, local_read(&self->open_count));
66532+ local_set(&self->open_count, 0);
66533 }
66534- if (self->open_count) {
66535+ if (local_read(&self->open_count)) {
66536 spin_unlock_irqrestore(&self->spinlock, flags);
66537
66538 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66539@@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66540 tty->closing = 0;
66541 self->tty = NULL;
66542
66543- if (self->blocked_open) {
66544+ if (local_read(&self->blocked_open)) {
66545 if (self->close_delay)
66546 schedule_timeout_interruptible(self->close_delay);
66547 wake_up_interruptible(&self->open_wait);
66548@@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66549 spin_lock_irqsave(&self->spinlock, flags);
66550 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66551 self->tty = NULL;
66552- self->open_count = 0;
66553+ local_set(&self->open_count, 0);
66554 spin_unlock_irqrestore(&self->spinlock, flags);
66555
66556 wake_up_interruptible(&self->open_wait);
66557@@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66558 seq_putc(m, '\n');
66559
66560 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66561- seq_printf(m, "Open count: %d\n", self->open_count);
66562+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66563 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66564 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66565
66566diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66567--- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66568+++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66569@@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66570
66571 write_lock_bh(&iucv_sk_list.lock);
66572
66573- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66574+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66575 while (__iucv_get_sock_by_name(name)) {
66576 sprintf(name, "%08x",
66577- atomic_inc_return(&iucv_sk_list.autobind_name));
66578+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66579 }
66580
66581 write_unlock_bh(&iucv_sk_list.lock);
66582diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66583--- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66584+++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66585@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66586 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66587 struct xfrm_kmaddress k;
66588
66589+ pax_track_stack();
66590+
66591 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66592 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66593 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66594@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66595 static u32 get_acqseq(void)
66596 {
66597 u32 res;
66598- static atomic_t acqseq;
66599+ static atomic_unchecked_t acqseq;
66600
66601 do {
66602- res = atomic_inc_return(&acqseq);
66603+ res = atomic_inc_return_unchecked(&acqseq);
66604 } while (!res);
66605 return res;
66606 }
66607@@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66608 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66609 else
66610 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66611+#ifdef CONFIG_GRKERNSEC_HIDESYM
66612+ NULL,
66613+#else
66614 s,
66615+#endif
66616 atomic_read(&s->sk_refcnt),
66617 sk_rmem_alloc_get(s),
66618 sk_wmem_alloc_get(s),
66619diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66620--- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66621+++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66622@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66623 goto out;
66624
66625 lapb->dev = dev;
66626- lapb->callbacks = *callbacks;
66627+ lapb->callbacks = callbacks;
66628
66629 __lapb_insert_cb(lapb);
66630
66631@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66632
66633 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66634 {
66635- if (lapb->callbacks.connect_confirmation)
66636- lapb->callbacks.connect_confirmation(lapb->dev, reason);
66637+ if (lapb->callbacks->connect_confirmation)
66638+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
66639 }
66640
66641 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66642 {
66643- if (lapb->callbacks.connect_indication)
66644- lapb->callbacks.connect_indication(lapb->dev, reason);
66645+ if (lapb->callbacks->connect_indication)
66646+ lapb->callbacks->connect_indication(lapb->dev, reason);
66647 }
66648
66649 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66650 {
66651- if (lapb->callbacks.disconnect_confirmation)
66652- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66653+ if (lapb->callbacks->disconnect_confirmation)
66654+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66655 }
66656
66657 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66658 {
66659- if (lapb->callbacks.disconnect_indication)
66660- lapb->callbacks.disconnect_indication(lapb->dev, reason);
66661+ if (lapb->callbacks->disconnect_indication)
66662+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
66663 }
66664
66665 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66666 {
66667- if (lapb->callbacks.data_indication)
66668- return lapb->callbacks.data_indication(lapb->dev, skb);
66669+ if (lapb->callbacks->data_indication)
66670+ return lapb->callbacks->data_indication(lapb->dev, skb);
66671
66672 kfree_skb(skb);
66673 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66674@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66675 {
66676 int used = 0;
66677
66678- if (lapb->callbacks.data_transmit) {
66679- lapb->callbacks.data_transmit(lapb->dev, skb);
66680+ if (lapb->callbacks->data_transmit) {
66681+ lapb->callbacks->data_transmit(lapb->dev, skb);
66682 used = 1;
66683 }
66684
66685diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66686--- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66687+++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66688@@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66689 struct tid_ampdu_rx *tid_rx;
66690 struct tid_ampdu_tx *tid_tx;
66691
66692+ pax_track_stack();
66693+
66694 rcu_read_lock();
66695
66696 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66697@@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66698 struct sta_info *sta = file->private_data;
66699 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66700
66701+ pax_track_stack();
66702+
66703 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66704 htc->ht_supported ? "" : "not ");
66705 if (htc->ht_supported) {
66706diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66707--- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66708+++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66709@@ -27,6 +27,7 @@
66710 #include <net/ieee80211_radiotap.h>
66711 #include <net/cfg80211.h>
66712 #include <net/mac80211.h>
66713+#include <asm/local.h>
66714 #include "key.h"
66715 #include "sta_info.h"
66716
66717@@ -714,7 +715,7 @@ struct ieee80211_local {
66718 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66719 spinlock_t queue_stop_reason_lock;
66720
66721- int open_count;
66722+ local_t open_count;
66723 int monitors, cooked_mntrs;
66724 /* number of interfaces with corresponding FIF_ flags */
66725 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66726diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66727--- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66728+++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66729@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66730 break;
66731 }
66732
66733- if (local->open_count == 0) {
66734+ if (local_read(&local->open_count) == 0) {
66735 res = drv_start(local);
66736 if (res)
66737 goto err_del_bss;
66738@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66739 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66740
66741 if (!is_valid_ether_addr(dev->dev_addr)) {
66742- if (!local->open_count)
66743+ if (!local_read(&local->open_count))
66744 drv_stop(local);
66745 return -EADDRNOTAVAIL;
66746 }
66747@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66748 mutex_unlock(&local->mtx);
66749
66750 if (coming_up)
66751- local->open_count++;
66752+ local_inc(&local->open_count);
66753
66754 if (hw_reconf_flags) {
66755 ieee80211_hw_config(local, hw_reconf_flags);
66756@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66757 err_del_interface:
66758 drv_remove_interface(local, &sdata->vif);
66759 err_stop:
66760- if (!local->open_count)
66761+ if (!local_read(&local->open_count))
66762 drv_stop(local);
66763 err_del_bss:
66764 sdata->bss = NULL;
66765@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66766 }
66767
66768 if (going_down)
66769- local->open_count--;
66770+ local_dec(&local->open_count);
66771
66772 switch (sdata->vif.type) {
66773 case NL80211_IFTYPE_AP_VLAN:
66774@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66775
66776 ieee80211_recalc_ps(local, -1);
66777
66778- if (local->open_count == 0) {
66779+ if (local_read(&local->open_count) == 0) {
66780 if (local->ops->napi_poll)
66781 napi_disable(&local->napi);
66782 ieee80211_clear_tx_pending(local);
66783diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66784--- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66785+++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66786@@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66787 local->hw.conf.power_level = power;
66788 }
66789
66790- if (changed && local->open_count) {
66791+ if (changed && local_read(&local->open_count)) {
66792 ret = drv_config(local, changed);
66793 /*
66794 * Goal:
66795diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66796--- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66797+++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66798@@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66799 bool have_higher_than_11mbit = false;
66800 u16 ap_ht_cap_flags;
66801
66802+ pax_track_stack();
66803+
66804 /* AssocResp and ReassocResp have identical structure */
66805
66806 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66807diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66808--- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66809+++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66810@@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66811 }
66812
66813 /* stop hardware - this must stop RX */
66814- if (local->open_count)
66815+ if (local_read(&local->open_count))
66816 ieee80211_stop_device(local);
66817
66818 local->suspended = true;
66819diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66820--- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66821+++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66822@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66823
66824 ASSERT_RTNL();
66825
66826- if (local->open_count)
66827+ if (local_read(&local->open_count))
66828 return -EBUSY;
66829
66830 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66831diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
66832--- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
66833+++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
66834@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66835
66836 spin_unlock_irqrestore(&events->lock, status);
66837
66838- if (copy_to_user(buf, pb, p))
66839+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66840 return -EFAULT;
66841
66842 return p;
66843diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
66844--- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
66845+++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
66846@@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
66847 local->resuming = true;
66848
66849 /* restart hardware */
66850- if (local->open_count) {
66851+ if (local_read(&local->open_count)) {
66852 /*
66853 * Upon resume hardware can sometimes be goofy due to
66854 * various platform / driver / bus issues, so restarting
66855diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
66856--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
66857+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
66858@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66859 /* Increase the refcnt counter of the dest */
66860 atomic_inc(&dest->refcnt);
66861
66862- conn_flags = atomic_read(&dest->conn_flags);
66863+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
66864 if (cp->protocol != IPPROTO_UDP)
66865 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66866 /* Bind with the destination and its corresponding transmitter */
66867@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66868 atomic_set(&cp->refcnt, 1);
66869
66870 atomic_set(&cp->n_control, 0);
66871- atomic_set(&cp->in_pkts, 0);
66872+ atomic_set_unchecked(&cp->in_pkts, 0);
66873
66874 atomic_inc(&ipvs->conn_count);
66875 if (flags & IP_VS_CONN_F_NO_CPORT)
66876@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66877
66878 /* Don't drop the entry if its number of incoming packets is not
66879 located in [0, 8] */
66880- i = atomic_read(&cp->in_pkts);
66881+ i = atomic_read_unchecked(&cp->in_pkts);
66882 if (i > 8 || i < 0) return 0;
66883
66884 if (!todrop_rate[i]) return 0;
66885diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
66886--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
66887+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
66888@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66889 ret = cp->packet_xmit(skb, cp, pd->pp);
66890 /* do not touch skb anymore */
66891
66892- atomic_inc(&cp->in_pkts);
66893+ atomic_inc_unchecked(&cp->in_pkts);
66894 ip_vs_conn_put(cp);
66895 return ret;
66896 }
66897@@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66898 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66899 pkts = sysctl_sync_threshold(ipvs);
66900 else
66901- pkts = atomic_add_return(1, &cp->in_pkts);
66902+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66903
66904 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66905 cp->protocol == IPPROTO_SCTP) {
66906diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
66907--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
66908+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
66909@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66910 ip_vs_rs_hash(ipvs, dest);
66911 write_unlock_bh(&ipvs->rs_lock);
66912 }
66913- atomic_set(&dest->conn_flags, conn_flags);
66914+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
66915
66916 /* bind the service */
66917 if (!dest->svc) {
66918@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66919 " %-7s %-6d %-10d %-10d\n",
66920 &dest->addr.in6,
66921 ntohs(dest->port),
66922- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66923+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66924 atomic_read(&dest->weight),
66925 atomic_read(&dest->activeconns),
66926 atomic_read(&dest->inactconns));
66927@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66928 "%-7s %-6d %-10d %-10d\n",
66929 ntohl(dest->addr.ip),
66930 ntohs(dest->port),
66931- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66932+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66933 atomic_read(&dest->weight),
66934 atomic_read(&dest->activeconns),
66935 atomic_read(&dest->inactconns));
66936@@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66937 struct ip_vs_dest_user *udest_compat;
66938 struct ip_vs_dest_user_kern udest;
66939
66940+ pax_track_stack();
66941+
66942 if (!capable(CAP_NET_ADMIN))
66943 return -EPERM;
66944
66945@@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
66946
66947 entry.addr = dest->addr.ip;
66948 entry.port = dest->port;
66949- entry.conn_flags = atomic_read(&dest->conn_flags);
66950+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66951 entry.weight = atomic_read(&dest->weight);
66952 entry.u_threshold = dest->u_threshold;
66953 entry.l_threshold = dest->l_threshold;
66954@@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
66955 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66956
66957 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66958- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66959+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66960 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66961 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66962 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66963diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
66964--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
66965+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
66966@@ -648,7 +648,7 @@ control:
66967 * i.e only increment in_pkts for Templates.
66968 */
66969 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66970- int pkts = atomic_add_return(1, &cp->in_pkts);
66971+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66972
66973 if (pkts % sysctl_sync_period(ipvs) != 1)
66974 return;
66975@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66976
66977 if (opt)
66978 memcpy(&cp->in_seq, opt, sizeof(*opt));
66979- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66980+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66981 cp->state = state;
66982 cp->old_state = cp->state;
66983 /*
66984diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
66985--- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
66986+++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
66987@@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66988 else
66989 rc = NF_ACCEPT;
66990 /* do not touch skb anymore */
66991- atomic_inc(&cp->in_pkts);
66992+ atomic_inc_unchecked(&cp->in_pkts);
66993 goto out;
66994 }
66995
66996@@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66997 else
66998 rc = NF_ACCEPT;
66999 /* do not touch skb anymore */
67000- atomic_inc(&cp->in_pkts);
67001+ atomic_inc_unchecked(&cp->in_pkts);
67002 goto out;
67003 }
67004
67005diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67006--- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67007+++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67008@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67009
67010 To compile it as a module, choose M here. If unsure, say N.
67011
67012+config NETFILTER_XT_MATCH_GRADM
67013+ tristate '"gradm" match support'
67014+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67015+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67016+ ---help---
67017+ The gradm match allows to match on grsecurity RBAC being enabled.
67018+ It is useful when iptables rules are applied early on bootup to
67019+ prevent connections to the machine (except from a trusted host)
67020+ while the RBAC system is disabled.
67021+
67022 config NETFILTER_XT_MATCH_HASHLIMIT
67023 tristate '"hashlimit" match support'
67024 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67025diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67026--- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67027+++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67028@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67029 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67030 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67031 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67032+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67033 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67034 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67035 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67036diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67037--- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67038+++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67039@@ -70,7 +70,7 @@ struct nfulnl_instance {
67040 };
67041
67042 static DEFINE_SPINLOCK(instances_lock);
67043-static atomic_t global_seq;
67044+static atomic_unchecked_t global_seq;
67045
67046 #define INSTANCE_BUCKETS 16
67047 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67048@@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67049 /* global sequence number */
67050 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67051 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67052- htonl(atomic_inc_return(&global_seq)));
67053+ htonl(atomic_inc_return_unchecked(&global_seq)));
67054
67055 if (data_len) {
67056 struct nlattr *nla;
67057diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67058--- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67059+++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67060@@ -58,7 +58,7 @@ struct nfqnl_instance {
67061 */
67062 spinlock_t lock;
67063 unsigned int queue_total;
67064- atomic_t id_sequence; /* 'sequence' of pkt ids */
67065+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67066 struct list_head queue_list; /* packets in queue */
67067 };
67068
67069@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67070 nfmsg->version = NFNETLINK_V0;
67071 nfmsg->res_id = htons(queue->queue_num);
67072
67073- entry->id = atomic_inc_return(&queue->id_sequence);
67074+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67075 pmsg.packet_id = htonl(entry->id);
67076 pmsg.hw_protocol = entskb->protocol;
67077 pmsg.hook = entry->hook;
67078@@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67079 inst->peer_pid, inst->queue_total,
67080 inst->copy_mode, inst->copy_range,
67081 inst->queue_dropped, inst->queue_user_dropped,
67082- atomic_read(&inst->id_sequence), 1);
67083+ atomic_read_unchecked(&inst->id_sequence), 1);
67084 }
67085
67086 static const struct seq_operations nfqnl_seq_ops = {
67087diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67088--- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67089+++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67090@@ -0,0 +1,51 @@
67091+/*
67092+ * gradm match for netfilter
67093